Merge remote-tracking branch 'common/android-3.0' into android-omap-3.0
* common/android-3.0:
netfilter: xt_qtaguid: Allow tracking loopback
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 1eebdbf..c167a21 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -268,6 +268,33 @@
on a write to boostpulse, before allowing speed to drop according to
load as usual. Default is 80000 uS.
+2.7 Hotplug
+-----------
+
+The CPUfreq governor "hotplug" operates similarly to "ondemand" and
+"conservative". Its decisions are based primarily on CPU load. Like
+"ondemand", the "hotplug" governor ramps up to the highest frequency
+once the run-time tunable "up_threshold" parameter is crossed. Like
+"conservative", the "hotplug" governor exports a "down_threshold"
+parameter that is also tunable at run-time. When the "down_threshold"
+is crossed, the CPU transitions to the next lowest frequency in the
+CPUfreq frequency table instead of decrementing the frequency by a
+percentage of the maximum load.
+
+The main reason the "hotplug" governor exists is for architectures that
+require only the master CPU to be online in order to hit low-power
+states (C-states); OMAP4 is one such architecture. The "hotplug"
+governor is also helpful in reducing thermal output in devices with
+tight thermal constraints.
+
+Auxiliary CPUs are onlined/offlined based on CPU load, but the decision
+to do so is made only after averaging several sampling windows. This
+reduces CPU hotplug "thrashing", where normal fluctuations in system
+load cause many spurious plug-in and plug-out transitions. The number
+of sampling periods averaged together is set via the
+"hotplug_in_sampling_periods" and "hotplug_out_sampling_periods"
+run-time tunable parameters.
+
3. The Governor Interface in the CPUfreq Core
=============================================
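As a quick illustration of the tunables documented above, a userspace helper
could set them through the governor's sysfs directory. This is only a sketch:
the sysfs path below is an assumption modeled on the usual "ondemand"/
"conservative" layout (the governor source is authoritative), and the values
written are arbitrary examples, not defaults.

/*
 * Hypothetical helper: write "hotplug" governor tunables via sysfs.
 * HOTPLUG_DIR is assumed, not taken from the governor source.
 */
#include <stdio.h>

#define HOTPLUG_DIR "/sys/devices/system/cpu/cpufreq/hotplug/"

static int write_tunable(const char *name, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), HOTPLUG_DIR "%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", value);
	return fclose(f);
}

int main(void)
{
	write_tunable("up_threshold", "80");                  /* ramp to max above 80% load */
	write_tunable("down_threshold", "30");                /* step down one table entry below 30% */
	write_tunable("hotplug_in_sampling_periods", "5");    /* windows averaged before onlining */
	write_tunable("hotplug_out_sampling_periods", "20");  /* windows averaged before offlining */
	return 0;
}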
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 5ae70a12..3035d00 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -321,6 +321,8 @@
addition to CONFIG_PM as power management feature is required to
dynamically scale voltage and frequency in a system.
+opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+
7. Data Structures
==================
Typically an SoC contains multiple voltage domains which are variable. Each
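The point of the opp.txt addition is that the two helpers pair up: the caller
of opp_init_cpufreq_table() owns the resulting table and is expected to release
it with opp_free_cpufreq_table(). A minimal sketch of that pattern follows; the
example_* function names and the surrounding driver glue are hypothetical.

#include <linux/opp.h>
#include <linux/cpufreq.h>
#include <linux/device.h>

static struct cpufreq_frequency_table *freq_table;

/* Build a cpufreq frequency table from the OPPs registered for @dev. */
static int example_setup_freq_table(struct device *dev)
{
	int ret;

	ret = opp_init_cpufreq_table(dev, &freq_table);
	if (ret) {
		dev_err(dev, "failed to init cpufreq table: %d\n", ret);
		return ret;
	}
	/* ... hand freq_table to the cpufreq core ... */
	return 0;
}

/* Release the table allocated by opp_init_cpufreq_table(). */
static void example_free_freq_table(struct device *dev)
{
	opp_free_cpufreq_table(dev, &freq_table);
}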
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b9d6077..45e2bee 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -29,6 +29,7 @@
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW
+ select CPU_PM if (SUSPEND || CPU_IDLE)
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 4ddd0a6..be604f0 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -365,6 +365,7 @@
if (gic_nr == 0)
gic_cpu_base_addr = cpu_base;
+ gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic, irq_start);
gic_cpu_init(gic);
}
diff --git a/arch/arm/configs/android_4430_defconfig b/arch/arm/configs/android_4430_defconfig
new file mode 100644
index 0000000..d432abe
--- /dev/null
+++ b/arch/arm/configs/android_4430_defconfig
@@ -0,0 +1,1926 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.35
+# Fri Sep 3 08:14:37 2010
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_LOCKBREAK=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_ASHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_PLAT_SPEAR is not set
+
+#
+# TI OMAP Implementations
+#
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
+CONFIG_ARCH_OMAP4=y
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_SMARTREFLEX=y
+# CONFIG_OMAP_SMARTREFLEX_TESTING is not set
+CONFIG_OMAP_SMARTREFLEX_CLASS3=y
+# CONFIG_OMAP_RESET_CLOCKS is not set
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+CONFIG_OMAP_MBOX_FWK=y
+CONFIG_OMAP_REMOTE_PROC=y
+CONFIG_OMAP_RPROC_MEMPOOL_SIZE=0x600000
+CONFIG_OMAP_IOMMU=y
+# CONFIG_OMAP_IOMMU_DEBUG is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+# CONFIG_OMAP_PM is not set
+
+#
+# OMAP Board Type
+#
+# CONFIG_WIFI_CONTROL_FUNC is not set
+# CONFIG_TIWLAN_SDIO is not set
+# CONFIG_OMAP4_ES1 is not set
+CONFIG_MACH_OMAP_4430SDP=y
+CONFIG_MACH_OMAP4_PANDA=y
+# CONFIG_ERRATA_OMAP4_AXI2OCP is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
+CONFIG_CACHE_PL310=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_PL310_ERRATA_588369 is not set
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_ARM_GIC=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+CONFIG_HAVE_ARM_SCU=y
+CONFIG_HAVE_ARM_TWD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
+CONFIG_LOCAL_TIMERS=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M console=ttyS2,115200n8 initrd=0x81600000,20M ramdisk_size=20480"
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND_NVS=y
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HAS_WAKELOCK=y
+CONFIG_HAS_EARLYSUSPEND=y
+CONFIG_WAKELOCK=y
+CONFIG_WAKELOCK_STAT=y
+CONFIG_USER_WAKELOCK=y
+CONFIG_EARLYSUSPEND=y
+# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set
+CONFIG_CONSOLE_EARLYSUSPEND=y
+# CONFIG_FB_EARLYSUSPEND is not set
+# CONFIG_APM_EMULATION is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_RPS=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+# CONFIG_BT_L2CAP_EXT_FEATURES is not set
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+# CONFIG_BT_BNEP is not set
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTSDIO is not set
+# CONFIG_BT_HCIUART is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_PM is not set
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
+CONFIG_ANDROID_PMEM=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_KERNEL_DEBUGGER_CORE=y
+# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+CONFIG_SENSORS_BH1780=y
+# CONFIG_SENSORS_AK8975 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+CONFIG_UID_STAT=y
+CONFIG_BMP085=y
+# CONFIG_WL127X_RFKILL is not set
+# CONFIG_APANIC is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+CONFIG_KS8851=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_KEYRESET=y
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_OMAP4=y
+# CONFIG_KEYBOARD_TWL4030 is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_TOUCHSCREEN_SYNTM12XX=y
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+CONFIG_INPUT_KEYCHORD=y
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_TWL4030_PWRBUTTON is not set
+# CONFIG_INPUT_TWL4030_VIBRA is not set
+# CONFIG_INPUT_TWL6040_VIBRA is not set
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+CONFIG_INPUT_SFH7741=y
+CONFIG_INPUT_CMA3000_I2C=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_DCC_TTY is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TWL4030 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2782 is not set
+CONFIG_TWL6030_BCI_BATTERY=y
+CONFIG_CHARGER_BQ2415x=y
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MPCORE_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_MFD_SUPPORT=y
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
+# CONFIG_MFD_TC35892 is not set
+CONFIG_TWL6030_PWM=y
+CONFIG_TWL6030_GPADC=y
+CONFIG_TWL6040_CODEC=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB8500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=y
+
+#
+# Multimedia drivers
+#
+CONFIG_IR_CORE=y
+CONFIG_VIDEO_IR=y
+CONFIG_RC_MAP=y
+CONFIG_IR_NEC_DECODER=y
+CONFIG_IR_RC5_DECODER=y
+CONFIG_IR_RC6_DECODER=y
+CONFIG_IR_JVC_DECODER=y
+CONFIG_IR_SONY_DECODER=y
+# CONFIG_IR_IMON is not set
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=y
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=y
+CONFIG_MEDIA_TUNER_TDA8290=y
+CONFIG_MEDIA_TUNER_TDA9887=y
+CONFIG_MEDIA_TUNER_TEA5761=y
+CONFIG_MEDIA_TUNER_TEA5767=y
+CONFIG_MEDIA_TUNER_MT20XX=y
+CONFIG_MEDIA_TUNER_XC2028=y
+CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_MEDIA_TUNER_MC44S803=y
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEOBUF_GEN=y
+CONFIG_VIDEOBUF_DMA_CONTIG=y
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_IR_I2C=y
+# CONFIG_VIDEO_VIVI is not set
+CONFIG_VIDEO_OMAP2_VOUT=y
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_I2C_SI4713 is not set
+# CONFIG_RADIO_SI4713 is not set
+# CONFIG_RADIO_SI470X is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_SAA7706H is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_DAB is not set
+CONFIG_DMM_OMAP=y
+CONFIG_TILER_OMAP=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+CONFIG_OMAP2_DSS_DPI=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_HDMI=y
+CONFIG_OMAP2_DSS_DSI=y
+# CONFIG_OMAP2_DSS_USE_DSI_PLL is not set
+CONFIG_OMAP2_DSS_FAKE_VSYNC=y
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=2
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+CONFIG_PANEL_PICO_DLP=y
+CONFIG_PANEL_TAAL=y
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+# CONFIG_LCD_S6E63M0 is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_SPI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_ABE_DSP=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_MCPDM=y
+CONFIG_SND_OMAP_SOC_ABE=y
+CONFIG_SND_OMAP_SOC_DMIC=y
+CONFIG_SND_OMAP_SOC_SDP4430=y
+CONFIG_SND_OMAP_SOC_HDMI=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_DMIC=y
+CONFIG_SND_SOC_TWL6040=y
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 44xx high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+CONFIG_USB_MUSB_PERIPHERAL=y
+# CONFIG_USB_MUSB_OTG is not set
+CONFIG_USB_GADGET_MUSB_HDRC=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_ANDROID is not set
+CONFIG_USB_CDC_COMPOSITE=m
+# CONFIG_USB_G_NOKIA is not set
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_TWL4030_USB is not set
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_EMBEDDED_SDIO is not set
+# CONFIG_MMC_PARANOID_SD_INIT is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+CONFIG_LEDS_PWM=y
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TRIGGERS is not set
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+CONFIG_RTC_INTF_ALARM=y
+CONFIG_RTC_INTF_ALARM_DEV=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y
+# CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION is not set
+# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+# CONFIG_POHMELFS is not set
+
+#
+# RAR Register Driver
+#
+CONFIG_IIO=y
+# CONFIG_IIO_RING_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_LIS3L02DQ is not set
+
+#
+# Analog to digital convertors
+#
+# CONFIG_MAX1363 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16260 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16300 is not set
+# CONFIG_ADIS16350 is not set
+# CONFIG_ADIS16400 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_TSL2563 is not set
+
+#
+# Magnetometer sensors
+#
+CONFIG_SENSORS_HMC5843=y
+
+#
+# Triggers - standalone
+#
+# CONFIG_RAMZSWAP is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_FB_SM7XX is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+CONFIG_TI_ST=m
+CONFIG_ST_BT=m
+CONFIG_ST_FM=m
+# CONFIG_ADIS16255 is not set
+CONFIG_Sys_Link=y
+CONFIG_SYSLINK_PROC=y
+CONFIG_SYSLINK_PROC4430=y
+CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR=0x9CF00000
+CONFIG_SYSLINK_DUCATI_PM=y
+CONFIG_OMAP_DEVICE_HANDLER=y
+CONFIG_MPU_SYSLINK_PLATFORM=y
+CONFIG_MPU_SYSLINK_IPC=y
+CONFIG_SYSLINK_USE_SYSMGR=y
+CONFIG_SYSLINK_IOMMU_ENABLE=y
+CONFIG_SYSLINK_RECOVERY=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_DETECT_SOFTLOCKUP is not set
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index d5f00d7..5708151 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -8,7 +8,6 @@
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
@@ -21,6 +20,8 @@
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_OMAP=y
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_SMARTREFLEX_CLASS3=y
CONFIG_OMAP_RESET_CLOCKS=y
CONFIG_OMAP_MUX_DEBUG=y
CONFIG_ARM_THUMBEE=y
@@ -34,6 +35,14 @@
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
CONFIG_KEXEC=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_HOTPLUG=y
+CONFIG_CPU_IDLE=y
CONFIG_FPE_NWFPE=y
CONFIG_BINFMT_MISC=y
CONFIG_PM_DEBUG=y
@@ -64,6 +73,8 @@
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -219,13 +230,33 @@
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y
+CONFIG_SPARSE_RCU_POINTER=y
+CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=120
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/panda_defconfig b/arch/arm/configs/panda_defconfig
new file mode 100644
index 0000000..b88843c
--- /dev/null
+++ b/arch/arm/configs/panda_defconfig
@@ -0,0 +1,279 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_ASHMEM=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
+# CONFIG_MACH_OMAP_4430SDP is not set
+CONFIG_ARM_THUMBEE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_CMDLINE="console=ttyFIQ0"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_WAKELOCK=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_PHONET=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_ONENAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_KERNEL_DEBUGGER_CORE=y
+CONFIG_UID_STAT=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_IFB=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_PPP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_TWL6030_PWM=y
+CONFIG_REGULATOR_TWL4030=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_PVR_SGX=y
+CONFIG_PVR_NEED_PVR_DPF=y
+CONFIG_PVR_NEED_PVR_ASSERT=y
+CONFIG_PVR_USSE_EDM_STATUS_DEBUG=y
+CONFIG_FB=y
+CONFIG_OMAP2_DSS=y
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_FB_OMAP2=y
+CONFIG_PANEL_GENERIC_DPI=y
+CONFIG_DISPLAY_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_OMAP2PLUS=y
+CONFIG_USB_MUSB_PERIPHERAL=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_OMAP=y
+CONFIG_MMC_OMAP_HS=y
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_ARM_UNWIND is not set
+CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837..07f1d11 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -236,6 +236,21 @@
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
+/**
+ * dma_alloc_stronglyordered - allocate strongly ordered memory
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific address
+ *
+ * Allocate strongly ordered memory. This function allocates pages,
+ * returns the CPU-viewed address, and sets @handle to the
+ * device-viewed address.
+ */
+extern void *dma_alloc_stronglyordered(struct device *, size_t, dma_addr_t *,
+ gfp_t);
+
+#define dma_free_stronglyordered(dev, size, cpu_addr, handle) \
+ dma_free_coherent(dev, size, cpu_addr, handle)
#ifdef CONFIG_DMABOUNCE
/*
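The dma_alloc_stronglyordered()/dma_free_stronglyordered() helpers above follow the same calling convention as dma_alloc_coherent(). A minimal usage sketch, assuming a hypothetical driver that needs an uncached, unbuffered page shared with a device (the example_* names and the probed device pointer are illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *so_cpu_addr;
static dma_addr_t so_dma_handle;

/* Allocate one page of strongly ordered memory shared with a device. */
static int example_alloc_so(struct device *dev)
{
	so_cpu_addr = dma_alloc_stronglyordered(dev, PAGE_SIZE,
						&so_dma_handle, GFP_KERNEL);
	return so_cpu_addr ? 0 : -ENOMEM;
}

/* Release it through the matching macro, which wraps dma_free_coherent(). */
static void example_free_so(struct device *dev)
{
	dma_free_stronglyordered(dev, PAGE_SIZE, so_cpu_addr, so_dma_handle);
}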
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 2a20876..5dc6db1 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -45,16 +45,18 @@
#define L2X0_CLEAN_INV_LINE_PA 0x7F0
#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
#define L2X0_CLEAN_INV_WAY 0x7FC
-#define L2X0_LOCKDOWN_WAY_D 0x900
-#define L2X0_LOCKDOWN_WAY_I 0x904
+#define L2X0_LOCKDOWN_WAY_D0 0x900
+#define L2X0_LOCKDOWN_WAY_D1 0x908
+#define L2X0_LOCKDOWN_WAY_I0 0x904
+#define L2X0_LOCKDOWN_WAY_I1 0x90C
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
#define L2X0_PREFETCH_CTRL 0xF60
#define L2X0_POWER_CTRL 0xF80
-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
-#define L2X0_STNDBY_MODE_EN (1 << 0)
+#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L2X0_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
#define L2X0_CACHE_ID_REV_MASK (0x3f)
@@ -62,7 +64,7 @@
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
-#define L2X0_AUX_CTRL_MASK 0xc0000fff
+#define L2X0_AUX_CTRL_MASK 0xd0000fff
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
@@ -73,6 +75,10 @@
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define L2X0_PREFETCH_DATA_PREFETCH_SHIFT 28
+#define L2X0_PREFETCH_INTSTR_PREFETCH_SHIFT 29
+#define L2X0_PREFETCH_DOUBLE_LINEFILL_SHIFT 30
+
#define REV_PL310_R2P0 4
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 28810c6..2faf2e0 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -3,6 +3,8 @@
#include <mach/irqs.h>
+#define ARCH_HAS_NMI_WATCHDOG
+
#ifndef irq_canonicalize
#define irq_canonicalize(i) (i)
#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index d2fedb5..b36f365 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -29,6 +29,7 @@
#define MT_MEMORY_NONCACHED 11
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
+#define MT_MEMORY_SO 14
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/nmi.h b/arch/arm/include/asm/nmi.h
new file mode 100644
index 0000000..3cf8bdf
--- /dev/null
+++ b/arch/arm/include/asm/nmi.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_ARM_NMI_H_
+#define __ASM_ARM_NMI_H_
+
+#include <linux/notifier.h>
+
+extern struct atomic_notifier_head touch_watchdog_notifier_head;
+
+static inline void touch_nmi_watchdog(void)
+{
+ atomic_notifier_call_chain(&touch_watchdog_notifier_head, 0, 0);
+ touch_softlockup_watchdog();
+}
+
+#endif /* __ASM_ARM_NMI_H_ */
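touch_nmi_watchdog() above fans out through an atomic notifier chain rather than touching a fixed hardware register, so any driver can be petted whenever the rest of the kernel calls it. A hedged sketch of how a hardware-watchdog driver might hook in (the example_* names are illustrative; touch_watchdog_notifier_head itself is defined in arch/arm/kernel/irq.c later in this patch):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/nmi.h>

/* Called every time some code path invokes touch_nmi_watchdog(). */
static int example_touch_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	/* Kick the platform's hardware watchdog here. */
	return NOTIFY_OK;
}

static struct notifier_block example_touch_nb = {
	.notifier_call = example_touch_cb,
};

static int __init example_watchdog_hook_init(void)
{
	return atomic_notifier_chain_register(&touch_watchdog_notifier_head,
					      &example_touch_nb);
}
core_initcall(example_watchdog_hook_init);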
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 6afd081..a558978 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -232,6 +232,9 @@
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#define pgprot_stronglyordered(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
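pgprot_stronglyordered() above is used the same way as the existing pgprot_noncached()/pgprot_writecombine() modifiers, typically when a region must be mapped with no buffering or caching at all. A minimal sketch of a hypothetical mmap() handler (example_mmap and its file are illustrative, not part of this patch):

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/* Map the region backing vm_pgoff into user space as strongly ordered. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_stronglyordered(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}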
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8ec535e..35c3fc9 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -97,8 +97,22 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SMP
+
+#define cpu_switch_mm(pgd, mm) \
+ ({ \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ cpu_do_switch_mm(virt_to_phys(pgd), mm); \
+ local_irq_restore(flags); \
+ })
+
+#else /* SMP */
+
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#endif
+
#define cpu_get_pgd() \
({ \
unsigned long pg; \
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad0..fee2edf 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -51,6 +51,8 @@
unsigned long irq_err_count;
+ATOMIC_NOTIFIER_HEAD(touch_watchdog_notifier_head);
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e895f97..1312861 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -279,6 +279,7 @@
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ static bool booted;
/*
* The identity mapping is uncached (strongly ordered), so
@@ -310,7 +311,9 @@
notify_cpu_starting(cpu);
- calibrate_delay();
+ if (!booted)
+ calibrate_delay();
+ booted = true;
smp_store_cpu_info(cpu);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d4..350a125 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,13 +10,17 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/percpu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
@@ -24,7 +28,9 @@
/* set up by the platform code */
void __iomem *twd_base;
+static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static DEFINE_PER_CPU(struct clock_event_device *, twd_ce);
static void twd_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
@@ -80,6 +86,48 @@
return 0;
}
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+ twd_timer_rate = clk_get_rate(twd_clk);
+
+ clockevents_update_freq(__get_cpu_var(twd_ce), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+ smp_call_function_single(freqs->cpu, twd_update_frequency,
+ NULL, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+ .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+ if (!IS_ERR_OR_NULL(twd_clk))
+ return cpufreq_register_notifier(&twd_cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+core_initcall(twd_cpufreq_init);
+
static void __cpuinit twd_calibrate_rate(void)
{
unsigned long count;
@@ -124,7 +172,16 @@
*/
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
- twd_calibrate_rate();
+ if (twd_clk == NULL) {
+ twd_clk = clk_get_sys("smp_twd", NULL);
+ if (IS_ERR_OR_NULL(twd_clk))
+ pr_warn("%s: no clock found\n", __func__);
+ }
+
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -132,13 +189,11 @@
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
- clk->shift = 20;
- clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
- clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
- clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
/* Make sure our local interrupt controller has this enabled */
gic_enable_ppi(clk->irq);
- clockevents_register_device(clk);
+ __get_cpu_var(twd_ce) = clk;
+
+ clockevents_config_and_register(clk, twd_timer_rate, 0xf, 0xffffffff);
}
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 5b114d1..19e5a82 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,12 +4,15 @@
# Common support
obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
+obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o dmtimer.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
+# CPUFREQ driver
+obj-$(CONFIG_CPU_FREQ) += omap1-cpufreq.o
+
# Power Management
obj-$(CONFIG_PM) += pm.o sleep.o
diff --git a/arch/arm/mach-omap1/dmtimer.c b/arch/arm/mach-omap1/dmtimer.c
new file mode 100644
index 0000000..dbc189c
--- /dev/null
+++ b/arch/arm/mach-omap1/dmtimer.c
@@ -0,0 +1,179 @@
+/**
+ * OMAP1 Dual-Mode Timers - platform device registration
+ *
+ * Contains first-level initialization routines which internally
+ * generate timer device information and register it with the Linux
+ * device model. It also has a low-level function to change the timer
+ * input clock source.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+
+#include <plat/dmtimer.h>
+
+#define OMAP1610_GPTIMER1_BASE 0xfffb1400
+#define OMAP1610_GPTIMER2_BASE 0xfffb1c00
+#define OMAP1610_GPTIMER3_BASE 0xfffb2400
+#define OMAP1610_GPTIMER4_BASE 0xfffb2c00
+#define OMAP1610_GPTIMER5_BASE 0xfffb3400
+#define OMAP1610_GPTIMER6_BASE 0xfffb3c00
+#define OMAP1610_GPTIMER7_BASE 0xfffb7400
+#define OMAP1610_GPTIMER8_BASE 0xfffbd400
+
+#define OMAP1_DM_TIMER_COUNT 8
+
+#define OMAP_TIMER_OCP_CFG_REG 0x10
+#define OMAP_TIMER_SYS_STAT_REG 0x14
+#define OMAP_TIMER_IF_CTRL_REG 0x40
+
+static int omap1_dm_timer_set_src(struct platform_device *pdev,
+ int source)
+{
+ int n = (pdev->id - 1) << 1;
+ u32 l;
+
+ l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
+ l |= source << n;
+ omap_writel(l, MOD_CONF_CTRL_1);
+
+ return 0;
+}
+
+
+int __init omap1_dm_timer_init(void)
+{
+ int i;
+ int ret;
+ struct dmtimer_platform_data *pdata;
+ struct platform_device *pdev;
+
+ if (!cpu_is_omap16xx())
+ return 0;
+
+ for (i = 1; i <= OMAP1_DM_TIMER_COUNT; i++) {
+ struct resource res[2];
+ u32 base, irq;
+
+ switch (i) {
+ case 1:
+ base = OMAP1610_GPTIMER1_BASE;
+ irq = INT_1610_GPTIMER1;
+ break;
+ case 2:
+ base = OMAP1610_GPTIMER2_BASE;
+ irq = INT_1610_GPTIMER2;
+ break;
+ case 3:
+ base = OMAP1610_GPTIMER3_BASE;
+ irq = INT_1610_GPTIMER3;
+ break;
+ case 4:
+ base = OMAP1610_GPTIMER4_BASE;
+ irq = INT_1610_GPTIMER4;
+ break;
+ case 5:
+ base = OMAP1610_GPTIMER5_BASE;
+ irq = INT_1610_GPTIMER5;
+ break;
+ case 6:
+ base = OMAP1610_GPTIMER6_BASE;
+ irq = INT_1610_GPTIMER6;
+ break;
+ case 7:
+ base = OMAP1610_GPTIMER7_BASE;
+ irq = INT_1610_GPTIMER7;
+ break;
+ case 8:
+ base = OMAP1610_GPTIMER8_BASE;
+ irq = INT_1610_GPTIMER8;
+ break;
+ default:
+ /*
+			 * Not supposed to reach here; this exists only
+			 * to silence a compiler warning.
+ */
+ return -EINVAL;
+ }
+
+ pdev = platform_device_alloc("omap_timer", i);
+ if (!pdev) {
+ pr_err("%s: Failed to device alloc for dmtimer%d\n",
+ __func__, i);
+ return -ENOMEM;
+ }
+
+ memset(res, 0, 2 * sizeof(struct resource));
+ res[0].start = base;
+ res[0].end = base + 0x46;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = irq;
+ res[1].end = irq;
+ res[1].flags = IORESOURCE_IRQ;
+ ret = platform_device_add_resources(pdev, res,
+ ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Failed to add resources.\n",
+ __func__);
+ goto err_free_pdev;
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: Failed to allocate pdata.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_free_pdata;
+ }
+
+ pdata->set_timer_src = omap1_dm_timer_set_src;
+ pdata->is_early_init = 0;
+ pdata->timer_ip_type = OMAP_TIMER_IP_VERSION_1;
+ pdata->needs_manual_reset = 1;
+
+ ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Failed to add platform data.\n",
+ __func__);
+ goto err_free_pdata;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Failed to add platform device.\n",
+ __func__);
+ goto err_free_pdata;
+ }
+
+		dev_dbg(&pdev->dev, "Registered.\n");
+ }
+
+ return 0;
+
+err_free_pdata:
+ kfree(pdata);
+
+err_free_pdev:
+ platform_device_unregister(pdev);
+
+ return ret;
+}
+arch_initcall(omap1_dm_timer_init);
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 364137c..634903e 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -34,11 +34,23 @@
},
};
+static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+ .irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
- .bank_type = METHOD_MPUIO,
+ .is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
+ .regs = &omap15xx_mpuio_regs,
};
static struct platform_device omap15xx_mpu_gpio = {
@@ -64,10 +76,22 @@
},
};
+static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP1510_GPIO_DIR_CONTROL,
+ .datain = OMAP1510_GPIO_DATA_INPUT,
+ .dataout = OMAP1510_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP1510_GPIO_INT_STATUS,
+ .irqenable = OMAP1510_GPIO_INT_MASK,
+ .irqenable_inv = true,
+ .irqctrl = OMAP1510_GPIO_INT_CONTROL,
+ .pinctrl = OMAP1510_GPIO_PIN_CONTROL,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
.virtual_irq_start = IH_GPIO_BASE,
- .bank_type = METHOD_GPIO_1510,
.bank_width = 16,
+ .regs = &omap15xx_gpio_regs,
};
static struct platform_device omap15xx_gpio = {
@@ -93,7 +117,6 @@
platform_device_register(&omap15xx_mpu_gpio);
platform_device_register(&omap15xx_gpio);
- gpio_bank_count = 2;
return 0;
}
postcore_initcall(omap15xx_gpio_init);
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 293a246..c6a4554 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -37,11 +37,24 @@
},
};
+static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+ .irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
- .bank_type = METHOD_MPUIO,
+ .is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
+ .suspend_support = true,
+ .regs = &omap16xx_mpuio_regs,
};
static struct platform_device omap16xx_mpu_gpio = {
@@ -67,10 +80,30 @@
},
};
+static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
+ .revision = OMAP1610_GPIO_REVISION,
+ .direction = OMAP1610_GPIO_DIRECTION,
+ .set_dataout = OMAP1610_GPIO_SET_DATAOUT,
+ .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT,
+ .datain = OMAP1610_GPIO_DATAIN,
+ .dataout = OMAP1610_GPIO_DATAOUT,
+ .irqstatus = OMAP1610_GPIO_IRQSTATUS1,
+ .irqenable = OMAP1610_GPIO_IRQENABLE1,
+ .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1,
+ .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1,
+ .wkup_status = OMAP1610_GPIO_WAKEUPENABLE,
+ .wkup_clear = OMAP1610_GPIO_CLEAR_WAKEUPENA,
+ .wkup_set = OMAP1610_GPIO_SET_WAKEUPENA,
+ .edgectrl1 = OMAP1610_GPIO_EDGE_CTRL1,
+ .edgectrl2 = OMAP1610_GPIO_EDGE_CTRL2,
+ .sysconfig = OMAP1610_GPIO_SYSCONFIG,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
- .bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .suspend_support = true,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio1 = {
@@ -98,8 +131,9 @@
static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 16,
- .bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .suspend_support = true,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio2 = {
@@ -127,8 +161,9 @@
static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
- .bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .suspend_support = true,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio3 = {
@@ -156,8 +191,9 @@
static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 48,
- .bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .suspend_support = true,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio4 = {
@@ -193,8 +229,6 @@
for (i = 0; i < ARRAY_SIZE(omap16xx_gpio_dev); i++)
platform_device_register(omap16xx_gpio_dev[i]);
- gpio_bank_count = ARRAY_SIZE(omap16xx_gpio_dev);
-
return 0;
}
postcore_initcall(omap16xx_gpio_init);
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index c6ad248..3ca9600 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -39,11 +39,23 @@
},
};
+static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL / 2,
+ .datain = OMAP_MPUIO_INPUT_LATCH / 2,
+ .dataout = OMAP_MPUIO_OUTPUT / 2,
+ .irqstatus = OMAP_MPUIO_GPIO_INT / 2,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2,
+ .irqenable_inv = true,
+ .irqctrl = OMAP_MPUIO_GPIO_INT_EDGE / 2,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
- .bank_type = METHOD_MPUIO,
- .bank_width = 32,
+ .is_mpuio = true,
+ .bank_width = 16,
.bank_stride = 2,
+ .regs = &omap7xx_mpuio_regs,
};
static struct platform_device omap7xx_mpu_gpio = {
@@ -69,10 +81,21 @@
},
};
+static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP7XX_GPIO_DIR_CONTROL,
+ .datain = OMAP7XX_GPIO_DATA_INPUT,
+ .dataout = OMAP7XX_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP7XX_GPIO_INT_STATUS,
+ .irqenable = OMAP7XX_GPIO_INT_MASK,
+ .irqenable_inv = true,
+ .irqctrl = OMAP7XX_GPIO_INT_CONTROL,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio1 = {
@@ -100,8 +123,8 @@
static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio2 = {
@@ -129,8 +152,8 @@
static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 64,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio3 = {
@@ -158,8 +181,8 @@
static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 96,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio4 = {
@@ -187,8 +210,8 @@
static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
.virtual_irq_start = IH_GPIO_BASE + 128,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio5 = {
@@ -216,8 +239,8 @@
static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
.virtual_irq_start = IH_GPIO_BASE + 160,
- .bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio6 = {
@@ -255,8 +278,6 @@
for (i = 0; i < ARRAY_SIZE(omap7xx_gpio_dev); i++)
platform_device_register(omap7xx_gpio_dev[i]);
- gpio_bank_count = ARRAY_SIZE(omap7xx_gpio_dev);
-
return 0;
}
postcore_initcall(omap7xx_gpio_init);
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
index c0e1f48..99971bf 100644
--- a/arch/arm/mach-omap1/mailbox.c
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -151,6 +151,9 @@
list[0]->irq = platform_get_irq_byname(pdev, "dsp");
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
mbox_base = ioremap(mem->start, resource_size(mem));
if (!mbox_base)
return -ENOMEM;
diff --git a/arch/arm/mach-omap1/omap1-cpufreq.c b/arch/arm/mach-omap1/omap1-cpufreq.c
new file mode 100644
index 0000000..7c5216e
--- /dev/null
+++ b/arch/arm/mach-omap1/omap1-cpufreq.c
@@ -0,0 +1,175 @@
+/*
+ * OMAP1 cpufreq driver
+ *
+ * CPU frequency scaling for OMAP
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+
+#include <asm/system.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+
+#include <mach/hardware.h>
+
+#define VERY_HI_RATE 900000000
+
+static struct cpufreq_frequency_table *freq_table;
+static struct clk *mpu_clk;
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
+ policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu)
+ return 0;
+
+ rate = clk_get_rate(mpu_clk) / 1000;
+ return rate;
+}
+
+static int omap_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int ret = 0;
+
+	/* Ensure desired rate is within allowed range. Some governors
+ * (ondemand) will just pass target_freq=0 to get the minimum. */
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+
+ freqs.old = omap_getspeed(0);
+ freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
+ freqs.cpu = 0;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
+#endif
+ ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+static int __init omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+
+ mpu_clk = clk_get(NULL, "mpu");
+ if (IS_ERR(mpu_clk))
+ return PTR_ERR(mpu_clk);
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ policy->cur = policy->min = policy->max = omap_getspeed(0);
+
+ clk_init_cpufreq_table(&freq_table);
+
+ if (freq_table) {
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!result)
+ cpufreq_frequency_table_get_attr(freq_table,
+ policy->cpu);
+ } else {
+ policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
+ policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
+ VERY_HI_RATE) / 1000;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = omap_getspeed(0);
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ return 0;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ clk_exit_cpufreq_table(&freq_table);
+ clk_put(mpu_clk);
+ return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = omap_verify_speed,
+ .target = omap_target,
+ .get = omap_getspeed,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .name = "omap1",
+ .attr = omap_cpufreq_attr,
+};
+
+static int __init omap_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&omap_driver);
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&omap_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP1 SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 13d7b8f..dc4a8fd 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -53,7 +53,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <plat/common.h>
-#include <plat/dmtimer.h>
/*
* ---------------------------------------------------------------------------
@@ -184,9 +183,6 @@
{
omap_init_clocksource_32k();
-#ifdef CONFIG_OMAP_DM_TIMER
- omap_dm_timer_init();
-#endif
omap_init_32k_timer();
return true;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 841ae21..3924ddb 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -45,12 +45,13 @@
select CPU_V7
select ARM_GIC
select LOCAL_TIMERS if SMP
- select PL310_ERRATA_588369
- select PL310_ERRATA_727915
+	select PL310_ERRATA_588369 if CACHE_L2X0
+	select PL310_ERRATA_727915 if CACHE_L2X0
select ARM_ERRATA_720789
select ARCH_HAS_OPP
select PM_OPP if PM
select USB_ARCH_HAS_EHCI
+ select ARCH_HAS_BARRIERS
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -314,6 +315,7 @@
select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS
select REGULATOR_FIXED_VOLTAGE
+ select OMAP_TPS6236X
config MACH_OMAP4_PANDA
bool "OMAP4 Panda Board"
@@ -343,6 +345,42 @@
wish to say no. Selecting yes without understanding what is
going on could result in system crashes;
+config OMAP_TPS6236X
+ bool
+
+config OMAP_ALLOW_OSWR
+ bool "Enable Open Switch Retention"
+ depends on ARCH_OMAP4
+ default n
+ help
+	  Select this option to enable OSWR (Open Switch Retention) support,
+	  which means the logic of power domains can be lost, unlike CSWR,
+	  where the logic is retained.
+
+config OMAP_FIQ_DEBUGGER
+ bool "Enable the serial FIQ debugger on OMAP"
+ default y
+ select FIQ_DEBUGGER
+ help
+	  Enables the serial FIQ debugger on OMAP.
+
+config OMAP4_PPA_CPU1_ONLINE_BUG
+ bool "Enable Support for CPU1 ONLINE WA for OSWR/OFF"
+ depends on ARCH_OMAP4
+ depends on OMAP_ALLOW_OSWR
+ default y
+ help
+	  If a non-GP OMAP4 device is used and the PPA revision is < v1.7.3,
+	  the device does not perform the memory maintenance and TLB sync
+	  operations required before releasing CPU1 to HLOS. This results
+	  in a crash while resuming from OFF mode.
+
+ Disable this option *ONLY IF* you meet the minimum PPA version
+ requirement.
+
+ If, on the other hand, you do not understand the change, leave the
+ default as enabled.
+
endmenu
endif
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index b148077..aa42baf 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
- common.o gpio.o dma.o wd_timer.o
+ common.o gpio.o dma.o wd_timer.o omap_pmic.o dmtimer.o
omap-2-3-common = irq.o sdrc.o
hwmod-common = omap_hwmod.o \
@@ -15,16 +15,20 @@
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP4) += emif.o lpddr2_jedec_data.o lpddr2_elpida_data.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
+obj-$(CONFIG_OMAP_TPS6236X) += omap_tps6236x.o
+obj-$(CONFIG_OMAP_TEMP_SENSOR)	+= temp_sensor_device.o
# SMP support ONLY available for OMAP4
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
+obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o \
+ omap-wakeupgen.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -56,19 +60,29 @@
obj-$(CONFIG_ARCH_OMAP4) += opp4xxx_data.o
endif
+# CPUFREQ driver
+obj-$(CONFIG_CPU_FREQ) += omap2plus-cpufreq.o
+
# Power Management
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o \
+ omap4-mpuss-lowpower.o sleep44xx.o \
+ cpuidle44xx.o resetreason.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
+ifeq ($(CONFIG_PM_DEBUG),y)
+obj-$(CONFIG_ARCH_OMAP4) += prcm-debug.o
+endif
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
+obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS1P5) += smartreflex-class1p5.o
AFLAGS_sleep24xx.o :=-Wa,-march=armv6
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec)
ifeq ($(CONFIG_PM_VERBOSE),y)
CFLAGS_pm_bus.o += -DDEBUG
@@ -86,16 +100,19 @@
obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
cm44xx.o prcm_mpu44xx.o \
prminst44xx.o vc44xx_data.o \
- vp44xx_data.o
+ vp44xx_data.o omap4-sar.o
# OMAP voltage domains
ifeq ($(CONFIG_PM),y)
-voltagedomain-common := voltage.o
-obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common)
+voltagedomain-common := voltage.o vc.o vp.o
+obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common) \
+ voltagedomains2xxx_data.o
obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common) \
- voltagedomains3xxx_data.o
+ voltagedomains3xxx_data.o \
+ ldo.o ldo3xxx_data.o dvfs.o
obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common) \
- voltagedomains44xx_data.o
+ voltagedomains44xx_data.o \
+ ldo.o ldo4xxx_data.o dvfs.o
endif
# OMAP powerdomain framework
@@ -157,13 +174,13 @@
obj-$(CONFIG_ARCH_OMAP3) += omap_l3_smx.o
obj-$(CONFIG_ARCH_OMAP4) += omap_l3_noc.o
+# LDO stuff
+obj-$(CONFIG_ARCH_OMAP4) += omap4_trim_quirks.o
+
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
mailbox_mach-objs := mailbox.o
-obj-$(CONFIG_OMAP_IOMMU) += iommu2.o
-
-iommu-$(CONFIG_OMAP_IOMMU) := omap-iommu.o
-obj-y += $(iommu-m) $(iommu-y)
+obj-$(CONFIG_OMAP_IOMMU) += iommu2.o omap-iommu.o
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
obj-y += $(i2c-omap-m) $(i2c-omap-y)
@@ -241,6 +258,8 @@
obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o \
omap_phy_internal.o \
+obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
+
obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o \
@@ -270,3 +289,8 @@
obj-y += $(disp-m) $(disp-y)
obj-y += common-board-devices.o
+
+obj-$(CONFIG_OMAP_REMOTE_PROC) += remoteproc.o
+obj-$(CONFIG_OMAP_HSI_DEVICE) += omap_hsi.o
+obj-$(CONFIG_ARCH_OMAP4) += omap_dmm.o
+obj-$(CONFIG_OMAP_FIQ_DEBUGGER) += omap_fiq_debugger.o
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 5dac974..4e2baf7 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -553,106 +553,8 @@
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-
-static struct omap_device_pad serial1_pads[] __initdata = {
- /*
- * Note that off output enable is an active low
- * signal. So setting this means pin is a
- * input enabled in off mode
- */
- OMAP_MUX_STATIC("uart1_cts.uart1_cts",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_rts.uart1_rts",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_rx.uart1_rx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_tx.uart1_tx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLDOWN |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial1_data __initdata = {
- .id = 0,
- .pads = serial1_pads,
- .pads_cnt = ARRAY_SIZE(serial1_pads),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static inline void board_serial_init(void)
-{
- omap_serial_init_port(&serial1_data);
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
-}
#else
#define board_mux NULL
-
-static inline void board_serial_init(void)
-{
- omap_serial_init();
-}
#endif
/*
@@ -789,7 +691,7 @@
else
gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
omap_ads7846_init(1, gpio_pendown, 310, NULL);
- board_serial_init();
+ omap_serial_init();
usb_musb_init(NULL);
board_smc91x_init();
board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 14a5971..8396ac8 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -24,9 +24,14 @@
#include <linux/regulator/machine.h>
#include <linux/leds.h>
#include <linux/leds_pwm.h>
+#include <linux/memblock.h>
#include <mach/hardware.h>
#include <mach/omap4-common.h>
+#include <mach/emif.h>
+#include <mach/lpddr2-elpida.h>
+#include <mach/dmm.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -36,13 +41,16 @@
#include <plat/usb.h>
#include <plat/mmc.h>
#include <plat/omap4-keypad.h>
+#include <plat/remoteproc.h>
#include <video/omapdss.h>
+#include <video/omap-panel-nokia-dsi.h>
#include "mux.h"
#include "hsmmc.h"
#include "timer-gp.h"
#include "control.h"
#include "common-board-devices.h"
+#include "pm.h"
#define ETH_KS8851_IRQ 34
#define ETH_KS8851_POWER_ON 48
@@ -51,6 +59,20 @@
#define OMAP4_SFH7741_ENABLE_GPIO 188
#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define LCD_BL_GPIO 27 /* LCD Backlight GPIO */
+/* PWM2 and TOGGLE3 register offsets */
+#define LED_PWM2ON 0x03
+#define LED_PWM2OFF 0x04
+#define TWL6030_TOGGLE3 0x92
+
+#define TPS62361_GPIO 7
+
+#define PHYS_ADDR_SMC_SIZE (SZ_1M * 3)
+#define PHYS_ADDR_SMC_MEM (0x80000000 + SZ_1G - PHYS_ADDR_SMC_SIZE)
+#define OMAP_ION_HEAP_SECURE_INPUT_SIZE (SZ_1M * 90)
+#define PHYS_ADDR_DUCATI_SIZE (SZ_1M * 105)
+#define PHYS_ADDR_DUCATI_MEM (PHYS_ADDR_SMC_MEM - PHYS_ADDR_DUCATI_SIZE - \
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE)
#define HDMI_GPIO_HPD 63 /* Hotplug detect */
static const int sdp4430_keymap[] = {
@@ -272,24 +294,13 @@
return status;
}
-static struct platform_device sdp4430_lcd_device = {
- .name = "sdp4430_lcd",
- .id = -1,
-};
-
static struct platform_device *sdp4430_devices[] __initdata = {
- &sdp4430_lcd_device,
&sdp4430_gpio_keys_device,
&sdp4430_leds_gpio,
&sdp4430_leds_pwm,
};
-static struct omap_lcd_config sdp4430_lcd_config __initdata = {
- .ctrl_name = "internal",
-};
-
static struct omap_board_config_kernel sdp4430_config[] __initdata = {
- { OMAP_TAG_LCD, &sdp4430_lcd_config },
};
static void __init omap_4430sdp_init_early(void)
@@ -345,6 +356,10 @@
.dev_name = "omap_hsmmc.0",
},
};
+static struct regulator_consumer_supply sdp4430_vcxio_supply[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss_dss"),
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
+};
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
{
@@ -491,7 +506,10 @@
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
+ .always_on = true,
},
+ .num_consumer_supplies = ARRAY_SIZE(sdp4430_vcxio_supply),
+ .consumer_supplies = sdp4430_vcxio_supply,
};
static struct regulator_init_data sdp4430_vdac = {
@@ -577,6 +595,76 @@
__func__, OMAP4_SFH7741_ENABLE_GPIO, error);
}
+static int dsi1_panel_set_backlight(struct omap_dss_device *dssdev, int level)
+{
+ int r;
+
+ r = twl_i2c_write_u8(TWL_MODULE_PWM, 0x7F, LED_PWM2OFF);
+ if (r)
+ return r;
+
+ if (level > 1) {
+ if (level == 255)
+ level = 0x7F;
+ else
+ level = (~(level/2)) & 0x7F;
+
+ r = twl_i2c_write_u8(TWL_MODULE_PWM, level, LED_PWM2ON);
+ if (r)
+ return r;
+ r = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x30, TWL6030_TOGGLE3);
+ if (r)
+ return r;
+ } else if (level <= 1) {
+ r = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x08, TWL6030_TOGGLE3);
+ if (r)
+ return r;
+ r = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x28, TWL6030_TOGGLE3);
+ if (r)
+ return r;
+ r = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x00, TWL6030_TOGGLE3);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static struct nokia_dsi_panel_data dsi1_panel;
+
+static void sdp4430_lcd_init(void)
+{
+ u32 reg;
+ int status;
+
+ /* Enable 3 lanes in DSI1 module, disable pull down */
+ reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+ reg &= ~OMAP4_DSI1_LANEENABLE_MASK;
+ reg |= 0x7 << OMAP4_DSI1_LANEENABLE_SHIFT;
+ reg &= ~OMAP4_DSI1_PIPD_MASK;
+ reg |= 0x7 << OMAP4_DSI1_PIPD_SHIFT;
+ omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+ /* Panel Taal reset and backlight GPIO init */
+ status = gpio_request_one(dsi1_panel.reset_gpio, GPIOF_DIR_OUT,
+ "lcd_reset_gpio");
+ if (status)
+ pr_err("%s: Could not get lcd_reset_gpio\n", __func__);
+
+ if (dsi1_panel.use_ext_te) {
+ status = omap_mux_init_signal("gpmc_ncs4.gpio_101",
+ OMAP_PIN_INPUT_PULLUP);
+ if (status)
+ pr_err("%s: Could not get ext_te gpio\n", __func__);
+ }
+
+ status = gpio_request_one(LCD_BL_GPIO, GPIOF_DIR_OUT, "lcd_bl_gpio");
+ if (status)
+ pr_err("%s: Could not get lcd_bl_gpio\n", __func__);
+
+ gpio_set_value(LCD_BL_GPIO, 0);
+}
+
static void sdp4430_hdmi_mux_init(void)
{
omap_mux_init_signal("hdmi_cec",
@@ -610,6 +698,52 @@
gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
}
+static struct nokia_dsi_panel_data dsi1_panel = {
+ .name = "taal",
+ .reset_gpio = 102,
+ .use_ext_te = false,
+ .ext_te_gpio = 101,
+ .esd_interval = 0,
+ .set_backlight = dsi1_panel_set_backlight,
+};
+
+static struct omap_dss_device sdp4430_lcd_device = {
+ .name = "lcd",
+ .driver_name = "taal",
+ .type = OMAP_DISPLAY_TYPE_DSI,
+ .data = &dsi1_panel,
+ .phy.dsi = {
+ .clk_lane = 1,
+ .clk_pol = 0,
+ .data1_lane = 2,
+ .data1_pol = 0,
+ .data2_lane = 3,
+ .data2_pol = 0,
+ },
+
+ .clocks = {
+ .dispc = {
+ .channel = {
+ .lck_div = 1, /* Logic Clock = 172.8 MHz */
+ .pck_div = 5, /* Pixel Clock = 34.56 MHz */
+ .lcd_clk_src = OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,
+ },
+ .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
+ },
+
+ .dsi = {
+ .regn = 16, /* Fint = 2.4 MHz */
+ .regm = 180, /* DDR Clock = 216 MHz */
+ .regm_dispc = 5, /* PLL1_CLK1 = 172.8 MHz */
+ .regm_dsi = 5, /* PLL1_CLK2 = 172.8 MHz */
+
+ .lp_clk_div = 10, /* LP Clock = 8.64 MHz */
+ .dsi_fclk_src = OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,
+ },
+ },
+ .channel = OMAP_DSS_CHANNEL_LCD,
+};
+
static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
.hpd_gpio = HDMI_GPIO_HPD,
};
@@ -625,17 +759,19 @@
};
static struct omap_dss_device *sdp4430_dss_devices[] = {
+ &sdp4430_lcd_device,
&sdp4430_hdmi_device,
};
static struct omap_dss_board_info sdp4430_dss_data = {
.num_devices = ARRAY_SIZE(sdp4430_dss_devices),
.devices = sdp4430_dss_devices,
- .default_device = &sdp4430_hdmi_device,
+ .default_device = &sdp4430_lcd_device,
};
void omap_4430sdp_display_init(void)
{
+ sdp4430_lcd_init();
sdp4430_hdmi_mux_init();
omap_display_init(&sdp4430_dss_data);
@@ -650,67 +786,28 @@
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+/*
+ * LPDDR2 Configuration Data:
+ * The memory organisation is as below:
+ * EMIF1 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * EMIF2 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * --------------------
+ * TOTAL - 8 Gb
+ *
+ * Same devices installed on EMIF1 and EMIF2
+ */
+static __initdata struct emif_device_details emif_devices = {
+ .cs0_device = &lpddr2_elpida_2G_S4_dev,
+ .cs1_device = &lpddr2_elpida_2G_S4_dev
};
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial4_pads[] __initdata = {
- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static struct omap_board_data serial4_data __initdata = {
- .id = 3,
- .pads = serial4_pads,
- .pads_cnt = ARRAY_SIZE(serial4_pads),
-};
-
-static inline void board_serial_init(void)
+static inline void __init board_serial_init(void)
{
- struct omap_board_data bdata;
- bdata.flags = 0;
- bdata.pads = NULL;
- bdata.pads_cnt = 0;
- bdata.id = 0;
- /* pass dummy data for UART1 */
- omap_serial_init_port(&bdata);
-
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
- omap_serial_init_port(&serial4_data);
+ omap_serial_init();
}
+
#else
#define board_mux NULL
@@ -729,6 +826,8 @@
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
+ omap_emif_setup_device_details(&emif_devices, &emif_devices);
+
omap_board_config = sdp4430_config;
omap_board_config_size = ARRAY_SIZE(sdp4430_config);
@@ -753,7 +852,16 @@
if (status)
pr_err("Keypad initialization failed: %d\n", status);
+ omap_dmm_init();
omap_4430sdp_display_init();
+
+ if (cpu_is_omap446x()) {
+ /* Vsel0 = gpio, vsel1 = gnd */
+ status = omap_tps6236x_board_setup(true, TPS62361_GPIO, -1,
+ OMAP_PIN_OFF_OUTPUT_HIGH, -1);
+ if (status)
+ pr_err("TPS62361 initialization failed: %d\n", status);
+ }
}
static void __init omap_4430sdp_map_io(void)
@@ -762,10 +870,22 @@
omap44xx_map_common_io();
}
+static void __init omap_4430sdp_reserve(void)
+{
+ /* do the static reservations first */
+ memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
+ memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);
+ /* ipu needs to recognize secure input buffer area as well */
+ omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE +
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE);
+
+ omap_reserve();
+}
+
MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
/* Maintainer: Santosh Shilimkar - Texas Instruments Inc */
.boot_params = 0x80000100,
- .reserve = omap_reserve,
+ .reserve = omap_4430sdp_reserve,
.map_io = omap_4430sdp_map_io,
.init_early = omap_4430sdp_init_early,
.init_irq = gic_init_irq,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 8d74318..5ab626a 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -656,15 +656,15 @@
bdata.pads_cnt = 0;
bdata.id = 0;
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
bdata.id = 1;
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
bdata.id = 2;
bdata.pads = serial2_pads;
bdata.pads_cnt = ARRAY_SIZE(serial2_pads);
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
}
#else
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 107dfc3..5f42052 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -28,9 +28,14 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/wl12xx.h>
+#include <linux/memblock.h>
#include <mach/hardware.h>
#include <mach/omap4-common.h>
+#include <mach/emif.h>
+#include <mach/lpddr2-elpida.h>
+#include <mach/dmm.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -40,6 +45,7 @@
#include <plat/common.h>
#include <plat/usb.h>
#include <plat/mmc.h>
+#include <plat/remoteproc.h>
#include <video/omap-panel-generic-dpi.h>
#include "timer-gp.h"
@@ -56,6 +62,14 @@
#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
#define HDMI_GPIO_HPD 63 /* Hotplug detect */
+
+#define PHYS_ADDR_SMC_SIZE (SZ_1M * 3)
+#define PHYS_ADDR_SMC_MEM (0x80000000 + SZ_1G - PHYS_ADDR_SMC_SIZE)
+#define OMAP_ION_HEAP_SECURE_INPUT_SIZE (SZ_1M * 90)
+#define PHYS_ADDR_DUCATI_SIZE (SZ_1M * 105)
+#define PHYS_ADDR_DUCATI_MEM (PHYS_ADDR_SMC_MEM - PHYS_ADDR_DUCATI_SIZE - \
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE)
+
/* wl127x BT, FM, GPS connectivity chip */
static int wl1271_gpios[] = {46, -1, -1};
static struct platform_device wl1271_device = {
@@ -152,7 +166,11 @@
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_UTMI,
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ .mode = MUSB_PERIPHERAL,
+#else
.mode = MUSB_OTG,
+#endif
.power = 100,
};
@@ -381,6 +399,24 @@
},
};
+static void omap4_audio_conf(void)
+{
+ /* twl6040 naudint */
+ omap_mux_init_signal("sys_nirq2.sys_nirq2", \
+ OMAP_PIN_INPUT_PULLUP);
+}
+
+static struct twl4030_codec_audio_data twl6040_audio = {
+ /* Add audio only data */
+};
+
+static struct twl4030_codec_data twl6040_codec = {
+ .audio = &twl6040_audio,
+ .audpwron_gpio = 127,
+ .naudint_irq = OMAP44XX_IRQ_SYS_2N,
+ .irq_base = TWL6040_CODEC_IRQ_BASE,
+};
+
static struct twl4030_platform_data omap4_panda_twldata = {
.irq_base = TWL6030_IRQ_BASE,
.irq_end = TWL6030_IRQ_END,
@@ -396,6 +432,9 @@
.vaux3 = &omap4_panda_vaux3,
.clk32kg = &omap4_panda_clk32kg,
.usb = &omap4_usbphy_data,
+
+ /* children */
+ .codec = &twl6040_codec,
};
/*
@@ -498,71 +537,14 @@
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial4_pads[] __initdata = {
- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static struct omap_board_data serial4_data __initdata = {
- .id = 3,
- .pads = serial4_pads,
- .pads_cnt = ARRAY_SIZE(serial4_pads),
-};
-
-static inline void board_serial_init(void)
+static inline void __init board_serial_init(void)
{
- struct omap_board_data bdata;
- bdata.flags = 0;
- bdata.pads = NULL;
- bdata.pads_cnt = 0;
- bdata.id = 0;
- /* pass dummy data for UART1 */
- omap_serial_init_port(&bdata);
-
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
- omap_serial_init_port(&serial4_data);
+ omap_serial_init();
}
#else
#define board_mux NULL
-static inline void board_serial_init(void)
+static inline void __init board_serial_init(void)
{
omap_serial_init();
}
@@ -584,7 +566,7 @@
/* Using generic display panel */
static struct panel_generic_dpi_data omap4_dvi_panel = {
- .name = "generic",
+ .name = "generic_720p",
.platform_enable = omap4_panda_enable_dvi,
.platform_disable = omap4_panda_disable_dvi,
};
@@ -671,6 +653,23 @@
.default_device = &omap4_panda_dvi_device,
};
+/*
+ * LPDDR2 Configuration Data:
+ * The memory organisation is as below:
+ * EMIF1 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * EMIF2 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * --------------------
+ * TOTAL - 8 Gb
+ *
+ * Same devices installed on EMIF1 and EMIF2
+ */
+static __initdata struct emif_device_details emif_devices = {
+ .cs0_device = &lpddr2_elpida_2G_S4_dev,
+ .cs1_device = &lpddr2_elpida_2G_S4_dev
+};
+
void omap4_panda_display_init(void)
{
int r;
@@ -687,10 +686,14 @@
omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
}
+extern void __init omap4_panda_android_init(void);
+
static void __init omap4_panda_init(void)
{
int package = OMAP_PACKAGE_CBS;
+ omap_emif_setup_device_details(&emif_devices, &emif_devices);
+
if (omap_rev() == OMAP4430_REV_ES1_0)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
@@ -699,13 +702,23 @@
pr_err("error setting wl12xx data\n");
omap4_panda_i2c_init();
+ omap4_audio_conf();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
- platform_device_register(&omap_vwlan_device);
+/*
+ * This is temporary. With WLAN registering, we see that UART2 is not
+ * idling on Panda and CORE retention is not happening, so this is
+ * disabled for the time being and will be re-enabled later.
+ *
+ * platform_device_register(&omap_vwlan_device);
+ */
board_serial_init();
omap4_twl6030_hsmmc_init(mmc);
omap4_ehci_init();
usb_musb_init(&musb_board_data);
+
+ omap_dmm_init();
omap4_panda_display_init();
+
}
static void __init omap4_panda_map_io(void)
@@ -714,10 +727,22 @@
omap44xx_map_common_io();
}
+static void __init omap4_panda_reserve(void)
+{
+ /* do the static reservations first */
+ memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
+ memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);
+ /* ipu needs to recognize secure input buffer area as well */
+ omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE +
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE);
+
+ omap_reserve();
+}
+
MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
/* Maintainer: David Anders - Texas Instruments Inc */
.boot_params = 0x80000100,
- .reserve = omap_reserve,
+ .reserve = omap4_panda_reserve,
.map_io = omap4_panda_map_io,
.init_early = omap4_panda_init_early,
.init_irq = gic_init_irq,
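
The new reserve hook and the PHYS_ADDR_* definitions above carve a contiguous block out of the top of the first GB of SDRAM for the secure monitor (SMC), the Ducati (IPU) firmware, and the secure ION input heap. The following is a minimal standalone sketch (ordinary userspace C, not part of the patch) just to make that address arithmetic concrete, assuming the usual SZ_1M/SZ_1G values of 0x100000 and 0x40000000:

    #include <stdio.h>

    #define SZ_1M 0x00100000UL
    #define SZ_1G 0x40000000UL

    /* same expressions as in board-omap4panda.c */
    #define PHYS_ADDR_SMC_SIZE              (SZ_1M * 3)
    #define PHYS_ADDR_SMC_MEM               (0x80000000UL + SZ_1G - PHYS_ADDR_SMC_SIZE)
    #define OMAP_ION_HEAP_SECURE_INPUT_SIZE (SZ_1M * 90)
    #define PHYS_ADDR_DUCATI_SIZE           (SZ_1M * 105)
    #define PHYS_ADDR_DUCATI_MEM            (PHYS_ADDR_SMC_MEM - PHYS_ADDR_DUCATI_SIZE - \
                                             OMAP_ION_HEAP_SECURE_INPUT_SIZE)

    int main(void)
    {
            /* range removed by memblock_remove() for the secure monitor */
            printf("SMC carveout:    0x%08lx - 0x%08lx\n",
                   PHYS_ADDR_SMC_MEM,
                   PHYS_ADDR_SMC_MEM + PHYS_ADDR_SMC_SIZE - 1);
            /* range removed by memblock_remove() for the Ducati firmware */
            printf("Ducati carveout: 0x%08lx - 0x%08lx\n",
                   PHYS_ADDR_DUCATI_MEM,
                   PHYS_ADDR_DUCATI_MEM + PHYS_ADDR_DUCATI_SIZE - 1);
            /* range handed to omap_ipu_set_static_mempool() (Ducati + secure input) */
            printf("IPU static pool: 0x%08lx - 0x%08lx\n",
                   PHYS_ADDR_DUCATI_MEM,
                   PHYS_ADDR_DUCATI_MEM + PHYS_ADDR_DUCATI_SIZE +
                   OMAP_ION_HEAP_SECURE_INPUT_SIZE - 1);
            return 0;
    }

This evaluates to 0xbfd00000-0xbfffffff for the SMC block, 0xb3a00000-0xba2fffff for the Ducati block, and 0xb3a00000-0xbfcfffff for the IPU static mempool, i.e. the same ranges the omap4_panda_reserve() hunk above removes from memblock and reports to the IPU code.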
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index bcffee0..a7e78e4 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -292,12 +292,14 @@
for (n = dd->min_divider; n <= dd->max_divider; n++) {
- /* Is the (input clk, divider) pair valid for the DPLL? */
- r = _dpll_test_fint(clk, n);
- if (r == DPLL_FINT_UNDERFLOW)
- break;
- else if (r == DPLL_FINT_INVALID)
- continue;
+ if (cpu_is_omap34xx()) {
+ /* Is the (input clk, divider) pair valid for the DPLL? */
+ r = _dpll_test_fint(clk, n);
+ if (r == DPLL_FINT_UNDERFLOW)
+ break;
+ else if (r == DPLL_FINT_INVALID)
+ continue;
+ }
/* Compute the scaled DPLL multiplier, based on the divider */
m = scaled_rt_rp * n;
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 180299e..1334f59 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -32,6 +32,7 @@
#include "clock.h"
#include "cm2xxx_3xxx.h"
+#include "cm44xx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
@@ -43,6 +44,11 @@
/* Private functions */
+static void _omap4_module_wait_ready(struct clk *clk)
+{
+ omap4_cm_wait_module_ready(clk->enable_reg);
+}
+
/**
* _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
* @clk: struct clk * belonging to the module
@@ -190,8 +196,12 @@
__raw_writel(v, clk->enable_reg);
v = __raw_readl(clk->enable_reg); /* OCP barrier */
- if (clk->ops->find_idlest)
- _omap2_module_wait_ready(clk);
+ if (clk->ops->find_idlest) {
+ if (cpu_is_omap44xx())
+ _omap4_module_wait_ready(clk);
+ else
+ _omap2_module_wait_ready(clk);
+ }
return 0;
}
@@ -219,6 +229,12 @@
/* No OCP barrier needed here since it is a disable operation */
}
+const struct clkops clkops_omap4_dflt_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap2_clk_dflt_find_idlest,
+};
+
const struct clkops clkops_omap2_dflt_wait = {
.enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
@@ -327,6 +343,10 @@
}
}
+ /* If clockdomain supports hardware control, enable it */
+ if (clk->clkdm)
+ clkdm_allow_idle(clk->clkdm);
+
return 0;
oce_err3:
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index e10ff2b..450aabf 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -41,6 +41,7 @@
/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
#define DPLL_LOW_POWER_STOP 0x1
+#define DPLL_MN_BYPASS 0x4
#define DPLL_LOW_POWER_BYPASS 0x5
#define DPLL_LOCKED 0x7
@@ -64,6 +65,11 @@
int omap4_dpllmx_gatectrl_read(struct clk *clk);
void omap4_dpllmx_allow_gatectrl(struct clk *clk);
void omap4_dpllmx_deny_gatectrl(struct clk *clk);
+int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate);
+long omap4460_mpu_dpll_round_rate(struct clk *clk, unsigned long rate);
+unsigned long omap4460_mpu_dpll_recalc(struct clk *clk);
+long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate);
+unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk);
#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk);
@@ -132,6 +138,7 @@
extern const struct clkops clkops_omap2_dflt_wait;
extern const struct clkops clkops_dummy;
extern const struct clkops clkops_omap2_dflt;
+extern const struct clkops clkops_omap4_dflt_wait;
extern struct clk_functions omap2_clk_functions;
extern struct clk *vclk, *sclk;
@@ -141,7 +148,9 @@
extern const struct clksel_rate gfx_l3_rates[];
extern const struct clksel_rate dsp_ick_rates[];
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ)
+#ifdef CONFIG_CPU_FREQ
+
+#ifdef CONFIG_ARCH_OMAP2
extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
#else
@@ -149,6 +158,16 @@
#define omap2_clk_exit_cpufreq_table 0
#endif
+#ifdef CONFIG_ARCH_OMAP3
+extern void omap3_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
+extern void omap3_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
+#else
+#define omap3_clk_init_cpufreq_table 0
+#define omap3_clk_exit_cpufreq_table 0
+#endif
+
+#endif /* CONFIG_CPU_FREQ */
+
extern const struct clkops clkops_omap2_iclk_dflt_wait;
extern const struct clkops clkops_omap2_iclk_dflt;
extern const struct clkops clkops_omap2_iclk_idle_only;
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 2926d02..f44d070 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1820,29 +1820,29 @@
CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_242X),
/* general l4 interface ck, multi-parent functional clk */
CLK(NULL, "gpt1_ick", &gpt1_ick, CK_242X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_242X),
+ CLK("omap_timer.1", "fck", &gpt1_fck, CK_242X),
CLK(NULL, "gpt2_ick", &gpt2_ick, CK_242X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_242X),
+ CLK("omap_timer.2", "fck", &gpt2_fck, CK_242X),
CLK(NULL, "gpt3_ick", &gpt3_ick, CK_242X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_242X),
+ CLK("omap_timer.3", "fck", &gpt3_fck, CK_242X),
CLK(NULL, "gpt4_ick", &gpt4_ick, CK_242X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_242X),
+ CLK("omap_timer.4", "fck", &gpt4_fck, CK_242X),
CLK(NULL, "gpt5_ick", &gpt5_ick, CK_242X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_242X),
+ CLK("omap_timer.5", "fck", &gpt5_fck, CK_242X),
CLK(NULL, "gpt6_ick", &gpt6_ick, CK_242X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_242X),
+ CLK("omap_timer.6", "fck", &gpt6_fck, CK_242X),
CLK(NULL, "gpt7_ick", &gpt7_ick, CK_242X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_242X),
+ CLK("omap_timer.7", "fck", &gpt7_fck, CK_242X),
CLK(NULL, "gpt8_ick", &gpt8_ick, CK_242X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_242X),
+ CLK("omap_timer.8", "fck", &gpt8_fck, CK_242X),
CLK(NULL, "gpt9_ick", &gpt9_ick, CK_242X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_242X),
+ CLK("omap_timer.9", "fck", &gpt9_fck, CK_242X),
CLK(NULL, "gpt10_ick", &gpt10_ick, CK_242X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_242X),
+ CLK("omap_timer.10", "fck", &gpt10_fck, CK_242X),
CLK(NULL, "gpt11_ick", &gpt11_ick, CK_242X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_242X),
+ CLK("omap_timer.11", "fck", &gpt11_fck, CK_242X),
CLK(NULL, "gpt12_ick", &gpt12_ick, CK_242X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_242X),
+ CLK("omap_timer.12", "fck", &gpt12_fck, CK_242X),
CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_242X),
CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_242X),
CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_242X),
@@ -1898,6 +1898,54 @@
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
CLK(NULL, "usb_fck", &usb_fck, CK_242X),
CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
+ CLK("omap_timer.1", "fck", &gpt1_fck, CK_242X),
+ CLK("omap_timer.2", "fck", &gpt2_fck, CK_242X),
+ CLK("omap_timer.3", "fck", &gpt3_fck, CK_242X),
+ CLK("omap_timer.4", "fck", &gpt4_fck, CK_242X),
+ CLK("omap_timer.5", "fck", &gpt5_fck, CK_242X),
+ CLK("omap_timer.6", "fck", &gpt6_fck, CK_242X),
+ CLK("omap_timer.7", "fck", &gpt7_fck, CK_242X),
+ CLK("omap_timer.8", "fck", &gpt8_fck, CK_242X),
+ CLK("omap_timer.9", "fck", &gpt9_fck, CK_242X),
+ CLK("omap_timer.10", "fck", &gpt10_fck, CK_242X),
+ CLK("omap_timer.11", "fck", &gpt11_fck, CK_242X),
+ CLK("omap_timer.12", "fck", &gpt12_fck, CK_242X),
+ CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.4", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.5", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.6", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.7", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.8", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.9", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.10", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.11", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.12", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.1", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.2", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.3", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.4", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.5", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.6", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.7", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.8", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.9", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.10", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.11", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.12", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.1", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.2", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.3", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.4", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.5", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.6", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.7", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.8", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.9", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.10", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.11", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.12", "alt_ck", &alt_ck, CK_243X),
};
/*
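
The CLK() changes in this table (and in the 2430 and 3xxx tables below) drop the global NULL/"gptN_fck" names in favour of per-device "fck", "32k_ck", "sys_ck" and "alt_ck" aliases keyed on the omap_timer.N device name, so the dmtimer code can look clocks up through its own struct device. A minimal consumer-side sketch (hypothetical driver code, not part of this patch) of what that lookup looks like:

    /*
     * Hypothetical consumer sketch -- not part of this patch.  Shows how a
     * ("omap_timer.N", "fck") alias added above is resolved via clkdev.
     */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int omap_timer_clk_sketch(struct platform_device *pdev)
    {
            struct clk *fck;
            int ret;

            /* matches CLK("omap_timer.1", "fck", ...) when pdev is omap_timer.1 */
            fck = clk_get(&pdev->dev, "fck");
            if (IS_ERR(fck))
                    return PTR_ERR(fck);

            ret = clk_enable(fck);
            if (ret) {
                    clk_put(fck);
                    return ret;
            }

            /* ... program the GP timer ... */
            return 0;
    }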
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 0c79d39..cffd1c3 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1910,29 +1910,29 @@
CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_243X),
/* general l4 interface ck, multi-parent functional clk */
CLK(NULL, "gpt1_ick", &gpt1_ick, CK_243X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_243X),
+ CLK("omap_timer.1", "fck", &gpt1_fck, CK_243X),
CLK(NULL, "gpt2_ick", &gpt2_ick, CK_243X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_243X),
+ CLK("omap_timer.2", "fck", &gpt2_fck, CK_243X),
CLK(NULL, "gpt3_ick", &gpt3_ick, CK_243X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_243X),
+ CLK("omap_timer.3", "fck", &gpt3_fck, CK_243X),
CLK(NULL, "gpt4_ick", &gpt4_ick, CK_243X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_243X),
+ CLK("omap_timer.4", "fck", &gpt4_fck, CK_243X),
CLK(NULL, "gpt5_ick", &gpt5_ick, CK_243X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_243X),
+ CLK("omap_timer.5", "fck", &gpt5_fck, CK_243X),
CLK(NULL, "gpt6_ick", &gpt6_ick, CK_243X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_243X),
+ CLK("omap_timer.6", "fck", &gpt6_fck, CK_243X),
CLK(NULL, "gpt7_ick", &gpt7_ick, CK_243X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_243X),
+ CLK("omap_timer.7", "fck", &gpt7_fck, CK_243X),
CLK(NULL, "gpt8_ick", &gpt8_ick, CK_243X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_243X),
+ CLK("omap_timer.8", "fck", &gpt8_fck, CK_243X),
CLK(NULL, "gpt9_ick", &gpt9_ick, CK_243X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_243X),
+ CLK("omap_timer.9", "fck", &gpt9_fck, CK_243X),
CLK(NULL, "gpt10_ick", &gpt10_ick, CK_243X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_243X),
+ CLK("omap_timer.10", "fck", &gpt10_fck, CK_243X),
CLK(NULL, "gpt11_ick", &gpt11_ick, CK_243X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_243X),
+ CLK("omap_timer.11", "fck", &gpt11_fck, CK_243X),
CLK(NULL, "gpt12_ick", &gpt12_ick, CK_243X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_243X),
+ CLK("omap_timer.12", "fck", &gpt12_fck, CK_243X),
CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_243X),
CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_243X),
CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_243X),
@@ -1998,6 +1998,42 @@
CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
+ CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.4", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.5", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.6", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.7", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.8", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.9", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.10", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.11", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.12", "32k_ck", &func_32k_ck, CK_243X),
+ CLK("omap_timer.1", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.2", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.3", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.4", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.5", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.6", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.7", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.8", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.9", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.10", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.11", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.12", "sys_ck", &sys_ck, CK_243X),
+ CLK("omap_timer.1", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.2", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.3", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.4", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.5", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.6", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.7", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.8", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.9", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.10", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.11", "alt_ck", &alt_ck, CK_243X),
+ CLK("omap_timer.12", "alt_ck", &alt_ck, CK_243X),
};
/*
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 1fc96b9..119e135 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -20,6 +20,8 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/cpufreq.h>
#include <plat/clock.h>
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 75b119b..a7d698a 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3280,12 +3280,12 @@
CLK(NULL, "modem_fck", &modem_fck, CK_34XX | CK_36XX),
CLK(NULL, "sad2d_ick", &sad2d_ick, CK_34XX | CK_36XX),
CLK(NULL, "mad2d_ick", &mad2d_ick, CK_34XX | CK_36XX),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_3XXX),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_3XXX),
+ CLK("omap_timer.10", "fck", &gpt10_fck, CK_3XXX),
+ CLK("omap_timer.11", "fck", &gpt11_fck, CK_3XXX),
CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
@@ -3321,7 +3321,7 @@
CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
@@ -3367,22 +3367,22 @@
CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "init_60m_fclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "init_60m_fclk", &dummy_ck, CK_3XXX),
CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
+ CLK("omap_timer.1", "fck", &gpt1_fck, CK_3XXX),
CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
CLK(NULL, "gpio1_dbck", &gpio1_dbck, CK_3XXX),
CLK("omap_wdt", "fck", &wdt2_fck, CK_3XXX),
@@ -3401,14 +3401,14 @@
CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_3XXX),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_3XXX),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_3XXX),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_3XXX),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_3XXX),
+ CLK("omap_timer.2", "fck", &gpt2_fck, CK_3XXX),
+ CLK("omap_timer.3", "fck", &gpt3_fck, CK_3XXX),
+ CLK("omap_timer.4", "fck", &gpt4_fck, CK_3XXX),
+ CLK("omap_timer.5", "fck", &gpt5_fck, CK_3XXX),
+ CLK("omap_timer.6", "fck", &gpt6_fck, CK_3XXX),
+ CLK("omap_timer.7", "fck", &gpt7_fck, CK_3XXX),
+ CLK("omap_timer.8", "fck", &gpt8_fck, CK_3XXX),
+ CLK("omap_timer.9", "fck", &gpt9_fck, CK_3XXX),
CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck, CK_3XXX),
CLK(NULL, "gpio6_dbck", &gpio6_dbck, CK_3XXX),
CLK(NULL, "gpio5_dbck", &gpio5_dbck, CK_3XXX),
@@ -3449,7 +3449,7 @@
CLK(NULL, "sr2_fck", &sr2_fck, CK_34XX | CK_36XX),
CLK(NULL, "sr_l4_ick", &sr_l4_ick, CK_34XX | CK_36XX),
CLK(NULL, "secure_32k_fck", &secure_32k_fck, CK_3XXX),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_3XXX),
+ CLK("omap_timer.12", "fck", &gpt12_fck, CK_3XXX),
CLK(NULL, "wdt1_fck", &wdt1_fck, CK_3XXX),
CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
@@ -3462,6 +3462,30 @@
CLK("musb-am35x", "fck", &hsotgusb_fck_am35xx, CK_AM35XX),
CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
+ CLK("omap_timer.1", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.2", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.3", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.4", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.5", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.6", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.7", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.8", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.9", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.10", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.11", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.12", "32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK("omap_timer.1", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.2", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.3", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.4", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.5", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.6", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.7", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.8", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.9", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.10", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.11", "sys_ck", &sys_ck, CK_3XXX),
+ CLK("omap_timer.12", "sys_ck", &sys_ck, CK_3XXX),
};
diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h
index 6be1095..bdc5f0d 100644
--- a/arch/arm/mach-omap2/clock44xx.h
+++ b/arch/arm/mach-omap2/clock44xx.h
@@ -14,7 +14,9 @@
*/
#define OMAP4430_MAX_DPLL_MULT 2047
#define OMAP4430_MAX_DPLL_DIV 128
+#define OMAP4430_REGM4XEN_MULT 4
int omap4xxx_clk_init(void);
+int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate);
#endif
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 8c96567..6089ff0 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -1,7 +1,7 @@
/*
- * OMAP4 Clock data
+ * OMAP44xx Clock data
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments Incorporated
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
@@ -42,6 +42,10 @@
#define OMAP4430_MODULEMODE_HWCTRL 0
#define OMAP4430_MODULEMODE_SWCTRL 1
+static int omap4_virt_l3_set_rate(struct clk *clk, unsigned long rate);
+static long omap4_virt_l3_round_rate(struct clk *clk, unsigned long rate);
+static unsigned long omap4_virt_l3_recalc(struct clk *clk);
+
/* Root clocks */
static struct clk extalt_clkin_ck = {
@@ -53,7 +57,7 @@
static struct clk pad_clks_ck = {
.name = "pad_clks_ck",
.rate = 12000000,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_CLKSEL_ABE,
.enable_bit = OMAP4430_PAD_CLKS_GATE_SHIFT,
};
@@ -73,7 +77,7 @@
static struct clk slimbus_clk = {
.name = "slimbus_clk",
.rate = 12000000,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_CLKSEL_ABE,
.enable_bit = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
};
@@ -127,42 +131,42 @@
};
static const struct clksel_rate div_1_0_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_1_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_2_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 2, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_3_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 3, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_4_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 4, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_5_rates[] = {
- { .div = 1, .val = 5, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 5, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_6_rates[] = {
- { .div = 1, .val = 6, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 6, .flags = RATE_IN_44XX },
{ .div = 0 },
};
static const struct clksel_rate div_1_7_rates[] = {
- { .div = 1, .val = 7, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 7, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -194,12 +198,6 @@
.ops = &clkops_null,
};
-static struct clk utmi_phy_clkout_ck = {
- .name = "utmi_phy_clkout_ck",
- .rate = 60000000,
- .ops = &clkops_null,
-};
-
static struct clk xclk60mhsp1_ck = {
.name = "xclk60mhsp1_ck",
.rate = 60000000,
@@ -270,8 +268,8 @@
.dpll_data = &dpll_abe_dd,
.init = &omap2_init_dpll_parent,
.ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .recalc = &omap4_dpll_regm4xen_recalc,
+ .round_rate = &omap4_dpll_regm4xen_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
};
@@ -285,37 +283,37 @@
};
static const struct clksel_rate div31_1to31_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_4430 },
- { .div = 2, .val = 2, .flags = RATE_IN_4430 },
- { .div = 3, .val = 3, .flags = RATE_IN_4430 },
- { .div = 4, .val = 4, .flags = RATE_IN_4430 },
- { .div = 5, .val = 5, .flags = RATE_IN_4430 },
- { .div = 6, .val = 6, .flags = RATE_IN_4430 },
- { .div = 7, .val = 7, .flags = RATE_IN_4430 },
- { .div = 8, .val = 8, .flags = RATE_IN_4430 },
- { .div = 9, .val = 9, .flags = RATE_IN_4430 },
- { .div = 10, .val = 10, .flags = RATE_IN_4430 },
- { .div = 11, .val = 11, .flags = RATE_IN_4430 },
- { .div = 12, .val = 12, .flags = RATE_IN_4430 },
- { .div = 13, .val = 13, .flags = RATE_IN_4430 },
- { .div = 14, .val = 14, .flags = RATE_IN_4430 },
- { .div = 15, .val = 15, .flags = RATE_IN_4430 },
- { .div = 16, .val = 16, .flags = RATE_IN_4430 },
- { .div = 17, .val = 17, .flags = RATE_IN_4430 },
- { .div = 18, .val = 18, .flags = RATE_IN_4430 },
- { .div = 19, .val = 19, .flags = RATE_IN_4430 },
- { .div = 20, .val = 20, .flags = RATE_IN_4430 },
- { .div = 21, .val = 21, .flags = RATE_IN_4430 },
- { .div = 22, .val = 22, .flags = RATE_IN_4430 },
- { .div = 23, .val = 23, .flags = RATE_IN_4430 },
- { .div = 24, .val = 24, .flags = RATE_IN_4430 },
- { .div = 25, .val = 25, .flags = RATE_IN_4430 },
- { .div = 26, .val = 26, .flags = RATE_IN_4430 },
- { .div = 27, .val = 27, .flags = RATE_IN_4430 },
- { .div = 28, .val = 28, .flags = RATE_IN_4430 },
- { .div = 29, .val = 29, .flags = RATE_IN_4430 },
- { .div = 30, .val = 30, .flags = RATE_IN_4430 },
- { .div = 31, .val = 31, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 1, .flags = RATE_IN_44XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_44XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_44XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_44XX },
+ { .div = 5, .val = 5, .flags = RATE_IN_44XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_44XX },
+ { .div = 7, .val = 7, .flags = RATE_IN_44XX },
+ { .div = 8, .val = 8, .flags = RATE_IN_44XX },
+ { .div = 9, .val = 9, .flags = RATE_IN_44XX },
+ { .div = 10, .val = 10, .flags = RATE_IN_44XX },
+ { .div = 11, .val = 11, .flags = RATE_IN_44XX },
+ { .div = 12, .val = 12, .flags = RATE_IN_44XX },
+ { .div = 13, .val = 13, .flags = RATE_IN_44XX },
+ { .div = 14, .val = 14, .flags = RATE_IN_44XX },
+ { .div = 15, .val = 15, .flags = RATE_IN_44XX },
+ { .div = 16, .val = 16, .flags = RATE_IN_44XX },
+ { .div = 17, .val = 17, .flags = RATE_IN_44XX },
+ { .div = 18, .val = 18, .flags = RATE_IN_44XX },
+ { .div = 19, .val = 19, .flags = RATE_IN_44XX },
+ { .div = 20, .val = 20, .flags = RATE_IN_44XX },
+ { .div = 21, .val = 21, .flags = RATE_IN_44XX },
+ { .div = 22, .val = 22, .flags = RATE_IN_44XX },
+ { .div = 23, .val = 23, .flags = RATE_IN_44XX },
+ { .div = 24, .val = 24, .flags = RATE_IN_44XX },
+ { .div = 25, .val = 25, .flags = RATE_IN_44XX },
+ { .div = 26, .val = 26, .flags = RATE_IN_44XX },
+ { .div = 27, .val = 27, .flags = RATE_IN_44XX },
+ { .div = 28, .val = 28, .flags = RATE_IN_44XX },
+ { .div = 29, .val = 29, .flags = RATE_IN_44XX },
+ { .div = 30, .val = 30, .flags = RATE_IN_44XX },
+ { .div = 31, .val = 31, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -345,9 +343,9 @@
};
static const struct clksel_rate div3_1to4_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 4, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_44XX },
+ { .div = 4, .val = 2, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -369,8 +367,8 @@
};
static const struct clksel_rate div2_1to2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -501,7 +499,7 @@
.ops = &clkops_omap4_dpllmx_ops,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
+ .set_rate = &omap4_core_dpll_m2_set_rate,
};
static struct clk ddrphy_ck = {
@@ -524,6 +522,15 @@
.set_rate = &omap2_clksel_set_rate,
};
+static struct clk virt_l3_ck = {
+ .name = "virt_l3_ck",
+ .parent = &dpll_core_m5x2_ck,
+ .ops = &clkops_null,
+ .set_rate = &omap4_virt_l3_set_rate,
+ .recalc = &omap4_virt_l3_recalc,
+ .round_rate = &omap4_virt_l3_round_rate,
+};
+
static const struct clksel div_core_div[] = {
{ .parent = &dpll_core_m5x2_ck, .rates = div2_1to2_rates },
{ .parent = NULL },
@@ -542,10 +549,10 @@
};
static const struct clksel_rate div4_1to8_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 4, .val = 2, .flags = RATE_IN_4430 },
- { .div = 8, .val = 3, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_44XX },
+ { .div = 4, .val = 2, .flags = RATE_IN_44XX },
+ { .div = 8, .val = 3, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -621,7 +628,7 @@
.clksel = dpll_core_m6x2_div,
.clksel_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
.clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
.enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
.recalc = &omap2_clksel_recalc,
@@ -774,6 +781,15 @@
.set_rate = &omap2_clksel_set_rate,
};
+static struct clk virt_dpll_mpu_ck = {
+ .name = "virt_dpll_mpu_ck",
+ .parent = &dpll_mpu_ck,
+ .ops = &clkops_null,
+ .recalc = &omap4460_mpu_dpll_recalc,
+ .round_rate = &omap4460_mpu_dpll_round_rate,
+ .set_rate = &omap4460_mpu_dpll_set_rate,
+};
+
static struct clk per_hs_clk_div_ck = {
.name = "per_hs_clk_div_ck",
.parent = &dpll_abe_m3x2_ck,
@@ -879,7 +895,7 @@
.clksel = dpll_per_m2x2_div,
.clksel_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
.clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
.enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
.recalc = &omap2_clksel_recalc,
@@ -1007,7 +1023,8 @@
.flags = DPLL_J_TYPE,
.clk_ref = &sys_clkin_ck,
.control_reg = OMAP4430_CM_CLKMODE_DPLL_USB,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED)
+ | (1 << DPLL_LOW_POWER_STOP),
.autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB,
.idlest_reg = OMAP4430_CM_IDLEST_DPLL_USB,
.mult_mask = OMAP4430_DPLL_MULT_MASK,
@@ -1030,6 +1047,7 @@
.recalc = &omap3_dpll_recalc,
.round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
+ .clkdm_name = "l3_init_clkdm",
};
static struct clk dpll_usb_clkdcoldo_ck = {
@@ -1040,6 +1058,13 @@
.recalc = &followparent_recalc,
};
+static struct clk utmi_phy_clkout_ck = {
+ .name = "utmi_phy_clkout_ck",
+ .ops = &clkops_null,
+ .parent = &dpll_usb_clkdcoldo_ck,
+ .recalc = &followparent_recalc,
+};
+
static const struct clksel dpll_usb_m2_div[] = {
{ .parent = &dpll_usb_ck, .rates = div31_1to31_rates },
{ .parent = NULL },
@@ -1099,8 +1124,8 @@
};
static const struct clksel_rate div2_4to8_rates[] = {
- { .div = 4, .val = 0, .flags = RATE_IN_4430 },
- { .div = 8, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 8, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -1130,8 +1155,8 @@
};
static const struct clksel_rate div2_2to4_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_4430 },
- { .div = 4, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 4, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -1183,8 +1208,8 @@
};
static const struct clksel_rate div2_1to8_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 8, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 8, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -1264,6 +1289,28 @@
.recalc = &omap2_clksel_recalc,
};
+static const struct clksel_rate div3_8to32_rates[] = {
+ { .div = 8, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 16, .val = 1, .flags = RATE_IN_44XX },
+ { .div = 32, .val = 2, .flags = RATE_IN_44XX },
+ { .div = 0 },
+};
+
+static const struct clksel div_ts_ck_div[] = {
+ { .parent = &l4_wkup_clk_mux_ck, .rates = div3_8to32_rates },
+ { .parent = NULL },
+};
+
+static struct clk div_ts_ck = {
+ .name = "div_ts_ck",
+ .parent = &l4_wkup_clk_mux_ck,
+ .clksel = div_ts_ck_div,
+ .clksel_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
static const struct clksel per_abe_nc_fclk_div[] = {
{ .parent = &dpll_abe_m2_ck, .rates = div2_1to2_rates },
{ .parent = NULL },
@@ -1358,7 +1405,7 @@
static struct clk aes1_fck = {
.name = "aes1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_AES1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -1368,7 +1415,7 @@
static struct clk aes2_fck = {
.name = "aes2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_AES2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -1378,7 +1425,7 @@
static struct clk aess_fck = {
.name = "aess_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "abe_clkdm",
@@ -1388,7 +1435,7 @@
static struct clk bandgap_fclk = {
.name = "bandgap_fclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT,
.clkdm_name = "l4_wkup_clkdm",
@@ -1396,9 +1443,19 @@
.recalc = &followparent_recalc,
};
+static struct clk bandgap_ts_fclk = {
+ .name = "bandgap_ts_fclk",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ .enable_bit = OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &div_ts_ck,
+ .recalc = &followparent_recalc,
+};
+
static struct clk des3des_fck = {
.name = "des3des_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_DES3DES_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -1439,7 +1496,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1448,7 +1505,7 @@
static struct clk dsp_fck = {
.name = "dsp_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "tesla_clkdm",
@@ -1458,17 +1515,20 @@
static struct clk dss_sys_clk = {
.name = "dss_sys_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT,
.clkdm_name = "l3_dss_clkdm",
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = ENABLE_ON_INIT,
+#endif
.parent = &syc_clk_div_ck,
.recalc = &followparent_recalc,
};
static struct clk dss_tv_clk = {
.name = "dss_tv_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_TV_CLK_SHIFT,
.clkdm_name = "l3_dss_clkdm",
@@ -1478,7 +1538,7 @@
static struct clk dss_dss_clk = {
.name = "dss_dss_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
.clkdm_name = "l3_dss_clkdm",
@@ -1488,7 +1548,7 @@
static struct clk dss_48mhz_clk = {
.name = "dss_48mhz_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT,
.clkdm_name = "l3_dss_clkdm",
@@ -1498,17 +1558,20 @@
static struct clk dss_fck = {
.name = "dss_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l3_dss_clkdm",
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = ENABLE_ON_INIT,
+#endif
.parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
static struct clk efuse_ctrl_cust_fck = {
.name = "efuse_ctrl_cust_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_cefuse_clkdm",
@@ -1518,10 +1581,9 @@
static struct clk emif1_fck = {
.name = "emif1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
.clkdm_name = "l3_emif_clkdm",
.parent = &ddrphy_ck,
.recalc = &followparent_recalc,
@@ -1529,10 +1591,9 @@
static struct clk emif2_fck = {
.name = "emif2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
.clkdm_name = "l3_emif_clkdm",
.parent = &ddrphy_ck,
.recalc = &followparent_recalc,
@@ -1550,7 +1611,7 @@
.clksel = fdif_fclk_div,
.clksel_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_FCLK_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate,
@@ -1561,7 +1622,7 @@
static struct clk fpka_fck = {
.name = "fpka_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -1571,7 +1632,7 @@
static struct clk gpio1_dbclk = {
.name = "gpio1_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_wkup_clkdm",
@@ -1581,7 +1642,7 @@
static struct clk gpio1_ick = {
.name = "gpio1_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_wkup_clkdm",
@@ -1591,7 +1652,7 @@
static struct clk gpio2_dbclk = {
.name = "gpio2_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -1601,7 +1662,7 @@
static struct clk gpio2_ick = {
.name = "gpio2_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1611,7 +1672,7 @@
static struct clk gpio3_dbclk = {
.name = "gpio3_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -1621,7 +1682,7 @@
static struct clk gpio3_ick = {
.name = "gpio3_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1631,7 +1692,7 @@
static struct clk gpio4_dbclk = {
.name = "gpio4_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -1641,7 +1702,7 @@
static struct clk gpio4_ick = {
.name = "gpio4_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1651,7 +1712,7 @@
static struct clk gpio5_dbclk = {
.name = "gpio5_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -1661,7 +1722,7 @@
static struct clk gpio5_ick = {
.name = "gpio5_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1671,7 +1732,7 @@
static struct clk gpio6_dbclk = {
.name = "gpio6_dbclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -1681,7 +1742,7 @@
static struct clk gpio6_ick = {
.name = "gpio6_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1691,10 +1752,11 @@
static struct clk gpmc_ick = {
.name = "gpmc_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3_2_GPMC_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_2_clkdm",
+ .flags = ENABLE_ON_INIT,
.parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
@@ -1713,7 +1775,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SGX_FCLK_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1722,7 +1784,7 @@
static struct clk hdq1w_fck = {
.name = "hdq1w_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_HDQ1W_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1742,7 +1804,7 @@
.clksel = hsi_fclk_div,
.clksel_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate,
@@ -1753,7 +1815,7 @@
static struct clk i2c1_fck = {
.name = "i2c1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_I2C1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1763,7 +1825,7 @@
static struct clk i2c2_fck = {
.name = "i2c2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_I2C2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1773,7 +1835,7 @@
static struct clk i2c3_fck = {
.name = "i2c3_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_I2C3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1783,7 +1845,7 @@
static struct clk i2c4_fck = {
.name = "i2c4_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_I2C4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -1793,7 +1855,7 @@
static struct clk ipu_fck = {
.name = "ipu_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "ducati_clkdm",
@@ -1803,7 +1865,7 @@
static struct clk iss_ctrlclk = {
.name = "iss_ctrlclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,
.clkdm_name = "iss_clkdm",
@@ -1813,7 +1875,7 @@
static struct clk iss_fck = {
.name = "iss_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "iss_clkdm",
@@ -1823,7 +1885,7 @@
static struct clk iva_fck = {
.name = "iva_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "ivahd_clkdm",
@@ -1833,7 +1895,7 @@
static struct clk kbd_fck = {
.name = "kbd_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_wkup_clkdm",
@@ -1843,7 +1905,7 @@
static struct clk l3_instr_ick = {
.name = "l3_instr_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_instr_clkdm",
@@ -1854,7 +1916,7 @@
static struct clk l3_main_3_ick = {
.name = "l3_main_3_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_instr_clkdm",
@@ -1889,7 +1951,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1922,7 +1984,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1955,7 +2017,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -1988,7 +2050,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2020,7 +2082,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_SOURCE_24_24_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2029,7 +2091,7 @@
static struct clk mcpdm_fck = {
.name = "mcpdm_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "abe_clkdm",
@@ -2039,7 +2101,7 @@
static struct clk mcspi1_fck = {
.name = "mcspi1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2049,7 +2111,7 @@
static struct clk mcspi2_fck = {
.name = "mcspi2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2059,7 +2121,7 @@
static struct clk mcspi3_fck = {
.name = "mcspi3_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2069,7 +2131,7 @@
static struct clk mcspi4_fck = {
.name = "mcspi4_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2085,7 +2147,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2100,7 +2162,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2109,7 +2171,7 @@
static struct clk mmc3_fck = {
.name = "mmc3_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2119,7 +2181,7 @@
static struct clk mmc4_fck = {
.name = "mmc4_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2129,7 +2191,7 @@
static struct clk mmc5_fck = {
.name = "mmc5_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2139,7 +2201,7 @@
static struct clk ocp2scp_usb_phy_phy_48m = {
.name = "ocp2scp_usb_phy_phy_48m",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_PHY_48M_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2149,7 +2211,7 @@
static struct clk ocp2scp_usb_phy_ick = {
.name = "ocp2scp_usb_phy_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_init_clkdm",
@@ -2159,7 +2221,7 @@
static struct clk ocp_wp_noc_ick = {
.name = "ocp_wp_noc_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_instr_clkdm",
@@ -2170,7 +2232,7 @@
static struct clk rng_ick = {
.name = "rng_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_RNG_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -2180,7 +2242,7 @@
static struct clk sha2md5_fck = {
.name = "sha2md5_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_secure_clkdm",
@@ -2190,7 +2252,7 @@
static struct clk sl2if_ick = {
.name = "sl2if_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "ivahd_clkdm",
@@ -2200,7 +2262,7 @@
static struct clk slimbus1_fclk_1 = {
.name = "slimbus1_fclk_1",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_FCLK1_SHIFT,
.clkdm_name = "abe_clkdm",
@@ -2210,7 +2272,7 @@
static struct clk slimbus1_fclk_0 = {
.name = "slimbus1_fclk_0",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_FCLK0_SHIFT,
.clkdm_name = "abe_clkdm",
@@ -2220,7 +2282,7 @@
static struct clk slimbus1_fclk_2 = {
.name = "slimbus1_fclk_2",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_FCLK2_SHIFT,
.clkdm_name = "abe_clkdm",
@@ -2230,7 +2292,7 @@
static struct clk slimbus1_slimbus_clk = {
.name = "slimbus1_slimbus_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT,
.clkdm_name = "abe_clkdm",
@@ -2240,7 +2302,7 @@
static struct clk slimbus1_fck = {
.name = "slimbus1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "abe_clkdm",
@@ -2250,7 +2312,7 @@
static struct clk slimbus2_fclk_1 = {
.name = "slimbus2_fclk_1",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -2260,7 +2322,7 @@
static struct clk slimbus2_fclk_0 = {
.name = "slimbus2_fclk_0",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -2270,7 +2332,7 @@
static struct clk slimbus2_slimbus_clk = {
.name = "slimbus2_slimbus_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT,
.clkdm_name = "l4_per_clkdm",
@@ -2280,7 +2342,7 @@
static struct clk slimbus2_fck = {
.name = "slimbus2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2290,7 +2352,7 @@
static struct clk smartreflex_core_fck = {
.name = "smartreflex_core_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_ao_clkdm",
@@ -2300,7 +2362,7 @@
static struct clk smartreflex_iva_fck = {
.name = "smartreflex_iva_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_ao_clkdm",
@@ -2310,7 +2372,7 @@
static struct clk smartreflex_mpu_fck = {
.name = "smartreflex_mpu_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_ao_clkdm",
@@ -2326,7 +2388,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2341,7 +2403,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2356,7 +2418,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2371,7 +2433,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2386,7 +2448,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2401,7 +2463,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2422,7 +2484,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2437,7 +2499,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2452,7 +2514,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2467,7 +2529,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2482,7 +2544,7 @@
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
@@ -2491,7 +2553,7 @@
static struct clk uart1_fck = {
.name = "uart1_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_UART1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2501,7 +2563,7 @@
static struct clk uart2_fck = {
.name = "uart2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_UART2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2511,7 +2573,7 @@
static struct clk uart3_fck = {
.name = "uart3_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_UART3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2521,7 +2583,7 @@
static struct clk uart4_fck = {
.name = "uart4_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L4PER_UART4_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_per_clkdm",
@@ -2531,7 +2593,7 @@
static struct clk usb_host_fs_fck = {
.name = "usb_host_fs_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l3_init_clkdm",
@@ -2558,7 +2620,7 @@
static struct clk usb_host_hs_utmi_p1_clk = {
.name = "usb_host_hs_utmi_p1_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2585,7 +2647,7 @@
static struct clk usb_host_hs_utmi_p2_clk = {
.name = "usb_host_hs_utmi_p2_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2595,7 +2657,7 @@
static struct clk usb_host_hs_utmi_p3_clk = {
.name = "usb_host_hs_utmi_p3_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2605,7 +2667,7 @@
static struct clk usb_host_hs_hsic480m_p1_clk = {
.name = "usb_host_hs_hsic480m_p1_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2615,7 +2677,7 @@
static struct clk usb_host_hs_hsic60m_p1_clk = {
.name = "usb_host_hs_hsic60m_p1_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2625,7 +2687,7 @@
static struct clk usb_host_hs_hsic60m_p2_clk = {
.name = "usb_host_hs_hsic60m_p2_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2635,7 +2697,7 @@
static struct clk usb_host_hs_hsic480m_p2_clk = {
.name = "usb_host_hs_hsic480m_p2_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2645,7 +2707,7 @@
static struct clk usb_host_hs_func48mclk = {
.name = "usb_host_hs_func48mclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2655,7 +2717,7 @@
static struct clk usb_host_hs_fck = {
.name = "usb_host_hs_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l3_init_clkdm",
@@ -2682,7 +2744,7 @@
static struct clk usb_otg_hs_xclk = {
.name = "usb_otg_hs_xclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_XCLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2692,17 +2754,17 @@
static struct clk usb_otg_hs_ick = {
.name = "usb_otg_hs_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_init_clkdm",
- .parent = &l3_div_ck,
+ .parent = &otg_60m_gfclk,
.recalc = &followparent_recalc,
};
static struct clk usb_phy_cm_clk32k = {
.name = "usb_phy_cm_clk32k",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_ALWON_USBPHY_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_CLK32K_SHIFT,
.clkdm_name = "l4_ao_clkdm",
@@ -2712,7 +2774,7 @@
static struct clk usb_tll_hs_usb_ch2_clk = {
.name = "usb_tll_hs_usb_ch2_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2722,7 +2784,7 @@
static struct clk usb_tll_hs_usb_ch0_clk = {
.name = "usb_tll_hs_usb_ch0_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2732,7 +2794,7 @@
static struct clk usb_tll_hs_usb_ch1_clk = {
.name = "usb_tll_hs_usb_ch1_clk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT,
.clkdm_name = "l3_init_clkdm",
@@ -2742,7 +2804,7 @@
static struct clk usb_tll_hs_ick = {
.name = "usb_tll_hs_ick",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l3_init_clkdm",
@@ -2751,8 +2813,8 @@
};
static const struct clksel_rate div2_14to18_rates[] = {
- { .div = 14, .val = 0, .flags = RATE_IN_4430 },
- { .div = 18, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 14, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 18, .val = 1, .flags = RATE_IN_44XX },
{ .div = 0 },
};
@@ -2775,7 +2837,7 @@
static struct clk usim_fclk = {
.name = "usim_fclk",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
.enable_bit = OMAP4430_OPTFCLKEN_FCLK_SHIFT,
.clkdm_name = "l4_wkup_clkdm",
@@ -2785,7 +2847,7 @@
static struct clk usim_fck = {
.name = "usim_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.clkdm_name = "l4_wkup_clkdm",
@@ -2795,7 +2857,7 @@
static struct clk wd_timer2_fck = {
.name = "wd_timer2_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "l4_wkup_clkdm",
@@ -2805,7 +2867,7 @@
static struct clk wd_timer3_fck = {
.name = "wd_timer3_fck",
- .ops = &clkops_omap2_dflt,
+ .ops = &clkops_omap4_dflt_wait,
.enable_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_SWCTRL,
.clkdm_name = "abe_clkdm",
@@ -2850,19 +2912,39 @@
/* SCRM aux clk nodes */
-static const struct clksel auxclk_sel[] = {
+static const struct clksel auxclk_src_sel[] = {
{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &dpll_core_m3x2_ck, .rates = div_1_1_rates },
{ .parent = &dpll_per_m3x2_ck, .rates = div_1_2_rates },
{ .parent = NULL },
};
-static struct clk auxclk0_ck = {
- .name = "auxclk0_ck",
+static const struct clksel_rate div16_1to16_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_44XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_44XX },
+ { .div = 3, .val = 2, .flags = RATE_IN_44XX },
+ { .div = 4, .val = 3, .flags = RATE_IN_44XX },
+ { .div = 5, .val = 4, .flags = RATE_IN_44XX },
+ { .div = 6, .val = 5, .flags = RATE_IN_44XX },
+ { .div = 7, .val = 6, .flags = RATE_IN_44XX },
+ { .div = 8, .val = 7, .flags = RATE_IN_44XX },
+ { .div = 9, .val = 8, .flags = RATE_IN_44XX },
+ { .div = 10, .val = 9, .flags = RATE_IN_44XX },
+ { .div = 11, .val = 10, .flags = RATE_IN_44XX },
+ { .div = 12, .val = 11, .flags = RATE_IN_44XX },
+ { .div = 13, .val = 12, .flags = RATE_IN_44XX },
+ { .div = 14, .val = 13, .flags = RATE_IN_44XX },
+ { .div = 15, .val = 14, .flags = RATE_IN_44XX },
+ { .div = 16, .val = 15, .flags = RATE_IN_44XX },
+ { .div = 0 },
+};
+
+static struct clk auxclk0_src_ck = {
+ .name = "auxclk0_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK0,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
@@ -2870,12 +2952,29 @@
.enable_bit = OMAP4_ENABLE_SHIFT,
};
-static struct clk auxclk1_ck = {
- .name = "auxclk1_ck",
+static const struct clksel auxclk0_sel[] = {
+ { .parent = &auxclk0_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk0_ck = {
+ .name = "auxclk0_ck",
+ .parent = &auxclk0_src_ck,
+ .clksel = auxclk0_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK0,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk auxclk1_src_ck = {
+ .name = "auxclk1_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK1,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
@@ -2883,24 +2982,59 @@
.enable_bit = OMAP4_ENABLE_SHIFT,
};
-static struct clk auxclk2_ck = {
- .name = "auxclk2_ck",
+static const struct clksel auxclk1_sel[] = {
+ { .parent = &auxclk1_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk1_ck = {
+ .name = "auxclk1_ck",
+ .parent = &auxclk1_src_ck,
+ .clksel = auxclk1_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK1,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk auxclk2_src_ck = {
+ .name = "auxclk2_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK2,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
.enable_reg = OMAP4_SCRM_AUXCLK2,
.enable_bit = OMAP4_ENABLE_SHIFT,
};
-static struct clk auxclk3_ck = {
- .name = "auxclk3_ck",
+
+static const struct clksel auxclk2_sel[] = {
+ { .parent = &auxclk2_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk2_ck = {
+ .name = "auxclk2_ck",
+ .parent = &auxclk2_src_ck,
+ .clksel = auxclk2_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK2,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk auxclk3_src_ck = {
+ .name = "auxclk3_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK3,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
@@ -2908,12 +3042,29 @@
.enable_bit = OMAP4_ENABLE_SHIFT,
};
-static struct clk auxclk4_ck = {
- .name = "auxclk4_ck",
+static const struct clksel auxclk3_sel[] = {
+ { .parent = &auxclk3_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk3_ck = {
+ .name = "auxclk3_ck",
+ .parent = &auxclk3_src_ck,
+ .clksel = auxclk3_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK3,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk auxclk4_src_ck = {
+ .name = "auxclk4_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK4,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
@@ -2921,12 +3072,29 @@
.enable_bit = OMAP4_ENABLE_SHIFT,
};
-static struct clk auxclk5_ck = {
- .name = "auxclk5_ck",
+static const struct clksel auxclk4_sel[] = {
+ { .parent = &auxclk4_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk4_ck = {
+ .name = "auxclk4_ck",
+ .parent = &auxclk4_src_ck,
+ .clksel = auxclk4_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK4,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk auxclk5_src_ck = {
+ .name = "auxclk5_src_ck",
.parent = &sys_clkin_ck,
.init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_sel,
+ .ops = &clkops_omap4_dflt_wait,
+ .clksel = auxclk_src_sel,
.clksel_reg = OMAP4_SCRM_AUXCLK5,
.clksel_mask = OMAP4_SRCSELECT_MASK,
.recalc = &omap2_clksel_recalc,
@@ -2934,6 +3102,23 @@
.enable_bit = OMAP4_ENABLE_SHIFT,
};
+static const struct clksel auxclk5_sel[] = {
+ { .parent = &auxclk5_src_ck, .rates = div16_1to16_rates },
+ { .parent = NULL },
+};
+
+static struct clk auxclk5_ck = {
+ .name = "auxclk5_ck",
+ .parent = &auxclk5_src_ck,
+ .clksel = auxclk5_sel,
+ .clksel_reg = OMAP4_SCRM_AUXCLK5,
+ .clksel_mask = OMAP4_CLKDIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
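With this split, each auxclkN_ck above becomes a pure 1..16 post-divider whose parent auxclkN_src_ck handles source selection and gating. A minimal usage sketch, assuming a board file that wants a divided aux clock (the "auxclk1_ck" lookup name comes from the clkdev table below; the 19.2 MHz target is only an example):

	struct clk *auxclk = clk_get(NULL, "auxclk1_ck");

	if (!IS_ERR(auxclk)) {
		/* e.g. sys_clkin_ck at 38.4 MHz, divided by 2 */
		clk_set_rate(auxclk, 19200000);
		clk_put(auxclk);
	}

The source leaf is gated separately, e.g. via clk_enable() on "auxclk1_src_ck", since the enable bit now lives on the *_src_ck node.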
static const struct clksel auxclkreq_sel[] = {
{ .parent = &auxclk0_ck, .rates = div_1_0_rates },
{ .parent = &auxclk1_ck, .rates = div_1_1_rates },
@@ -3010,289 +3195,448 @@
.recalc = &omap2_clksel_recalc,
};
+static struct clk smp_twd_443x = {
+ .name = "smp_twd",
+ .parent = &dpll_mpu_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk smp_twd_446x = {
+ .name = "smp_twd",
+ .parent = &virt_dpll_mpu_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
/*
* clkdev
*/
static struct omap_clk omap44xx_clks[] = {
- CLK(NULL, "extalt_clkin_ck", &extalt_clkin_ck, CK_443X),
- CLK(NULL, "pad_clks_ck", &pad_clks_ck, CK_443X),
- CLK(NULL, "pad_slimbus_core_clks_ck", &pad_slimbus_core_clks_ck, CK_443X),
- CLK(NULL, "secure_32k_clk_src_ck", &secure_32k_clk_src_ck, CK_443X),
- CLK(NULL, "slimbus_clk", &slimbus_clk, CK_443X),
- CLK(NULL, "sys_32k_ck", &sys_32k_ck, CK_443X),
- CLK(NULL, "virt_12000000_ck", &virt_12000000_ck, CK_443X),
- CLK(NULL, "virt_13000000_ck", &virt_13000000_ck, CK_443X),
- CLK(NULL, "virt_16800000_ck", &virt_16800000_ck, CK_443X),
- CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_443X),
- CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_443X),
- CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_443X),
- CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_443X),
- CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_443X),
- CLK(NULL, "tie_low_clock_ck", &tie_low_clock_ck, CK_443X),
- CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_443X),
- CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_443X),
- CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_443X),
- CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_443X),
- CLK(NULL, "abe_dpll_bypass_clk_mux_ck", &abe_dpll_bypass_clk_mux_ck, CK_443X),
- CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_443X),
- CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_443X),
- CLK(NULL, "dpll_abe_x2_ck", &dpll_abe_x2_ck, CK_443X),
- CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_443X),
- CLK(NULL, "abe_24m_fclk", &abe_24m_fclk, CK_443X),
- CLK(NULL, "abe_clk", &abe_clk, CK_443X),
- CLK(NULL, "aess_fclk", &aess_fclk, CK_443X),
- CLK(NULL, "dpll_abe_m3x2_ck", &dpll_abe_m3x2_ck, CK_443X),
- CLK(NULL, "core_hsd_byp_clk_mux_ck", &core_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_443X),
- CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_443X),
- CLK(NULL, "dpll_core_m6x2_ck", &dpll_core_m6x2_ck, CK_443X),
- CLK(NULL, "dbgclk_mux_ck", &dbgclk_mux_ck, CK_443X),
- CLK(NULL, "dpll_core_m2_ck", &dpll_core_m2_ck, CK_443X),
- CLK(NULL, "ddrphy_ck", &ddrphy_ck, CK_443X),
- CLK(NULL, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, CK_443X),
- CLK(NULL, "div_core_ck", &div_core_ck, CK_443X),
- CLK(NULL, "div_iva_hs_clk", &div_iva_hs_clk, CK_443X),
- CLK(NULL, "div_mpu_hs_clk", &div_mpu_hs_clk, CK_443X),
- CLK(NULL, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck, CK_443X),
- CLK(NULL, "dll_clk_div_ck", &dll_clk_div_ck, CK_443X),
- CLK(NULL, "dpll_abe_m2_ck", &dpll_abe_m2_ck, CK_443X),
- CLK(NULL, "dpll_core_m3x2_ck", &dpll_core_m3x2_ck, CK_443X),
- CLK(NULL, "dpll_core_m7x2_ck", &dpll_core_m7x2_ck, CK_443X),
- CLK(NULL, "iva_hsd_byp_clk_mux_ck", &iva_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_iva_ck", &dpll_iva_ck, CK_443X),
- CLK(NULL, "dpll_iva_x2_ck", &dpll_iva_x2_ck, CK_443X),
- CLK(NULL, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, CK_443X),
- CLK(NULL, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, CK_443X),
- CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_443X),
- CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_443X),
- CLK(NULL, "per_hs_clk_div_ck", &per_hs_clk_div_ck, CK_443X),
- CLK(NULL, "per_hsd_byp_clk_mux_ck", &per_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_443X),
- CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_443X),
- CLK(NULL, "dpll_per_x2_ck", &dpll_per_x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m3x2_ck", &dpll_per_m3x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m6x2_ck", &dpll_per_m6x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m7x2_ck", &dpll_per_m7x2_ck, CK_443X),
- CLK(NULL, "dpll_unipro_ck", &dpll_unipro_ck, CK_443X),
- CLK(NULL, "dpll_unipro_x2_ck", &dpll_unipro_x2_ck, CK_443X),
- CLK(NULL, "dpll_unipro_m2x2_ck", &dpll_unipro_m2x2_ck, CK_443X),
- CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_443X),
- CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_443X),
- CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_443X),
- CLK(NULL, "dpll_usb_m2_ck", &dpll_usb_m2_ck, CK_443X),
- CLK(NULL, "ducati_clk_mux_ck", &ducati_clk_mux_ck, CK_443X),
- CLK(NULL, "func_12m_fclk", &func_12m_fclk, CK_443X),
- CLK(NULL, "func_24m_clk", &func_24m_clk, CK_443X),
- CLK(NULL, "func_24mc_fclk", &func_24mc_fclk, CK_443X),
- CLK(NULL, "func_48m_fclk", &func_48m_fclk, CK_443X),
- CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_443X),
- CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_443X),
- CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_443X),
- CLK(NULL, "hsmmc6_fclk", &hsmmc6_fclk, CK_443X),
- CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_443X),
- CLK(NULL, "l3_div_ck", &l3_div_ck, CK_443X),
- CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
- CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
- CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
- CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
- CLK(NULL, "mcasp2_fclk", &mcasp2_fclk, CK_443X),
- CLK(NULL, "mcasp3_fclk", &mcasp3_fclk, CK_443X),
- CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
- CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
- CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_443X),
- CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_443X),
- CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_443X),
- CLK(NULL, "aes1_fck", &aes1_fck, CK_443X),
- CLK(NULL, "aes2_fck", &aes2_fck, CK_443X),
- CLK(NULL, "aess_fck", &aess_fck, CK_443X),
- CLK(NULL, "bandgap_fclk", &bandgap_fclk, CK_443X),
- CLK(NULL, "des3des_fck", &des3des_fck, CK_443X),
- CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
- CLK(NULL, "dmic_fck", &dmic_fck, CK_443X),
- CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
- CLK("omapdss_dss", "sys_clk", &dss_sys_clk, CK_443X),
- CLK("omapdss_dss", "tv_clk", &dss_tv_clk, CK_443X),
- CLK("omapdss_dss", "video_clk", &dss_48mhz_clk, CK_443X),
- CLK("omapdss_dss", "fck", &dss_dss_clk, CK_443X),
- CLK("omapdss_dss", "ick", &dss_fck, CK_443X),
- CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
- CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
- CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
- CLK(NULL, "fdif_fck", &fdif_fck, CK_443X),
- CLK(NULL, "fpka_fck", &fpka_fck, CK_443X),
- CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_443X),
- CLK(NULL, "gpio1_ick", &gpio1_ick, CK_443X),
- CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_443X),
- CLK(NULL, "gpio2_ick", &gpio2_ick, CK_443X),
- CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_443X),
- CLK(NULL, "gpio3_ick", &gpio3_ick, CK_443X),
- CLK(NULL, "gpio4_dbclk", &gpio4_dbclk, CK_443X),
- CLK(NULL, "gpio4_ick", &gpio4_ick, CK_443X),
- CLK(NULL, "gpio5_dbclk", &gpio5_dbclk, CK_443X),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_443X),
- CLK(NULL, "gpio6_dbclk", &gpio6_dbclk, CK_443X),
- CLK(NULL, "gpio6_ick", &gpio6_ick, CK_443X),
- CLK(NULL, "gpmc_ick", &gpmc_ick, CK_443X),
- CLK(NULL, "gpu_fck", &gpu_fck, CK_443X),
- CLK("omap2_hdq.0", "fck", &hdq1w_fck, CK_443X),
- CLK(NULL, "hsi_fck", &hsi_fck, CK_443X),
- CLK("omap_i2c.1", "fck", &i2c1_fck, CK_443X),
- CLK("omap_i2c.2", "fck", &i2c2_fck, CK_443X),
- CLK("omap_i2c.3", "fck", &i2c3_fck, CK_443X),
- CLK("omap_i2c.4", "fck", &i2c4_fck, CK_443X),
- CLK(NULL, "ipu_fck", &ipu_fck, CK_443X),
- CLK(NULL, "iss_ctrlclk", &iss_ctrlclk, CK_443X),
- CLK(NULL, "iss_fck", &iss_fck, CK_443X),
- CLK(NULL, "iva_fck", &iva_fck, CK_443X),
- CLK(NULL, "kbd_fck", &kbd_fck, CK_443X),
- CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_443X),
- CLK(NULL, "l3_main_3_ick", &l3_main_3_ick, CK_443X),
- CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_443X),
- CLK(NULL, "mcasp_fck", &mcasp_fck, CK_443X),
- CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_443X),
- CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_443X),
- CLK(NULL, "mcbsp2_sync_mux_ck", &mcbsp2_sync_mux_ck, CK_443X),
- CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_443X),
- CLK(NULL, "mcbsp3_sync_mux_ck", &mcbsp3_sync_mux_ck, CK_443X),
- CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_443X),
- CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_443X),
- CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_443X),
- CLK(NULL, "mcpdm_fck", &mcpdm_fck, CK_443X),
- CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_443X),
- CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_443X),
- CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_443X),
- CLK("omap2_mcspi.4", "fck", &mcspi4_fck, CK_443X),
- CLK("omap_hsmmc.0", "fck", &mmc1_fck, CK_443X),
- CLK("omap_hsmmc.1", "fck", &mmc2_fck, CK_443X),
- CLK("omap_hsmmc.2", "fck", &mmc3_fck, CK_443X),
- CLK("omap_hsmmc.3", "fck", &mmc4_fck, CK_443X),
- CLK("omap_hsmmc.4", "fck", &mmc5_fck, CK_443X),
- CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
- CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_443X),
- CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_443X),
- CLK("omap_rng", "ick", &rng_ick, CK_443X),
- CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
- CLK(NULL, "sl2if_ick", &sl2if_ick, CK_443X),
- CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
- CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
- CLK(NULL, "slimbus1_fclk_2", &slimbus1_fclk_2, CK_443X),
- CLK(NULL, "slimbus1_slimbus_clk", &slimbus1_slimbus_clk, CK_443X),
- CLK(NULL, "slimbus1_fck", &slimbus1_fck, CK_443X),
- CLK(NULL, "slimbus2_fclk_1", &slimbus2_fclk_1, CK_443X),
- CLK(NULL, "slimbus2_fclk_0", &slimbus2_fclk_0, CK_443X),
- CLK(NULL, "slimbus2_slimbus_clk", &slimbus2_slimbus_clk, CK_443X),
- CLK(NULL, "slimbus2_fck", &slimbus2_fck, CK_443X),
- CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_443X),
- CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_443X),
- CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_443X),
- CLK(NULL, "gpt1_fck", &timer1_fck, CK_443X),
- CLK(NULL, "gpt10_fck", &timer10_fck, CK_443X),
- CLK(NULL, "gpt11_fck", &timer11_fck, CK_443X),
- CLK(NULL, "gpt2_fck", &timer2_fck, CK_443X),
- CLK(NULL, "gpt3_fck", &timer3_fck, CK_443X),
- CLK(NULL, "gpt4_fck", &timer4_fck, CK_443X),
- CLK(NULL, "gpt5_fck", &timer5_fck, CK_443X),
- CLK(NULL, "gpt6_fck", &timer6_fck, CK_443X),
- CLK(NULL, "gpt7_fck", &timer7_fck, CK_443X),
- CLK(NULL, "gpt8_fck", &timer8_fck, CK_443X),
- CLK(NULL, "gpt9_fck", &timer9_fck, CK_443X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_443X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
- CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
- CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
- CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X),
- CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
- CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p2_clk", &usb_host_hs_utmi_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p3_clk", &usb_host_hs_utmi_p3_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic480m_p1_clk", &usb_host_hs_hsic480m_p1_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic60m_p1_clk", &usb_host_hs_hsic60m_p1_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
- CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
- CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X),
- CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
- CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
- CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
- CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
- CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
- CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
- CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
- CLK(NULL, "usim_ck", &usim_ck, CK_443X),
- CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
- CLK(NULL, "usim_fck", &usim_fck, CK_443X),
- CLK("omap_wdt", "fck", &wd_timer2_fck, CK_443X),
- CLK(NULL, "mailboxes_ick", &dummy_ck, CK_443X),
- CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_443X),
- CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
- CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
- CLK(NULL, "gpmc_ck", &dummy_ck, CK_443X),
- CLK(NULL, "gpt1_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt2_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt3_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt4_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt5_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt6_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt7_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt8_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt9_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt10_ick", &dummy_ck, CK_443X),
- CLK(NULL, "gpt11_ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.1", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.2", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.3", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.4", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.3", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.4", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
- CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_443X),
- CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_443X),
- CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_443X),
- CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_443X),
- CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_443X),
- CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_443X),
- CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_443X),
- CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_443X),
- CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_443X),
- CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_443X),
- CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_443X),
- CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_443X),
+ CLK(NULL, "extalt_clkin_ck", &extalt_clkin_ck, CK_44XX),
+ CLK(NULL, "pad_clks_ck", &pad_clks_ck, CK_44XX),
+ CLK(NULL, "pad_slimbus_core_clks_ck", &pad_slimbus_core_clks_ck, CK_44XX),
+ CLK(NULL, "secure_32k_clk_src_ck", &secure_32k_clk_src_ck, CK_44XX),
+ CLK(NULL, "slimbus_clk", &slimbus_clk, CK_44XX),
+ CLK(NULL, "sys_32k_ck", &sys_32k_ck, CK_44XX),
+ CLK(NULL, "virt_12000000_ck", &virt_12000000_ck, CK_44XX),
+ CLK(NULL, "virt_13000000_ck", &virt_13000000_ck, CK_44XX),
+ CLK(NULL, "virt_16800000_ck", &virt_16800000_ck, CK_44XX),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_44XX),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_44XX),
+ CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_44XX),
+ CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_44XX),
+ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_44XX),
+ CLK(NULL, "tie_low_clock_ck", &tie_low_clock_ck, CK_44XX),
+ CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_44XX),
+ CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_44XX),
+ CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_44XX),
+ CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_44XX),
+ CLK(NULL, "abe_dpll_bypass_clk_mux_ck", &abe_dpll_bypass_clk_mux_ck, CK_44XX),
+ CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_44XX),
+ CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_44XX),
+ CLK(NULL, "dpll_abe_x2_ck", &dpll_abe_x2_ck, CK_44XX),
+ CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_44XX),
+ CLK(NULL, "abe_24m_fclk", &abe_24m_fclk, CK_44XX),
+ CLK(NULL, "abe_clk", &abe_clk, CK_44XX),
+ CLK(NULL, "aess_fclk", &aess_fclk, CK_44XX),
+ CLK(NULL, "dpll_abe_m3x2_ck", &dpll_abe_m3x2_ck, CK_44XX),
+ CLK(NULL, "core_hsd_byp_clk_mux_ck", &core_hsd_byp_clk_mux_ck, CK_44XX),
+ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_44XX),
+ CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_44XX),
+ CLK(NULL, "dpll_core_m6x2_ck", &dpll_core_m6x2_ck, CK_44XX),
+ CLK(NULL, "dbgclk_mux_ck", &dbgclk_mux_ck, CK_44XX),
+ CLK(NULL, "dpll_core_m2_ck", &dpll_core_m2_ck, CK_44XX),
+ CLK(NULL, "ddrphy_ck", &ddrphy_ck, CK_44XX),
+ CLK(NULL, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, CK_44XX),
+ CLK(NULL, "virt_l3_ck", &virt_l3_ck, CK_44XX),
+ CLK(NULL, "div_core_ck", &div_core_ck, CK_44XX),
+ CLK(NULL, "div_iva_hs_clk", &div_iva_hs_clk, CK_44XX),
+ CLK(NULL, "div_mpu_hs_clk", &div_mpu_hs_clk, CK_44XX),
+ CLK(NULL, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck, CK_44XX),
+ CLK(NULL, "dll_clk_div_ck", &dll_clk_div_ck, CK_44XX),
+ CLK(NULL, "dpll_abe_m2_ck", &dpll_abe_m2_ck, CK_44XX),
+ CLK(NULL, "dpll_core_m3x2_ck", &dpll_core_m3x2_ck, CK_44XX),
+ CLK(NULL, "dpll_core_m7x2_ck", &dpll_core_m7x2_ck, CK_44XX),
+ CLK(NULL, "iva_hsd_byp_clk_mux_ck", &iva_hsd_byp_clk_mux_ck, CK_44XX),
+ CLK(NULL, "dpll_iva_ck", &dpll_iva_ck, CK_44XX),
+ CLK(NULL, "dpll_iva_x2_ck", &dpll_iva_x2_ck, CK_44XX),
+ CLK(NULL, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, CK_44XX),
+ CLK(NULL, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, CK_44XX),
+ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_44XX),
+ CLK(NULL, "virt_dpll_mpu_ck", &virt_dpll_mpu_ck, CK_446X),
+ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_44XX),
+ CLK(NULL, "per_hs_clk_div_ck", &per_hs_clk_div_ck, CK_44XX),
+ CLK(NULL, "per_hsd_byp_clk_mux_ck", &per_hsd_byp_clk_mux_ck, CK_44XX),
+ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_x2_ck", &dpll_per_x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m3x2_ck", &dpll_per_m3x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m6x2_ck", &dpll_per_m6x2_ck, CK_44XX),
+ CLK(NULL, "dpll_per_m7x2_ck", &dpll_per_m7x2_ck, CK_44XX),
+ CLK(NULL, "dpll_unipro_ck", &dpll_unipro_ck, CK_44XX),
+ CLK(NULL, "dpll_unipro_x2_ck", &dpll_unipro_x2_ck, CK_44XX),
+ CLK(NULL, "dpll_unipro_m2x2_ck", &dpll_unipro_m2x2_ck, CK_44XX),
+ CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_44XX),
+ CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_44XX),
+ CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_44XX),
+ CLK(NULL, "dpll_usb_m2_ck", &dpll_usb_m2_ck, CK_44XX),
+ CLK(NULL, "ducati_clk_mux_ck", &ducati_clk_mux_ck, CK_44XX),
+ CLK(NULL, "func_12m_fclk", &func_12m_fclk, CK_44XX),
+ CLK(NULL, "func_24m_clk", &func_24m_clk, CK_44XX),
+ CLK(NULL, "func_24mc_fclk", &func_24mc_fclk, CK_44XX),
+ CLK(NULL, "func_48m_fclk", &func_48m_fclk, CK_44XX),
+ CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_44XX),
+ CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_44XX),
+ CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_44XX),
+ CLK(NULL, "hsmmc6_fclk", &hsmmc6_fclk, CK_44XX),
+ CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_44XX),
+ CLK(NULL, "l3_div_ck", &l3_div_ck, CK_44XX),
+ CLK(NULL, "l4_div_ck", &l4_div_ck, CK_44XX),
+ CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_44XX),
+ CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_44XX),
+ CLK(NULL, "div_ts_ck", &div_ts_ck, CK_446X),
+ CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_44XX),
+ CLK(NULL, "mcasp2_fclk", &mcasp2_fclk, CK_44XX),
+ CLK(NULL, "mcasp3_fclk", &mcasp3_fclk, CK_44XX),
+ CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_44XX),
+ CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_44XX),
+ CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_44XX),
+ CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_44XX),
+ CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_44XX),
+ CLK(NULL, "aes1_fck", &aes1_fck, CK_44XX),
+ CLK(NULL, "aes2_fck", &aes2_fck, CK_44XX),
+ CLK(NULL, "aess_fck", &aess_fck, CK_44XX),
+ CLK("omap_temp_sensor.0", "fck", &bandgap_fclk, CK_443X),
+ CLK("omap_temp_sensor.0", "fck", &bandgap_ts_fclk, CK_446X),
+ CLK(NULL, "des3des_fck", &des3des_fck, CK_44XX),
+ CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_44XX),
+ CLK(NULL, "dmic_fck", &dmic_fck, CK_44XX),
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_44XX),
+ CLK(NULL, "sys_clk", &dss_sys_clk, CK_44XX),
+ CLK(NULL, "tv_clk", &dss_tv_clk, CK_44XX),
+ CLK(NULL, "video_clk", &dss_48mhz_clk, CK_44XX),
+ CLK(NULL, "fck", &dss_dss_clk, CK_44XX),
+ CLK(NULL, "ick", &dss_fck, CK_44XX),
+ CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_44XX),
+ CLK(NULL, "emif1_fck", &emif1_fck, CK_44XX),
+ CLK(NULL, "emif2_fck", &emif2_fck, CK_44XX),
+ CLK(NULL, "fdif_fck", &fdif_fck, CK_44XX),
+ CLK(NULL, "fpka_fck", &fpka_fck, CK_44XX),
+ CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_44XX),
+ CLK(NULL, "gpio1_ick", &gpio1_ick, CK_44XX),
+ CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_44XX),
+ CLK(NULL, "gpio2_ick", &gpio2_ick, CK_44XX),
+ CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_44XX),
+ CLK(NULL, "gpio3_ick", &gpio3_ick, CK_44XX),
+ CLK(NULL, "gpio4_dbclk", &gpio4_dbclk, CK_44XX),
+ CLK(NULL, "gpio4_ick", &gpio4_ick, CK_44XX),
+ CLK(NULL, "gpio5_dbclk", &gpio5_dbclk, CK_44XX),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_44XX),
+ CLK(NULL, "gpio6_dbclk", &gpio6_dbclk, CK_44XX),
+ CLK(NULL, "gpio6_ick", &gpio6_ick, CK_44XX),
+ CLK(NULL, "gpmc_ick", &gpmc_ick, CK_44XX),
+ CLK(NULL, "gpu_fck", &gpu_fck, CK_44XX),
+ CLK("omap2_hdq.0", "fck", &hdq1w_fck, CK_44XX),
+ CLK(NULL, "hsi_fck", &hsi_fck, CK_44XX),
+ CLK("omap_i2c.1", "fck", &i2c1_fck, CK_44XX),
+ CLK("omap_i2c.2", "fck", &i2c2_fck, CK_44XX),
+ CLK("omap_i2c.3", "fck", &i2c3_fck, CK_44XX),
+ CLK("omap_i2c.4", "fck", &i2c4_fck, CK_44XX),
+ CLK(NULL, "ipu_fck", &ipu_fck, CK_44XX),
+ CLK(NULL, "iss_ctrlclk", &iss_ctrlclk, CK_44XX),
+ CLK(NULL, "iss_fck", &iss_fck, CK_44XX),
+ CLK(NULL, "iva_fck", &iva_fck, CK_44XX),
+ CLK(NULL, "kbd_fck", &kbd_fck, CK_44XX),
+ CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_44XX),
+ CLK(NULL, "l3_main_3_ick", &l3_main_3_ick, CK_44XX),
+ CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_44XX),
+ CLK(NULL, "mcasp_fck", &mcasp_fck, CK_44XX),
+ CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_44XX),
+ CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_44XX),
+ CLK(NULL, "mcbsp2_sync_mux_ck", &mcbsp2_sync_mux_ck, CK_44XX),
+ CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_44XX),
+ CLK(NULL, "mcbsp3_sync_mux_ck", &mcbsp3_sync_mux_ck, CK_44XX),
+ CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_44XX),
+ CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_44XX),
+ CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_44XX),
+ CLK(NULL, "mcpdm_fck", &mcpdm_fck, CK_44XX),
+ CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_44XX),
+ CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_44XX),
+ CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_44XX),
+ CLK("omap2_mcspi.4", "fck", &mcspi4_fck, CK_44XX),
+ CLK("omap_hsmmc.0", "fck", &mmc1_fck, CK_44XX),
+ CLK("omap_hsmmc.1", "fck", &mmc2_fck, CK_44XX),
+ CLK("omap_hsmmc.2", "fck", &mmc3_fck, CK_44XX),
+ CLK("omap_hsmmc.3", "fck", &mmc4_fck, CK_44XX),
+ CLK("omap_hsmmc.4", "fck", &mmc5_fck, CK_44XX),
+ CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_44XX),
+ CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_44XX),
+ CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_44XX),
+ CLK("omap_rng", "ick", &rng_ick, CK_44XX),
+ CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_44XX),
+ CLK(NULL, "sl2if_ick", &sl2if_ick, CK_44XX),
+ CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_44XX),
+ CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_44XX),
+ CLK(NULL, "slimbus1_fclk_2", &slimbus1_fclk_2, CK_44XX),
+ CLK(NULL, "slimbus1_slimbus_clk", &slimbus1_slimbus_clk, CK_44XX),
+ CLK(NULL, "slimbus1_fck", &slimbus1_fck, CK_44XX),
+ CLK(NULL, "slimbus2_fclk_1", &slimbus2_fclk_1, CK_44XX),
+ CLK(NULL, "slimbus2_fclk_0", &slimbus2_fclk_0, CK_44XX),
+ CLK(NULL, "slimbus2_slimbus_clk", &slimbus2_slimbus_clk, CK_44XX),
+ CLK(NULL, "slimbus2_fck", &slimbus2_fck, CK_44XX),
+ CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_44XX),
+ CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_44XX),
+ CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_44XX),
+ CLK("omap_timer.1", "fck", &timer1_fck, CK_44XX),
+ CLK("omap_timer.10", "fck", &timer10_fck, CK_44XX),
+ CLK("omap_timer.11", "fck", &timer11_fck, CK_44XX),
+ CLK("omap_timer.2", "fck", &timer2_fck, CK_44XX),
+ CLK("omap_timer.3", "fck", &timer3_fck, CK_44XX),
+ CLK("omap_timer.4", "fck", &timer4_fck, CK_44XX),
+ CLK("omap_timer.5", "fck", &timer5_fck, CK_44XX),
+ CLK("omap_timer.6", "fck", &timer6_fck, CK_44XX),
+ CLK("omap_timer.7", "fck", &timer7_fck, CK_44XX),
+ CLK("omap_timer.8", "fck", &timer8_fck, CK_44XX),
+ CLK("omap_timer.9", "fck", &timer9_fck, CK_44XX),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_44XX),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_44XX),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_44XX),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_44XX),
+ CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_44XX),
+ CLK("usbhs_omap", "fs_fck", &usb_host_fs_fck, CK_44XX),
+ CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_44XX),
+ CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_44XX),
+ CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_44XX),
+ CLK(NULL, "usb_host_hs_utmi_p2_clk", &usb_host_hs_utmi_p2_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_utmi_p3_clk", &usb_host_hs_utmi_p3_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_hsic480m_p1_clk", &usb_host_hs_hsic480m_p1_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_hsic60m_p1_clk", &usb_host_hs_hsic60m_p1_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_44XX),
+ CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_44XX),
+ CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_44XX),
+ CLK("usbhs_omap", "hs_fck", &usb_host_hs_fck, CK_44XX),
+ CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_44XX),
+ CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_44XX),
+ CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_44XX),
+ CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_44XX),
+ CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_44XX),
+ CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_44XX),
+ CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_44XX),
+ CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_44XX),
+ CLK("usbhs_omap", "usbtll_ick", &usb_tll_hs_ick, CK_44XX),
+ CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_44XX),
+ CLK(NULL, "usim_ck", &usim_ck, CK_44XX),
+ CLK(NULL, "usim_fclk", &usim_fclk, CK_44XX),
+ CLK(NULL, "usim_fck", &usim_fck, CK_44XX),
+ CLK("omap_wdt", "fck", &wd_timer2_fck, CK_44XX),
+ CLK(NULL, "mailboxes_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_44XX),
+ CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_44XX),
+ CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_44XX),
+ CLK(NULL, "gpmc_ck", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt1_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt2_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt3_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt4_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt5_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt6_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt7_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt8_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt9_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt10_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "gpt11_ick", &dummy_ck, CK_44XX),
+ CLK("omap_i2c.1", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_i2c.2", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_i2c.3", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_i2c.4", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_hsmmc.3", "ick", &dummy_ck, CK_44XX),
+ CLK("omap_hsmmc.4", "ick", &dummy_ck, CK_44XX),
+ CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_44XX),
+ CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_44XX),
+ CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_44XX),
+ CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_44XX),
+ CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_44XX),
+ CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_44XX),
+ CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_44XX),
+ CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "uart1_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "uart2_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "uart3_ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "uart4_ick", &dummy_ck, CK_44XX),
+ CLK("omap_wdt", "ick", &dummy_ck, CK_44XX),
+ CLK(NULL, "auxclk0_src_ck", &auxclk0_src_ck, CK_44XX),
+ CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_44XX),
+ CLK(NULL, "auxclk1_src_ck", &auxclk1_src_ck, CK_44XX),
+ CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_44XX),
+ CLK(NULL, "auxclk2_src_ck", &auxclk2_src_ck, CK_44XX),
+ CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_44XX),
+ CLK(NULL, "auxclk3_src_ck", &auxclk3_src_ck, CK_44XX),
+ CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_44XX),
+ CLK(NULL, "auxclk4_src_ck", &auxclk4_src_ck, CK_44XX),
+ CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_44XX),
+ CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck, CK_44XX),
+ CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_44XX),
+ CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_44XX),
+ CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_44XX),
+ CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_44XX),
+ CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_44XX),
+ CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_44XX),
+ CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_44XX),
+ CLK("smp_twd", NULL, &smp_twd_443x, CK_443X),
+ CLK("smp_twd", NULL, &smp_twd_446x, CK_446X),
+ CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.3", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.4", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.5", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.6", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.7", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.8", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.9", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.10", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.11", "32k_ck", &sys_32k_ck, CK_44XX),
+ CLK("omap_timer.1", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.2", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.3", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.4", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.9", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.10", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.11", "sys_ck", &sys_clkin_ck, CK_44XX),
+ CLK("omap_timer.5", "sys_ck", &syc_clk_div_ck, CK_44XX),
+ CLK("omap_timer.6", "sys_ck", &syc_clk_div_ck, CK_44XX),
+ CLK("omap_timer.7", "sys_ck", &syc_clk_div_ck, CK_44XX),
+ CLK("omap_timer.8", "sys_ck", &syc_clk_div_ck, CK_44XX),
};
+#define L3_OPP50_RATE 100000000
+#define DPLL_CORE_M2_OPP50_RATE 400000000
+#define DPLL_CORE_M2_OPP100_RATE 800000000
+#define DPLL_CORE_M3_OPP50_RATE 200000000
+#define DPLL_CORE_M3_OPP100_RATE 320000000
+#define DPLL_CORE_M6_OPP50_RATE 200000000
+#define DPLL_CORE_M6_OPP100_RATE 266600000
+#define DPLL_CORE_M7_OPP50_RATE 133333333
+#define DPLL_CORE_M7_OPP100_RATE 266666666
+#define DPLL_PER_M3_OPP50_RATE 192000000
+#define DPLL_PER_M3_OPP100_RATE 256000000
+#define DPLL_PER_M6_OPP50_RATE 192000000
+#define DPLL_PER_M6_OPP100_RATE 384000000
+
+static long omap4_virt_l3_round_rate(struct clk *clk, unsigned long rate)
+{
+ long parent_rate;
+
+ if (!clk || !clk->parent)
+ return 0;
+
+ if (clk->parent->round_rate) {
+ parent_rate = clk->parent->round_rate(clk->parent, rate * 2);
+ if (parent_rate)
+ return parent_rate / 2;
+ }
+ return 0;
+}
+
+static unsigned long omap4_virt_l3_recalc(struct clk *clk)
+{
+ if (!clk || !clk->parent)
+ return 0;
+
+ return clk->parent->rate / 2;
+}
+
+static int omap4_clksel_set_rate(struct clk *clk, unsigned long rate)
+{
+ int ret = -EINVAL;
+
+ if (!clk->set_rate || !clk->round_rate)
+ return ret;
+
+ rate = clk->round_rate(clk, rate);
+ if (rate) {
+ ret = clk->set_rate(clk, rate);
+ if (!ret)
+ propagate_rate(clk);
+ }
+ return ret;
+}
+
+struct virt_l3_ck_deps {
+ unsigned long core_m2_rate;
+ unsigned long core_m3_rate;
+ unsigned long core_m6_rate;
+ unsigned long core_m7_rate;
+ unsigned long per_m3_rate;
+ unsigned long per_m6_rate;
+};
+
+#define NO_OF_L3_OPPS 2
+#define L3_OPP_50_INDEX 0
+#define L3_OPP_100_INDEX 1
+
+static struct virt_l3_ck_deps omap4_virt_l3_clk_deps[NO_OF_L3_OPPS] = {
+ { /* OPP 50 */
+ .core_m2_rate = DPLL_CORE_M2_OPP50_RATE,
+ .core_m3_rate = DPLL_CORE_M3_OPP50_RATE,
+ .core_m6_rate = DPLL_CORE_M6_OPP50_RATE,
+ .core_m7_rate = DPLL_CORE_M7_OPP50_RATE,
+ .per_m3_rate = DPLL_PER_M3_OPP50_RATE,
+ .per_m6_rate = DPLL_PER_M6_OPP50_RATE,
+ },
+ { /* OPP 100 */
+ .core_m2_rate = DPLL_CORE_M2_OPP100_RATE,
+ .core_m3_rate = DPLL_CORE_M3_OPP100_RATE,
+ .core_m6_rate = DPLL_CORE_M6_OPP100_RATE,
+ .core_m7_rate = DPLL_CORE_M7_OPP100_RATE,
+ .per_m3_rate = DPLL_PER_M3_OPP100_RATE,
+ .per_m6_rate = DPLL_PER_M6_OPP100_RATE,
+ },
+};
+
+static int omap4_virt_l3_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct virt_l3_ck_deps *l3_deps;
+
+ if (rate <= L3_OPP50_RATE)
+ l3_deps = &omap4_virt_l3_clk_deps[L3_OPP_50_INDEX];
+ else
+ l3_deps = &omap4_virt_l3_clk_deps[L3_OPP_100_INDEX];
+
+ omap4_clksel_set_rate(&dpll_core_m3x2_ck, l3_deps->core_m3_rate);
+ omap4_clksel_set_rate(&dpll_core_m6x2_ck, l3_deps->core_m6_rate);
+ omap4_clksel_set_rate(&dpll_core_m7x2_ck, l3_deps->core_m7_rate);
+ omap4_clksel_set_rate(&dpll_per_m3x2_ck, l3_deps->per_m3_rate);
+ omap4_clksel_set_rate(&dpll_per_m6x2_ck, l3_deps->per_m6_rate);
+ omap4_clksel_set_rate(&dpll_core_m5x2_ck, rate * 2);
+ omap4_clksel_set_rate(&dpll_core_m2_ck, l3_deps->core_m2_rate);
+
+ clk->rate = rate;
+ return 0;
+}
+
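A minimal sketch of how this virtual L3 scaling would be driven, assuming virt_l3_ck (registered in the clkdev table above) has its set_rate hook wired to omap4_virt_l3_set_rate outside this hunk; the 200 MHz request is only an example:

	struct clk *l3 = clk_get(NULL, "virt_l3_ck");

	if (!IS_ERR(l3)) {
		/* above L3_OPP50_RATE, so the OPP100 dependent rates apply */
		clk_set_rate(l3, 200000000);
		clk_put(l3);
	}

Requests at or below L3_OPP50_RATE (100 MHz) select the OPP50 row of omap4_virt_l3_clk_deps instead.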
int __init omap4xxx_clk_init(void)
{
struct omap_clk *c;
- u32 cpu_clkflg;
+ u32 cpu_clkflg = 0;
- if (cpu_is_omap44xx()) {
- cpu_mask = RATE_IN_4430;
+ if (cpu_is_omap443x()) {
+ cpu_mask = RATE_IN_443X;
cpu_clkflg = CK_443X;
+ } else if (cpu_is_omap446x()) {
+ cpu_mask = RATE_IN_446X;
+ cpu_clkflg = CK_446X;
}
clk_init(&omap2_clk_functions);
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6cb6c03..583cc3d 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -718,6 +718,8 @@
*/
int clkdm_wakeup(struct clockdomain *clkdm)
{
+ int ret;
+
if (!clkdm)
return -EINVAL;
@@ -732,7 +734,10 @@
pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
- return arch_clkdm->clkdm_wakeup(clkdm);
+ ret = arch_clkdm->clkdm_wakeup(clkdm);
+ ret |= pwrdm_wait_transition(clkdm->pwrdm.ptr);
+
+ return ret;
}
/**
@@ -795,6 +800,27 @@
arch_clkdm->clkdm_deny_idle(clkdm);
}
+/**
+ * clkdm_is_idle - check whether hardware-supervised idle is enabled
+ * @clkdm: struct clockdomain *
+ *
+ * Returns 1 if the clockdomain is in hardware-supervised (automatic)
+ * idle mode, 0 if it is not, or a negative value if @clkdm is NULL or
+ * its idle state cannot be read.
+ */
+int clkdm_is_idle(struct clockdomain *clkdm)
+{
+ if (!clkdm)
+ return -EINVAL;
+
+ if (!arch_clkdm || !arch_clkdm->clkdm_is_idle)
+ return -EINVAL;
+
+ pr_debug("clockdomain: reading idle state for %s\n", clkdm->name);
+
+ return arch_clkdm->clkdm_is_idle(clkdm);
+}
+
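A usage sketch for the new accessor, assuming a caller that already holds a struct clockdomain pointer (the surrounding policy is illustrative, not taken from this patch):

	if (clkdm_is_idle(clkdm) > 0)
		return;	/* already under hardware-supervised idle */

	clkdm_allow_idle(clkdm);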
/* Clockdomain-to-clock framework interface code */
@@ -825,7 +851,12 @@
if (!arch_clkdm || !arch_clkdm->clkdm_clk_enable)
return -EINVAL;
- if (atomic_inc_return(&clkdm->usecount) > 1)
+	/*
+	 * For architectures with no autodeps, clkdm_clk_enable
+	 * should be called for every clock instance that is
+	 * enabled, so that the clkdm can be force-woken.
+	 */
+ if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
return 0;
/* Clockdomain now has one enabled downstream clock */
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index 5823584..085ed82 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -138,6 +138,7 @@
* @clkdm_wakeup: Force a clockdomain to wakeup
* @clkdm_allow_idle: Enable hw supervised idle transitions for clock domain
* @clkdm_deny_idle: Disable hw supervised idle transitions for clock domain
+ * @clkdm_is_idle: Check if hw supervised idle transitions are enabled
* @clkdm_clk_enable: Put the clkdm in right state for a clock enable
* @clkdm_clk_disable: Put the clkdm in right state for a clock disable
*/
@@ -154,6 +155,7 @@
int (*clkdm_wakeup)(struct clockdomain *clkdm);
void (*clkdm_allow_idle)(struct clockdomain *clkdm);
void (*clkdm_deny_idle)(struct clockdomain *clkdm);
+ int (*clkdm_is_idle)(struct clockdomain *clkdm);
int (*clkdm_clk_enable)(struct clockdomain *clkdm);
int (*clkdm_clk_disable)(struct clockdomain *clkdm);
};
@@ -177,6 +179,7 @@
void clkdm_allow_idle(struct clockdomain *clkdm);
void clkdm_deny_idle(struct clockdomain *clkdm);
+int clkdm_is_idle(struct clockdomain *clkdm);
int clkdm_wakeup(struct clockdomain *clkdm);
int clkdm_sleep(struct clockdomain *clkdm);
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index 48d0db7..db49baa 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
+#include <linux/errno.h>
#include <plat/prcm.h>
#include "prm.h"
#include "prm2xxx_3xxx.h"
@@ -146,6 +147,15 @@
_clkdm_del_autodeps(clkdm);
}
+static int omap2_clkdm_is_idle(struct clockdomain *clkdm)
+{
+ if (!clkdm->clktrctrl_mask)
+		return -EINVAL;
+
+ return omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+}
+
static void _enable_hwsup(struct clockdomain *clkdm)
{
if (cpu_is_omap24xx())
@@ -252,6 +262,7 @@
.clkdm_wakeup = omap2_clkdm_wakeup,
.clkdm_allow_idle = omap2_clkdm_allow_idle,
.clkdm_deny_idle = omap2_clkdm_deny_idle,
+ .clkdm_is_idle = omap2_clkdm_is_idle,
.clkdm_clk_enable = omap2_clkdm_clk_enable,
.clkdm_clk_disable = omap2_clkdm_clk_disable,
};
@@ -269,6 +280,7 @@
.clkdm_wakeup = omap3_clkdm_wakeup,
.clkdm_allow_idle = omap3_clkdm_allow_idle,
.clkdm_deny_idle = omap3_clkdm_deny_idle,
+ .clkdm_is_idle = omap2_clkdm_is_idle,
.clkdm_clk_enable = omap2_clkdm_clk_enable,
.clkdm_clk_disable = omap2_clkdm_clk_disable,
};
diff --git a/arch/arm/mach-omap2/clockdomain44xx.c b/arch/arm/mach-omap2/clockdomain44xx.c
index a1a4ecd..5024e63 100644
--- a/arch/arm/mach-omap2/clockdomain44xx.c
+++ b/arch/arm/mach-omap2/clockdomain44xx.c
@@ -93,15 +93,16 @@
clkdm->cm_inst, clkdm->clkdm_offs);
}
+static int omap4_clkdm_is_idle(struct clockdomain *clkdm)
+{
+ return omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
+ clkdm->cm_inst, clkdm->clkdm_offs);
+}
+
static int omap4_clkdm_clk_enable(struct clockdomain *clkdm)
{
- bool hwsup = false;
-
- hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
- clkdm->cm_inst, clkdm->clkdm_offs);
-
- if (!hwsup)
- clkdm_wakeup(clkdm);
+	/* For every clock enable, force a wakeup of the clkdm */
+ clkdm_wakeup(clkdm);
return 0;
}
@@ -132,6 +133,7 @@
.clkdm_wakeup = omap4_clkdm_wakeup,
.clkdm_allow_idle = omap4_clkdm_allow_idle,
.clkdm_deny_idle = omap4_clkdm_deny_idle,
+ .clkdm_is_idle = omap4_clkdm_is_idle,
.clkdm_clk_enable = omap4_clkdm_clk_enable,
.clkdm_clk_disable = omap4_clkdm_clk_disable,
};
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index a607ec1..8c73442 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -35,55 +35,55 @@
static struct clkdm_dep ducati_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_2_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_dss_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_gfx_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_init_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_secure_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_wkup_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "tesla_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -91,15 +91,15 @@
static struct clkdm_dep iss_wkup_sleep_deps[] = {
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -107,11 +107,11 @@
static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -119,35 +119,35 @@
static struct clkdm_dep l3_d2d_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_2_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_init_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -155,47 +155,47 @@
static struct clkdm_dep l3_dma_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ducati_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_dss_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_init_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_secure_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_wkup_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -203,15 +203,15 @@
static struct clkdm_dep l3_dss_wkup_sleep_deps[] = {
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_2_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -219,15 +219,15 @@
static struct clkdm_dep l3_gfx_wkup_sleep_deps[] = {
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -235,31 +235,31 @@
static struct clkdm_dep l3_init_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_secure_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_wkup_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -267,15 +267,15 @@
static struct clkdm_dep l4_secure_wkup_sleep_deps[] = {
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -283,59 +283,59 @@
static struct clkdm_dep mpuss_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ducati_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_2_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_dss_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_gfx_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_init_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_secure_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_wkup_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "tesla_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -343,39 +343,39 @@
static struct clkdm_dep tesla_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "ivahd_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_1_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_2_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_emif_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l3_init_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_cfg_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_per_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{
.clkdm_name = "l4_wkup_clkdm",
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX)
},
{ NULL },
};
@@ -387,7 +387,7 @@
.cm_inst = OMAP4430_CM2_CEFUSE_INST,
.clkdm_offs = OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l4_cfg_44xx_clkdm = {
@@ -398,7 +398,7 @@
.clkdm_offs = OMAP4430_CM2_CORE_L4CFG_CDOFFS,
.dep_bit = OMAP4430_L4CFG_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain tesla_44xx_clkdm = {
@@ -411,7 +411,7 @@
.wkdep_srcs = tesla_wkup_sleep_deps,
.sleepdep_srcs = tesla_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_gfx_44xx_clkdm = {
@@ -424,7 +424,7 @@
.wkdep_srcs = l3_gfx_wkup_sleep_deps,
.sleepdep_srcs = l3_gfx_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain ivahd_44xx_clkdm = {
@@ -437,7 +437,7 @@
.wkdep_srcs = ivahd_wkup_sleep_deps,
.sleepdep_srcs = ivahd_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l4_secure_44xx_clkdm = {
@@ -450,7 +450,7 @@
.wkdep_srcs = l4_secure_wkup_sleep_deps,
.sleepdep_srcs = l4_secure_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l4_per_44xx_clkdm = {
@@ -461,7 +461,7 @@
.clkdm_offs = OMAP4430_CM2_L4PER_L4PER_CDOFFS,
.dep_bit = OMAP4430_L4PER_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain abe_44xx_clkdm = {
@@ -472,7 +472,7 @@
.clkdm_offs = OMAP4430_CM1_ABE_ABE_CDOFFS,
.dep_bit = OMAP4430_ABE_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_instr_44xx_clkdm = {
@@ -481,7 +481,7 @@
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_L3INSTR_CDOFFS,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_init_44xx_clkdm = {
@@ -493,8 +493,8 @@
.dep_bit = OMAP4430_L3INIT_STATDEP_SHIFT,
.wkdep_srcs = l3_init_wkup_sleep_deps,
.sleepdep_srcs = l3_init_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .flags = CLKDM_CAN_SWSUP,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain mpuss_44xx_clkdm = {
@@ -506,7 +506,7 @@
.wkdep_srcs = mpuss_wkup_sleep_deps,
.sleepdep_srcs = mpuss_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain mpu0_44xx_clkdm = {
@@ -516,7 +516,7 @@
.cm_inst = OMAP4430_PRCM_MPU_CPU0_INST,
.clkdm_offs = OMAP4430_PRCM_MPU_CPU0_CPU0_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain mpu1_44xx_clkdm = {
@@ -526,7 +526,7 @@
.cm_inst = OMAP4430_PRCM_MPU_CPU1_INST,
.clkdm_offs = OMAP4430_PRCM_MPU_CPU1_CPU1_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_emif_44xx_clkdm = {
@@ -537,7 +537,7 @@
.clkdm_offs = OMAP4430_CM2_CORE_MEMIF_CDOFFS,
.dep_bit = OMAP4430_MEMIF_STATDEP_SHIFT,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l4_ao_44xx_clkdm = {
@@ -547,7 +547,7 @@
.cm_inst = OMAP4430_CM2_ALWAYS_ON_INST,
.clkdm_offs = OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain ducati_44xx_clkdm = {
@@ -560,7 +560,7 @@
.wkdep_srcs = ducati_wkup_sleep_deps,
.sleepdep_srcs = ducati_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_2_44xx_clkdm = {
@@ -571,7 +571,7 @@
.clkdm_offs = OMAP4430_CM2_CORE_L3_2_CDOFFS,
.dep_bit = OMAP4430_L3_2_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_1_44xx_clkdm = {
@@ -582,7 +582,7 @@
.clkdm_offs = OMAP4430_CM2_CORE_L3_1_CDOFFS,
.dep_bit = OMAP4430_L3_1_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_d2d_44xx_clkdm = {
@@ -594,7 +594,7 @@
.wkdep_srcs = l3_d2d_wkup_sleep_deps,
.sleepdep_srcs = l3_d2d_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain iss_44xx_clkdm = {
@@ -605,8 +605,8 @@
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .flags = CLKDM_CAN_SWSUP,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_dss_44xx_clkdm = {
@@ -619,7 +619,7 @@
.wkdep_srcs = l3_dss_wkup_sleep_deps,
.sleepdep_srcs = l3_dss_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l4_wkup_44xx_clkdm = {
@@ -630,7 +630,7 @@
.clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
.dep_bit = OMAP4430_L4WKUP_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain emu_sys_44xx_clkdm = {
@@ -640,7 +640,7 @@
.cm_inst = OMAP4430_PRM_EMU_CM_INST,
.clkdm_offs = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
.flags = CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain l3_dma_44xx_clkdm = {
@@ -652,7 +652,7 @@
.wkdep_srcs = l3_dma_wkup_sleep_deps,
.sleepdep_srcs = l3_dma_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct clockdomain *clockdomains_omap44xx[] __initdata = {
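The wholesale CHIP_IS_OMAP4430 to CHIP_IS_OMAP44XX conversion above widens each clockdomain and dependency entry so it is registered on any OMAP4 variant (OMAP4430 and the newly supported OMAP4460), not just 4430. A hypothetical entry using only the constructs visible in this hunk:

	/*
	 * Hypothetical dependency entry, shown only to illustrate the new mask:
	 * CHIP_IS_OMAP44XX matches both OMAP4430 and OMAP4460 silicon.
	 */
	static struct clkdm_dep example_wkup_sleep_deps[] = {
		{
			.clkdm_name = "l3_1_clkdm",
			.omap_chip  = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
		},
		{ NULL },
	};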
diff --git a/arch/arm/mach-omap2/cm-regbits-44xx.h b/arch/arm/mach-omap2/cm-regbits-44xx.h
index 9d47a05..4c4cbfa 100644
--- a/arch/arm/mach-omap2/cm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-44xx.h
@@ -106,6 +106,10 @@
#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_MASK (1 << 9)
+/* Used by CM_L4CFG_CLKSTCTRL */
+#define OMAP4460_CLKACTIVITY_CORE_TS_GFCLK_SHIFT 9
+#define OMAP4460_CLKACTIVITY_CORE_TS_GFCLK_MASK (1 << 9)
+
/* Used by CM_CEFUSE_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK (1 << 9)
@@ -418,6 +422,10 @@
#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_SHIFT 11
#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_MASK (1 << 11)
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4460_CLKACTIVITY_WKUP_TS_GFCLK_SHIFT 13
+#define OMAP4460_CLKACTIVITY_WKUP_TS_GFCLK_MASK (1 << 13)
+
/*
* Used by CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL,
* CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL,
@@ -449,6 +457,10 @@
#define OMAP4430_CLKSEL_60M_SHIFT 24
#define OMAP4430_CLKSEL_60M_MASK (1 << 24)
+/* Used by CM_MPU_MPU_CLKCTRL */
+#define OMAP4460_CLKSEL_ABE_DIV_MODE_SHIFT 25
+#define OMAP4460_CLKSEL_ABE_DIV_MODE_MASK (1 << 25)
+
/* Used by CM1_ABE_AESS_CLKCTRL */
#define OMAP4430_CLKSEL_AESS_FCLK_SHIFT 24
#define OMAP4430_CLKSEL_AESS_FCLK_MASK (1 << 24)
@@ -468,6 +480,10 @@
#define OMAP4430_CLKSEL_DIV_SHIFT 24
#define OMAP4430_CLKSEL_DIV_MASK (1 << 24)
+/* Used by CM_MPU_MPU_CLKCTRL */
+#define OMAP4460_CLKSEL_EMIF_DIV_MODE_SHIFT 24
+#define OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK (1 << 24)
+
/* Used by CM_CAM_FDIF_CLKCTRL */
#define OMAP4430_CLKSEL_FCLK_SHIFT 24
#define OMAP4430_CLKSEL_FCLK_MASK (0x3 << 24)
@@ -572,6 +588,14 @@
#define OMAP4430_D2D_STATDEP_SHIFT 18
#define OMAP4430_D2D_STATDEP_MASK (1 << 18)
+/* Used by CM_CLKSEL_DPLL_MPU */
+#define OMAP4460_DCC_COUNT_MAX_SHIFT 24
+#define OMAP4460_DCC_COUNT_MAX_MASK (0xff << 24)
+
+/* Used by CM_CLKSEL_DPLL_MPU */
+#define OMAP4460_DCC_EN_SHIFT 22
+#define OMAP4460_DCC_EN_MASK (1 << 22)
+
/*
* Used by CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
* CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE, CM_SSC_DELTAMSTEP_DPLL_DDRPHY,
@@ -1204,6 +1228,10 @@
#define OMAP4430_MODULEMODE_SHIFT 0
#define OMAP4430_MODULEMODE_MASK (0x3 << 0)
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4460_MPU_DYNDEP_SHIFT 19
+#define OMAP4460_MPU_DYNDEP_MASK (1 << 19)
+
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT 9
#define OMAP4430_OPTFCLKEN_48MHZ_CLK_MASK (1 << 9)
@@ -1298,6 +1326,10 @@
#define OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT 10
#define OMAP4430_OPTFCLKEN_SYS_CLK_MASK (1 << 10)
+/* Used by CM_WKUP_BANDGAP_CLKCTRL */
+#define OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT 8
+#define OMAP4460_OPTFCLKEN_TS_FCLK_MASK (1 << 8)
+
/* Used by CM_DSS_DSS_CLKCTRL */
#define OMAP4430_OPTFCLKEN_TV_CLK_SHIFT 11
#define OMAP4430_OPTFCLKEN_TV_CLK_MASK (1 << 11)
diff --git a/arch/arm/mach-omap2/cm1_44xx.h b/arch/arm/mach-omap2/cm1_44xx.h
index e2d7a56..6a34bed 100644
--- a/arch/arm/mach-omap2/cm1_44xx.h
+++ b/arch/arm/mach-omap2/cm1_44xx.h
@@ -82,8 +82,8 @@
#define OMAP4430_CM_DIV_M7_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0044)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET 0x0048
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0048)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_OFFSET 0x004c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET 0x004c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET 0x0050
#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0050)
#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET 0x0060
@@ -98,8 +98,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0070)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET 0x0088
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0088)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_MPU_OFFSET 0x008c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET 0x008c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET 0x009c
#define OMAP4430_CM_BYPCLK_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x009c)
#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET 0x00a0
@@ -116,8 +116,8 @@
#define OMAP4430_CM_DIV_M5_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00bc)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET 0x00c8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00c8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_IVA_OFFSET 0x00cc
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET 0x00cc
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET 0x00dc
#define OMAP4430_CM_BYPCLK_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00dc)
#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET 0x00e0
@@ -134,8 +134,8 @@
#define OMAP4430_CM_DIV_M3_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00f4)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET 0x0108
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0108)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_ABE_OFFSET 0x010c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET 0x010c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET 0x0120
#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0120)
#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET 0x0124
@@ -154,8 +154,8 @@
#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0140)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET 0x0148
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0148)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_DDRPHY_OFFSET 0x014c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET 0x014c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET 0x0160
#define OMAP4430_CM_SHADOW_FREQ_CONFIG1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0160)
#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET 0x0164
@@ -236,8 +236,8 @@
#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x001c)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET 0x0020
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0020)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE_OFFSET 0x0024
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0024)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE_OFFSET 0x0024
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0024)
#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET 0x0028
#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0028)
#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET 0x002c
@@ -253,9 +253,11 @@
#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET 0x0040
#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0040)
+#ifndef __ASSEMBLER__
/* Function prototypes */
extern u32 omap4_cm1_read_inst_reg(s16 inst, u16 idx);
extern void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 idx);
extern u32 omap4_cm1_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+#endif
#endif
diff --git a/arch/arm/mach-omap2/cm2_44xx.h b/arch/arm/mach-omap2/cm2_44xx.h
index aa47450..b6ed984 100644
--- a/arch/arm/mach-omap2/cm2_44xx.h
+++ b/arch/arm/mach-omap2/cm2_44xx.h
@@ -121,8 +121,8 @@
#define OMAP4430_CM_DIV_M7_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0064)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET 0x0068
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0068)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_PER_OFFSET 0x006c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x006c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET 0x0080
#define OMAP4430_CM_CLKMODE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0080)
#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET 0x0084
@@ -135,8 +135,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0090)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET 0x00a8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00a8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_USB_OFFSET 0x00ac
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET 0x00ac
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET 0x00b4
#define OMAP4430_CM_CLKDCOLDO_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00b4)
#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET 0x00c0
@@ -151,8 +151,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00d0)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET 0x00e8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00e8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_UNIPRO_OFFSET 0x00ec
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET 0x00ec
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
/* CM2.ALWAYS_ON_CM2 register offsets */
#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET 0x0000
@@ -500,9 +500,11 @@
#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET 0x005c
#define OMAP4430_CM_SDMA_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x005c)
+#ifndef __ASSEMBLER__
/* Function prototypes */
extern u32 omap4_cm2_read_inst_reg(s16 inst, u16 idx);
extern void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 idx);
extern u32 omap4_cm2_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+#endif
#endif
diff --git a/arch/arm/mach-omap2/cm44xx.c b/arch/arm/mach-omap2/cm44xx.c
index e96f53e..16d5f3d 100644
--- a/arch/arm/mach-omap2/cm44xx.c
+++ b/arch/arm/mach-omap2/cm44xx.c
@@ -21,8 +21,11 @@
#include <plat/common.h>
#include "cm.h"
+#include "cm44xx.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
+#include "cminst44xx.h"
+#include "prcm44xx.h"
#include "cm-regbits-44xx.h"
/* CM1 hardware module low-level functions */
@@ -50,3 +53,322 @@
{
__raw_writel(val, OMAP44XX_CM2_REGADDR(inst, reg));
}
+
+#define MAX_CM_REGISTERS 51
+
+struct omap4_cm_tuple {
+ u16 addr;
+ u32 val;
+};
+
+struct omap4_cm_regs {
+ u32 mod_off;
+ u32 no_reg;
+ struct omap4_cm_tuple reg[MAX_CM_REGISTERS];
+};
+
+static struct omap4_cm_regs cm1_regs[] = {
+ /* OMAP4430_CM1_OCP_SOCKET_MOD */
+ { .mod_off = OMAP4430_CM1_OCP_SOCKET_INST, .no_reg = 1,
+ {{.addr = OMAP4_CM_CM1_PROFILING_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM1_CKGEN_MOD */
+ { .mod_off = OMAP4430_CM1_CKGEN_INST, .no_reg = 4,
+ {{.addr = OMAP4_CM_CLKSEL_CORE_OFFSET},
+ {.addr = OMAP4_CM_CLKSEL_ABE_OFFSET},
+ {.addr = OMAP4_CM_DLL_CTRL_OFFSET},
+ {.addr = OMAP4_CM_DYN_DEP_PRESCAL_OFFSET} },
+ },
+ /* OMAP4430_CM1_MPU_MOD */
+ { .mod_off = OMAP4430_CM1_MPU_INST, .no_reg = 4,
+ {{.addr = OMAP4_CM_MPU_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_MPU_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_MPU_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM1_TESLA_MOD */
+ { .mod_off = OMAP4430_CM1_TESLA_INST, .no_reg = 4,
+ {{.addr = OMAP4_CM_TESLA_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_TESLA_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_TESLA_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM1_ABE_MOD */
+ { .mod_off = OMAP4430_CM1_ABE_INST, .no_reg = 15,
+ {{.addr = OMAP4_CM1_ABE_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET} },
+ },
+};
+
+static struct omap4_cm_regs cm2_regs[] = {
+ /* OMAP4430_CM2_OCP_SOCKET_MOD */
+ {.mod_off = OMAP4430_CM2_OCP_SOCKET_INST, .no_reg = 1,
+ {{.addr = OMAP4_CM_CM2_PROFILING_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_CKGEN_MOD */
+ {.mod_off = OMAP4430_CM2_CKGEN_INST, .no_reg = 12,
+ {{.addr = OMAP4_CM_CLKSEL_DUCATI_ISS_ROOT_OFFSET},
+ {.addr = OMAP4_CM_CLKSEL_USB_60MHZ_OFFSET},
+ {.addr = OMAP4_CM_SCALE_FCLK_OFFSET},
+ {.addr = OMAP4_CM_CORE_DVFS_PERF1_OFFSET},
+ {.addr = OMAP4_CM_CORE_DVFS_PERF2_OFFSET},
+ {.addr = OMAP4_CM_CORE_DVFS_PERF3_OFFSET},
+ {.addr = OMAP4_CM_CORE_DVFS_PERF4_OFFSET},
+ {.addr = OMAP4_CM_CORE_DVFS_CURRENT_OFFSET},
+ {.addr = OMAP4_CM_IVA_DVFS_PERF_TESLA_OFFSET},
+ {.addr = OMAP4_CM_IVA_DVFS_PERF_IVAHD_OFFSET},
+ {.addr = OMAP4_CM_IVA_DVFS_PERF_ABE_OFFSET},
+ {.addr = OMAP4_CM_IVA_DVFS_CURRENT_OFFSET} },
+ },
+ /* OMAP4430_CM2_ALWAYS_ON_MOD */
+ {.mod_off = OMAP4430_CM2_ALWAYS_ON_INST, .no_reg = 6,
+ {{.addr = OMAP4_CM_ALWON_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_ALWON_MDMINTC_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_ALWON_USBPHY_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_CORE_MOD */
+ {.mod_off = OMAP4430_CM2_CORE_INST, .no_reg = 41,
+ {{.addr = OMAP4_CM_L3_1_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3_1_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3_2_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3_2_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3_2_GPMC_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_DUCATI_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_DUCATI_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_SDMA_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_SDMA_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_SDMA_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_DLL_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_EMIF_H1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_EMIF_H2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_MEMIF_DLL_H_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_D2D_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_D2D_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_D2D_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_D2D_INSTEM_ICR_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INSTR_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_IVAHD_MOD */
+ {.mod_off = OMAP4430_CM2_IVAHD_INST, .no_reg = 5,
+ {{.addr = OMAP4_CM_IVAHD_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_IVAHD_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_IVAHD_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_CAM_MOD */
+ {.mod_off = OMAP4430_CM2_CAM_INST, .no_reg = 5,
+ {{.addr = OMAP4_CM_CAM_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_CAM_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_CAM_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_DSS_MOD */
+ {.mod_off = OMAP4430_CM2_DSS_INST, .no_reg = 5,
+ {{.addr = OMAP4_CM_DSS_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_DSS_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_DSS_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_DSS_DEISS_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_GFX_MOD */
+ {.mod_off = OMAP4430_CM2_GFX_INST, .no_reg = 4,
+ {{.addr = OMAP4_CM_GFX_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_GFX_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_GFX_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_L3INIT_MOD */
+ {.mod_off = OMAP4430_CM2_L3INIT_INST, .no_reg = 20,
+ {{.addr = OMAP4_CM_L3INIT_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_UNIPRO1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_P1500_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_EMAC_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_SATA_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_TPPSS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_PCIESS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_CCPTX_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_XHPI_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_MMC6_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_USB_HOST_FS_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_L4PER_MOD */
+ {.mod_off = OMAP4430_CM2_L4PER_INST, .no_reg = 51,
+ {{.addr = OMAP4_CM_L4PER_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L4PER_ADC_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_HECC1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_HECC2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCASP2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCASP3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MGATE_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MSPROHG_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4PER_I2C5_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_STATICDEP_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_DYNAMICDEP_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_PKAEIP29_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_RNG_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET},
+ {.addr = OMAP4_CM_L4SEC_CRYPTODMA_CLKCTRL_OFFSET} },
+ },
+ /* OMAP4430_CM2_CEFUSE_MOD */
+ {.mod_off = OMAP4430_CM2_CEFUSE_INST, .no_reg = 2,
+ {{.addr = OMAP4_CM_CEFUSE_CLKSTCTRL_OFFSET},
+ {.addr = OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET} },
+ },
+};
+
+static void omap4_cm1_prepare_off(void)
+{
+ u32 i, j;
+ struct omap4_cm_regs *cm_reg = cm1_regs;
+
+ for (i = 0; i < ARRAY_SIZE(cm1_regs); i++, cm_reg++) {
+ for (j = 0; j < cm_reg->no_reg; j++) {
+ cm_reg->reg[j].val =
+ omap4_cminst_read_inst_reg(OMAP4430_CM1_PARTITION,
+ cm_reg->mod_off,
+ cm_reg->reg[j].addr);
+ }
+ }
+}
+
+static void omap4_cm2_prepare_off(void)
+{
+ u32 i, j;
+ struct omap4_cm_regs *cm_reg = cm2_regs;
+
+ for (i = 0; i < ARRAY_SIZE(cm2_regs); i++, cm_reg++) {
+ for (j = 0; j < cm_reg->no_reg; j++) {
+ cm_reg->reg[j].val =
+ omap4_cminst_read_inst_reg(OMAP4430_CM2_PARTITION,
+ cm_reg->mod_off,
+ cm_reg->reg[j].addr);
+ }
+ }
+}
+
+static void omap4_cm1_resume_off(void)
+{
+ u32 i, j;
+ struct omap4_cm_regs *cm_reg = cm1_regs;
+
+ for (i = 0; i < ARRAY_SIZE(cm1_regs); i++, cm_reg++) {
+ for (j = 0; j < cm_reg->no_reg; j++) {
+ omap4_cminst_write_inst_reg(cm_reg->reg[j].val,
+ OMAP4430_CM1_PARTITION,
+ cm_reg->mod_off,
+ cm_reg->reg[j].addr);
+ }
+ }
+}
+
+static void omap4_cm2_resume_off(void)
+{
+ u32 i, j;
+ struct omap4_cm_regs *cm_reg = cm2_regs;
+
+ for (i = 0; i < ARRAY_SIZE(cm2_regs); i++, cm_reg++) {
+ for (j = 0; j < cm_reg->no_reg; j++) {
+ omap4_cminst_write_inst_reg(cm_reg->reg[j].val,
+ OMAP4430_CM2_PARTITION,
+ cm_reg->mod_off,
+ cm_reg->reg[j].addr);
+ }
+ }
+}
+
+void omap4_cm_prepare_off(void)
+{
+ omap4_cm1_prepare_off();
+ omap4_cm2_prepare_off();
+}
+
+void omap4_cm_resume_off(void)
+{
+ omap4_cm1_resume_off();
+ omap4_cm2_resume_off();
+}
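The new omap4_cm_prepare_off()/omap4_cm_resume_off() pair simply snapshots and restores the CM1/CM2 register lists defined above around a context-losing low-power transition. A hedged sketch of the intended call pattern; the real call site lives in the OMAP4 PM code and is not part of this hunk:

	static void example_enter_off_mode(void)
	{
		omap4_cm_prepare_off();		/* save CM1/CM2 register context */
		/* ... program the off state and execute WFI, losing CM context ... */
		omap4_cm_resume_off();		/* restore CM1/CM2 register context */
	}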
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index 0b87ec8..4124989 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -27,6 +27,10 @@
# ifndef __ASSEMBLER__
extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
+extern void omap4_cm_prepare_off(void);
+extern void omap4_cm_resume_off(void);
+extern void omap4_dpll_prepare_off(void);
+extern void omap4_dpll_resume_off(void);
# endif
#endif
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 3f20cbb..4ffcb54 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -127,6 +127,7 @@
.tap = OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE),
.ctrl = OMAP443X_SCM_BASE,
.ctrl_pad = OMAP443X_CTRL_BASE,
+ .ctrl_wk_pad = OMAP443X_CTRL_WK_BASE,
.prm = OMAP4430_PRM_BASE,
.cm = OMAP4430_CM_BASE,
.cm2 = OMAP4430_CM2_BASE,
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index da53ba3..5faf166 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -32,6 +32,7 @@
static void __iomem *omap2_ctrl_base;
static void __iomem *omap4_ctrl_pad_base;
+static void __iomem *omap4_ctrl_wk_pad_base;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
struct omap3_scratchpad {
@@ -146,6 +147,7 @@
#define OMAP_CTRL_REGADDR(reg) (omap2_ctrl_base + (reg))
#define OMAP4_CTRL_PAD_REGADDR(reg) (omap4_ctrl_pad_base + (reg))
+#define OMAP4_CTRL_WK_PAD_REGADDR(reg) (omap4_ctrl_wk_pad_base + (reg))
void __init omap2_set_globals_control(struct omap_globals *omap2_globals)
{
@@ -160,6 +162,17 @@
omap4_ctrl_pad_base = ioremap(omap2_globals->ctrl_pad, SZ_4K);
WARN_ON(!omap4_ctrl_pad_base);
}
+
+ /*
+	 * static mapping, never released. The OMAP4 wakeup pad registers are
+	 * separate from the core control module, so they need to be mapped
+	 * individually.
+ */
+ if (omap2_globals->ctrl_wk_pad) {
+ omap4_ctrl_wk_pad_base = ioremap(omap2_globals->ctrl_wk_pad,
+ SZ_4K);
+ WARN_ON(!omap4_ctrl_wk_pad_base);
+ }
+
}
void __iomem *omap_ctrl_base_get(void)
@@ -204,16 +217,66 @@
* registers. This APIs will work only for OMAP4
*/
+u8 omap4_ctrl_pad_readb(u16 offset)
+{
+ return __raw_readb(OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
+u16 omap4_ctrl_pad_readw(u16 offset)
+{
+ return __raw_readw(OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
u32 omap4_ctrl_pad_readl(u16 offset)
{
return __raw_readl(OMAP4_CTRL_PAD_REGADDR(offset));
}
+void omap4_ctrl_pad_writeb(u8 val, u16 offset)
+{
+ __raw_writeb(val, OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
+void omap4_ctrl_pad_writew(u16 val, u16 offset)
+{
+ __raw_writew(val, OMAP4_CTRL_PAD_REGADDR(offset));
+}
+
void omap4_ctrl_pad_writel(u32 val, u16 offset)
{
__raw_writel(val, OMAP4_CTRL_PAD_REGADDR(offset));
}
+u8 omap4_ctrl_wk_pad_readb(u16 offset)
+{
+ return __raw_readb(OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
+u16 omap4_ctrl_wk_pad_readw(u16 offset)
+{
+ return __raw_readw(OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
+u32 omap4_ctrl_wk_pad_readl(u16 offset)
+{
+ return __raw_readl(OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
+void omap4_ctrl_wk_pad_writeb(u8 val, u16 offset)
+{
+ __raw_writeb(val, OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
+void omap4_ctrl_wk_pad_writew(u16 val, u16 offset)
+{
+ __raw_writew(val, OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
+void omap4_ctrl_wk_pad_writel(u32 val, u16 offset)
+{
+ __raw_writel(val, OMAP4_CTRL_WK_PAD_REGADDR(offset));
+}
+
#ifdef CONFIG_ARCH_OMAP3
/**
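The wakeup-domain pad registers get their own accessor family above because they live in a separately mapped region (ctrl_wk_pad) rather than the core pad area. A hedged read-modify-write sketch through the new helpers; the 0x0 offset and the bit being set are placeholders, not real register fields:

	#include <linux/types.h>
	#include <linux/bitops.h>

	/* Hedged sketch: RMW of a wakeup-domain pad register via the new helpers. */
	static void example_wk_pad_rmw(void)
	{
		u32 val = omap4_ctrl_wk_pad_readl(0x0);	/* placeholder offset */

		omap4_ctrl_wk_pad_writel(val | BIT(0), 0x0);
	}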
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a016c8b..ccebb26 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -195,6 +195,7 @@
#define OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO 0x249
#define OMAP44XX_CONTROL_FUSE_CORE_OPP50 0x254
#define OMAP44XX_CONTROL_FUSE_CORE_OPP100 0x257
+#define OMAP44XX_CONTROL_FUSE_CORE_OPP100OV 0x25A
/* AM35XX only CONTROL_GENERAL register offsets */
#define AM35XX_CONTROL_MSUSPENDMUX_6 (OMAP2_CONTROL_GENERAL + 0x0038)
@@ -378,11 +379,21 @@
extern u8 omap_ctrl_readb(u16 offset);
extern u16 omap_ctrl_readw(u16 offset);
extern u32 omap_ctrl_readl(u16 offset);
+extern u8 omap4_ctrl_pad_readb(u16 offset);
+extern u16 omap4_ctrl_pad_readw(u16 offset);
extern u32 omap4_ctrl_pad_readl(u16 offset);
+extern u8 omap4_ctrl_wk_pad_readb(u16 offset);
+extern u16 omap4_ctrl_wk_pad_readw(u16 offset);
+extern u32 omap4_ctrl_wk_pad_readl(u16 offset);
extern void omap_ctrl_writeb(u8 val, u16 offset);
extern void omap_ctrl_writew(u16 val, u16 offset);
extern void omap_ctrl_writel(u32 val, u16 offset);
+extern void omap4_ctrl_pad_writeb(u8 val, u16 offset);
+extern void omap4_ctrl_pad_writew(u16 val, u16 offset);
extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
+extern void omap4_ctrl_wk_pad_writeb(u8 val, u16 offset);
+extern void omap4_ctrl_wk_pad_writew(u16 val, u16 offset);
+extern void omap4_ctrl_wk_pad_writel(u32 val, u16 offset);
extern void omap3_save_scratchpad_contents(void);
extern void omap3_clear_scratchpad_contents(void);
@@ -400,11 +411,21 @@
#define omap_ctrl_readb(x) 0
#define omap_ctrl_readw(x) 0
#define omap_ctrl_readl(x) 0
-#define omap4_ctrl_pad_readl(x) 0
+#define omap4_ctrl_pad_readb(x) 0
+#define omap4_ctrl_pad_readw(x) 0
+#define omap4_ctrl_pad_readl(x) 0
+#define omap4_ctrl_wk_pad_readb(x) 0
+#define omap4_ctrl_wk_pad_readw(x) 0
+#define omap4_ctrl_wk_pad_readl(x) 0
#define omap_ctrl_writeb(x, y) WARN_ON(1)
#define omap_ctrl_writew(x, y) WARN_ON(1)
#define omap_ctrl_writel(x, y) WARN_ON(1)
+#define omap4_ctrl_pad_writeb(x, y) WARN_ON(1)
+#define omap4_ctrl_pad_writew(x, y) WARN_ON(1)
#define omap4_ctrl_pad_writel(x, y) WARN_ON(1)
+#define omap4_ctrl_wk_pad_writeb(x, y) WARN_ON(1)
+#define omap4_ctrl_wk_pad_writew(x, y) WARN_ON(1)
+#define omap4_ctrl_wk_pad_writel(x, y) WARN_ON(1)
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 4bf6e6e..da13f2d 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -30,6 +30,7 @@
#include "powerdomain.h"
#include "clockdomain.h"
#include <plat/serial.h>
+#include <plat/omap-pm.h>
#include "pm.h"
#include "control.h"
@@ -119,7 +120,7 @@
}
/* Execute ARM wfi */
- omap_sram_idle();
+ omap_sram_idle(false);
/* Re-allow idle for C1 */
if (state == &dev->states[0]) {
@@ -157,7 +158,7 @@
u32 mpu_deepest_state = PWRDM_POWER_RET;
u32 core_deepest_state = PWRDM_POWER_RET;
- if (enable_off_mode) {
+ if (off_mode_enabled) {
mpu_deepest_state = PWRDM_POWER_OFF;
/*
* Erratum i583: valable for ES rev < Es1.2 on 3630.
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
new file mode 100644
index 0000000..4ddd08c
--- /dev/null
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -0,0 +1,747 @@
+/*
+ * OMAP4 CPU idle Routines
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/cpuidle.h>
+#include <linux/clockchips.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/cpu_pm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/proc-fns.h>
+#include <asm/hardware/gic.h>
+
+#include <mach/omap4-common.h>
+#include <mach/omap-wakeupgen.h>
+
+#include <plat/gpio.h>
+
+#include "clockdomain.h"
+#include "pm.h"
+#include "prm.h"
+
+#ifdef CONFIG_CPU_IDLE
+
+/* C1 is a single-CPU C-state; each CPU can enter it independently */
+/* C1 - CPUx WFI + MPU ON + CORE ON */
+#define OMAP4_STATE_C1 0
+/* C2 through C4 are shared C-states; both CPUs must agree to enter them */
+/* C2 - CPU0 INA + CPU1 INA + MPU INA + CORE INA */
+#define OMAP4_STATE_C2 1
+/* C3 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE CSWR */
+#define OMAP4_STATE_C3 2
+/* C4 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE OSWR */
+#define OMAP4_STATE_C4 3
+
+#define OMAP4_MAX_STATES 4
+
+static bool disallow_smp_idle;
+module_param(disallow_smp_idle, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disallow_smp_idle,
+ "Don't enter idle if multiple cpus are active");
+
+static bool skip_off;
+module_param(skip_off, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_off,
+ "Do everything except actually enter the low power state (debugging)");
+
+static bool keep_core_on;
+module_param(keep_core_on, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(keep_core_on,
+ "Prevent core powerdomain from entering any low power states (debugging)");
+
+static bool keep_mpu_on;
+module_param(keep_mpu_on, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(keep_mpu_on,
+ "Prevent mpu powerdomain from entering any low power states (debugging)");
+
+static int max_state;
+module_param(max_state, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_state,
+ "Select deepest power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");
+
+static int only_state;
+module_param(only_state, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(only_state,
+ "Select only power state allowed (0=any, 1=WFI, 2=INA, 3=CSWR, 4=OSWR)");
+
+static const int omap4_poke_interrupt[2] = {
+ OMAP44XX_IRQ_CPUIDLE_POKE0,
+ OMAP44XX_IRQ_CPUIDLE_POKE1
+};
+
+struct omap4_processor_cx {
+ u8 valid;
+ u8 type;
+ u32 exit_latency;
+ u32 target_residency;
+ u32 mpu_state;
+ u32 mpu_logic_state;
+ u32 core_state;
+ u32 core_logic_state;
+ const char *desc;
+};
+
+struct omap4_processor_cx omap4_power_states[OMAP4_MAX_STATES];
+static struct powerdomain *mpu_pd, *cpu1_pd, *core_pd;
+static struct omap4_processor_cx *omap4_idle_requested_cx[NR_CPUS];
+static int omap4_idle_ready_count;
+static DEFINE_SPINLOCK(omap4_idle_lock);
+static struct clockdomain *cpu1_cd;
+
+/*
+ * Raw measured exit latency numbers (us):
+ * state average max
+ * C2 383 1068
+ * C3 641 1190
+ * C4 769 1323
+ */
+
+static struct cpuidle_params cpuidle_params_table[] = {
+ /* C1 - CPUx WFI + MPU ON + CORE ON */
+ {.exit_latency = 2 + 2, .target_residency = 5, .valid = 1},
+ /* C2 - CPU0 INA + CPU1 INA + MPU INA + CORE INA */
+ {.exit_latency = 1100, .target_residency = 1100, .valid = 1},
+ /* C3 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE CSWR */
+ {.exit_latency = 1200, .target_residency = 1200, .valid = 1},
+#ifdef CONFIG_OMAP_ALLOW_OSWR
+ /* C4 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE OSWR */
+ {.exit_latency = 1500, .target_residency = 1500, .valid = 1},
+#else
+ {.exit_latency = 1500, .target_residency = 1500, .valid = 0},
+#endif
+};
+
+static void omap4_update_actual_state(struct cpuidle_device *dev,
+ struct omap4_processor_cx *cx)
+{
+ int i;
+
+ for (i = 0; i < dev->state_count; i++) {
+ if (dev->states[i].driver_data == cx) {
+ dev->last_state = &dev->states[i];
+ return;
+ }
+ }
+}
+
+static bool omap4_gic_interrupt_pending(void)
+{
+ void __iomem *gic_cpu = omap4_get_gic_cpu_base();
+
+ return (__raw_readl(gic_cpu + GIC_CPU_HIGHPRI) != 0x3FF);
+}
+
+/**
+ * omap4_wfi_until_interrupt
+ *
+ * wfi can sometimes return with no interrupts pending, for example on a
+ * broadcast cache flush or tlb op. This function will call wfi repeatedly
+ * until an interrupt is actually pending. Returning without looping would
+ * report very short idle times to the idle governor, confusing its
+ * repeating-interrupt detection and causing deep idle states to be
+ * avoided.
+ */
+static void omap4_wfi_until_interrupt(void)
+{
+retry:
+ omap_do_wfi();
+
+ if (!omap4_gic_interrupt_pending())
+ goto retry;
+}
+
+/**
+ * omap4_idle_wait
+ *
+ * similar to WFE, but can be woken by an interrupt even though interrupts
+ * are masked. An "event" is emulated by an unused per-cpu interrupt in the
+ * GIC. Returns false if the wake was caused by an interrupt, true if it was
+ * caused by an "event".
+ */
+static bool omap4_idle_wait(void)
+{
+ int cpu = hard_smp_processor_id();
+ void __iomem *gic_dist = omap4_get_gic_dist_base();
+ u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
+ u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;
+ bool poked;
+
+ /* Unmask the "event" interrupt */
+ __raw_writel(bit, gic_dist + GIC_DIST_ENABLE_SET + reg);
+
+ omap4_wfi_until_interrupt();
+
+ /* Read the "event" interrupt pending bit */
+ poked = __raw_readl(gic_dist + GIC_DIST_PENDING_SET + reg) & bit;
+
+ /* Mask the "event" */
+ __raw_writel(bit, gic_dist + GIC_DIST_ENABLE_CLEAR + reg);
+
+ /* Clear the event */
+ if (poked)
+ __raw_writel(bit, gic_dist + GIC_DIST_PENDING_CLEAR + reg);
+
+ return poked;
+}
+
+/**
+ * omap4_poke_cpu
+ * @cpu: cpu to wake
+ *
+ * trigger an "event" to wake a cpu from omap4_idle_wait.
+ */
+static void omap4_poke_cpu(int cpu)
+{
+ void __iomem *gic_dist = omap4_get_gic_dist_base();
+ u32 bit = BIT(omap4_poke_interrupt[cpu] % 32);
+ u32 reg = (omap4_poke_interrupt[cpu] / 32) * 4;
+
+ __raw_writel(bit, gic_dist + GIC_DIST_PENDING_SET + reg);
+}
+
+/**
+ * omap4_enter_idle
+ * @dev: cpuidle device
+ * @state: The target state to be programmed
+ *
+ * Idle function for C1 state, WFI on a single CPU.
+ * Called with irqs off, returns with irqs on.
+ * Returns the amount of time spent in the low power state.
+ */
+static int omap4_enter_idle_wfi(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ ktime_t preidle, postidle;
+
+ local_fiq_disable();
+
+ preidle = ktime_get();
+
+ omap4_wfi_until_interrupt();
+
+ postidle = ktime_get();
+
+ local_fiq_enable();
+ local_irq_enable();
+
+ omap4_update_actual_state(dev, &omap4_power_states[OMAP4_STATE_C1]);
+
+ return ktime_to_us(ktime_sub(postidle, preidle));
+}
+
+static inline bool omap4_all_cpus_idle(void)
+{
+ int i;
+
+ assert_spin_locked(&omap4_idle_lock);
+
+ for_each_online_cpu(i)
+ if (omap4_idle_requested_cx[i] == NULL)
+ return false;
+
+ return true;
+}
+
+static inline struct omap4_processor_cx *omap4_get_idle_state(void)
+{
+ struct omap4_processor_cx *cx = NULL;
+ int i;
+
+ assert_spin_locked(&omap4_idle_lock);
+
+ for_each_online_cpu(i)
+ if (!cx || omap4_idle_requested_cx[i]->type < cx->type)
+ cx = omap4_idle_requested_cx[i];
+
+ return cx;
+}
+
+static void omap4_cpu_poke_others(int cpu)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ if (i != cpu)
+ omap4_poke_cpu(i);
+}
+
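+/*
+ * Publish this CPU's requested C-state (NULL means "not idle") and poke
+ * the other online CPUs so they re-evaluate the coupled idle decision.
+ * Must be called with omap4_idle_lock held.
+ */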
+static void omap4_cpu_update_state(int cpu, struct omap4_processor_cx *cx)
+{
+ assert_spin_locked(&omap4_idle_lock);
+
+ omap4_idle_requested_cx[cpu] = cx;
+ omap4_cpu_poke_others(cpu);
+}
+
+/**
+ * omap4_enter_idle_primary
+ * @cx: target idle state
+ *
+ * Waits for cpu1 to be off, then starts the transition to the target power
+ * state for cpu0, mpu and core power domains.
+ */
+static void omap4_enter_idle_primary(struct omap4_processor_cx *cx)
+{
+ int cpu = 0;
+ int ret;
+ int count = 1000000;
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ cpu_pm_enter();
+
+ if (!keep_mpu_on) {
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ }
+
+ if (!keep_core_on) {
+ pwrdm_set_logic_retst(core_pd, cx->core_logic_state);
+ omap_set_pwrdm_state(core_pd, cx->core_state);
+ }
+
+ if (skip_off)
+ goto out;
+
+ /* spin until cpu1 is really off */
+ while ((pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF) && count--)
+ cpu_relax();
+
+ if (pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF)
+ goto wake_cpu1;
+
+ ret = pwrdm_wait_transition(cpu1_pd);
+ if (ret)
+ goto wake_cpu1;
+
+ pr_debug("%s: cpu0 down\n", __func__);
+
+ omap4_enter_sleep(0, PWRDM_POWER_OFF, false);
+
+ pr_debug("%s: cpu0 up\n", __func__);
+
+ /* restore the MPU and CORE states to ON */
+ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+ omap_set_pwrdm_state(core_pd, PWRDM_POWER_ON);
+
+wake_cpu1:
+ if (!cpu_is_offline(1)) {
+ /*
+ * Work around a ROM bug that causes CPU1 to corrupt the
+ * gic distributor enable register on 4460 by disabling
+ * the gic distributor before waking CPU1, and then waiting
+ * for CPU1 to re-enable the gic distributor before continuing.
+ */
+ if (!cpu_is_omap443x())
+ gic_dist_disable();
+
+ clkdm_wakeup(cpu1_cd);
+
+ if (!cpu_is_omap443x())
+ while (gic_dist_disabled())
+ cpu_relax();
+
+ /*
+ * cpu1 mucks with page tables while it is starting,
+ * prevent cpu0 executing any processes until cpu1 is up
+ */
+ while (omap4_idle_requested_cx[1] && omap4_idle_ready_count)
+ cpu_relax();
+ }
+
+out:
+ cpu_pm_exit();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+}
+
+/**
+ * omap4_enter_idle_secondary
+ * @cpu: target cpu number
+ *
+ * Puts target cpu powerdomain into OFF.
+ */
+static void omap4_enter_idle_secondary(int cpu)
+{
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ cpu_pm_enter();
+
+ pr_debug("%s: cpu1 down\n", __func__);
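+	/*
+	 * Clean the caches and drain outstanding writes before this CPU's
+	 * power domain is switched off, so no dirty lines are lost.
+	 */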
+ flush_cache_all();
+ dsb();
+
+ /* TODO: merge CPU1 wakeup masks into CPU0 */
+ omap_wakeupgen_irqmask_all(cpu, 1);
+ gic_cpu_disable();
+
+ if (!skip_off)
+ omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);
+
+ omap_wakeupgen_irqmask_all(cpu, 0);
+ gic_cpu_enable();
+
+ pr_debug("%s: cpu1 up\n", __func__);
+
+ cpu_pm_exit();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+}
+
+/**
+ * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * @dev: cpuidle device
+ * @state: The target state to be programmed
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified low power state selected by the governor.
+ * Called with irqs off, returns with irqs on.
+ * Returns the amount of time spent in the low power state.
+ */
+static int omap4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct omap4_processor_cx *cx = cpuidle_get_statedata(state);
+ struct omap4_processor_cx *actual_cx;
+ ktime_t preidle, postidle;
+ bool idle = true;
+ int cpu = dev->cpu;
+
+ /*
+ * If disallow_smp_idle is set, revert to the old hotplug governor
+ * behavior
+ */
+ if (dev->cpu != 0 && disallow_smp_idle)
+ return omap4_enter_idle_wfi(dev, state);
+
+ /* Clamp the power state at max_state */
+ if (max_state > 0 && (cx->type > max_state - 1))
+ cx = &omap4_power_states[max_state - 1];
+
+ /*
+ * If only_state is set, use wfi if asking for a shallower idle state,
+ * or the specified state if asking for a deeper idle state
+ */
+ if (only_state > 0) {
+ if (cx->type < only_state - 1)
+ cx = &omap4_power_states[OMAP4_STATE_C1];
+ else
+ cx = &omap4_power_states[only_state - 1];
+ }
+
+ if (cx->type == OMAP4_STATE_C1)
+ return omap4_enter_idle_wfi(dev, state);
+
+ preidle = ktime_get();
+
+ local_fiq_disable();
+
+ actual_cx = &omap4_power_states[OMAP4_STATE_C1];
+
+ spin_lock(&omap4_idle_lock);
+ omap4_cpu_update_state(cpu, cx);
+
+ /* Wait for both cpus to be idle, exiting if an interrupt occurs */
+ while (idle && !omap4_all_cpus_idle()) {
+ spin_unlock(&omap4_idle_lock);
+ idle = omap4_idle_wait();
+ spin_lock(&omap4_idle_lock);
+ }
+
+ /*
+ * If we waited for longer than a millisecond, pop out to the governor
+ * to let it recalculate the desired state.
+ */
+	if (ktime_to_us(ktime_sub(ktime_get(), preidle)) > 1000)
+ idle = false;
+
+ if (!idle) {
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ goto out;
+ }
+
+ /*
+ * If we go to sleep with an IPI pending, we will lose it. Once we
+ * reach this point, the other cpu is either already idle or will
+ * shortly abort idle. If it is already idle it can't send us an IPI,
+ * so it is safe to check for pending IPIs here. If it aborts idle
+ * we will abort as well, and any future IPIs will be processed.
+ */
+ if (omap4_gic_interrupt_pending()) {
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ goto out;
+ }
+
+ /*
+ * Both cpus are probably idle. There is a small chance the other cpu
+ * just became active. cpu 0 will set omap4_idle_ready_count to 1,
+ * then each other cpu will increment it. Once a cpu has incremented
+ * the count, it cannot abort idle and must spin until either the count
+ * has hit num_online_cpus(), or is reset to 0 by an aborting cpu.
+ */
+ if (cpu == 0) {
+ BUG_ON(omap4_idle_ready_count != 0);
+ /* cpu0 requests shared-OFF */
+ omap4_idle_ready_count = 1;
+ /* cpu0 can no longer abort shared-OFF, but cpu1 can */
+
+ /* wait for cpu1 to ack shared-OFF, or leave idle */
+ while (omap4_idle_ready_count != num_online_cpus() &&
+ omap4_idle_ready_count != 0 && omap4_all_cpus_idle()) {
+ spin_unlock(&omap4_idle_lock);
+ cpu_relax();
+ spin_lock(&omap4_idle_lock);
+ }
+
+ if (omap4_idle_ready_count != num_online_cpus() ||
+ !omap4_all_cpus_idle()) {
+ pr_debug("%s: cpu1 aborted: %d %p\n", __func__,
+ omap4_idle_ready_count,
+ omap4_idle_requested_cx[1]);
+ omap4_idle_ready_count = 0;
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ goto out;
+ }
+
+ actual_cx = omap4_get_idle_state();
+ spin_unlock(&omap4_idle_lock);
+
+ /* cpu1 is turning itself off, continue with turning cpu0 off */
+
+ omap4_enter_idle_primary(actual_cx);
+
+ spin_lock(&omap4_idle_lock);
+ omap4_idle_ready_count = 0;
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ } else {
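+		/*
+		 * Secondary CPU path: wait for cpu0 to request the shared
+		 * low power state, acknowledge it by incrementing
+		 * omap4_idle_ready_count, then power this CPU off.
+		 */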
+ /* wait for cpu0 to request the shared-OFF, or leave idle */
+ while ((omap4_idle_ready_count == 0) && omap4_all_cpus_idle()) {
+ spin_unlock(&omap4_idle_lock);
+ cpu_relax();
+ spin_lock(&omap4_idle_lock);
+ }
+
+ if (!omap4_all_cpus_idle()) {
+ pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
+ omap4_idle_ready_count,
+ omap4_idle_requested_cx[0]);
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ goto out;
+ }
+
+ pr_debug("%s: cpu1 acks\n", __func__);
+ /* ack shared-OFF */
+ if (omap4_idle_ready_count > 0)
+ omap4_idle_ready_count++;
+ BUG_ON(omap4_idle_ready_count > num_online_cpus());
+
+ while (omap4_idle_ready_count != num_online_cpus() &&
+ omap4_idle_ready_count != 0) {
+ spin_unlock(&omap4_idle_lock);
+ cpu_relax();
+ spin_lock(&omap4_idle_lock);
+ }
+
+ if (omap4_idle_ready_count == 0) {
+ pr_debug("%s: cpu0 aborted: %d %p\n", __func__,
+ omap4_idle_ready_count,
+ omap4_idle_requested_cx[0]);
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+ goto out;
+ }
+
+ /* cpu1 can no longer abort shared-OFF */
+
+ actual_cx = omap4_get_idle_state();
+ spin_unlock(&omap4_idle_lock);
+
+ omap4_enter_idle_secondary(cpu);
+
+ spin_lock(&omap4_idle_lock);
+ omap4_idle_ready_count = 0;
+ omap4_cpu_update_state(cpu, NULL);
+ spin_unlock(&omap4_idle_lock);
+
+ clkdm_allow_idle(cpu1_cd);
+
+ }
+
+out:
+ postidle = ktime_get();
+
+ omap4_update_actual_state(dev, actual_cx);
+
+ local_irq_enable();
+ local_fiq_enable();
+
+ return ktime_to_us(ktime_sub(postidle, preidle));
+}
+
+DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+
+/**
+ * omap4_init_power_states - Initialises the OMAP4 specific C states.
+ *
+ * Each C state is described in the inline comments below, e.g.
+ *	C1 : CPUx WFI + MPU inactive + Core inactive
+ */
+void omap4_init_power_states(void)
+{
+ /*
+ * C1 - CPU0 WFI + CPU1 OFF + MPU ON + CORE ON
+ */
+ omap4_power_states[OMAP4_STATE_C1].valid =
+ cpuidle_params_table[OMAP4_STATE_C1].valid;
+ omap4_power_states[OMAP4_STATE_C1].type = OMAP4_STATE_C1;
+	omap4_power_states[OMAP4_STATE_C1].exit_latency =
+ cpuidle_params_table[OMAP4_STATE_C1].exit_latency;
+ omap4_power_states[OMAP4_STATE_C1].target_residency =
+ cpuidle_params_table[OMAP4_STATE_C1].target_residency;
+ omap4_power_states[OMAP4_STATE_C1].desc = "CPU WFI";
+
+ /*
+ * C2 - CPU0 INA + CPU1 OFF + MPU INA + CORE INA
+ */
+ omap4_power_states[OMAP4_STATE_C2].valid =
+ cpuidle_params_table[OMAP4_STATE_C2].valid;
+ omap4_power_states[OMAP4_STATE_C2].type = OMAP4_STATE_C2;
+ omap4_power_states[OMAP4_STATE_C2].exit_latency =
+ cpuidle_params_table[OMAP4_STATE_C2].exit_latency;
+ omap4_power_states[OMAP4_STATE_C2].target_residency =
+ cpuidle_params_table[OMAP4_STATE_C2].target_residency;
+ omap4_power_states[OMAP4_STATE_C2].mpu_state = PWRDM_POWER_INACTIVE;
+ omap4_power_states[OMAP4_STATE_C2].mpu_logic_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C2].core_state = PWRDM_POWER_INACTIVE;
+ omap4_power_states[OMAP4_STATE_C2].core_logic_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C2].desc = "CPUs OFF, MPU + CORE INA";
+
+ /*
+ * C3 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE CSWR
+ */
+ omap4_power_states[OMAP4_STATE_C3].valid =
+ cpuidle_params_table[OMAP4_STATE_C3].valid;
+ omap4_power_states[OMAP4_STATE_C3].type = OMAP4_STATE_C3;
+ omap4_power_states[OMAP4_STATE_C3].exit_latency =
+ cpuidle_params_table[OMAP4_STATE_C3].exit_latency;
+ omap4_power_states[OMAP4_STATE_C3].target_residency =
+ cpuidle_params_table[OMAP4_STATE_C3].target_residency;
+ omap4_power_states[OMAP4_STATE_C3].mpu_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C3].mpu_logic_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C3].core_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C3].core_logic_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C3].desc = "CPUs OFF, MPU + CORE CSWR";
+
+ /*
+ * C4 - CPU0 OFF + CPU1 OFF + MPU CSWR + CORE OSWR
+ */
+ omap4_power_states[OMAP4_STATE_C4].valid =
+ cpuidle_params_table[OMAP4_STATE_C4].valid;
+ omap4_power_states[OMAP4_STATE_C4].type = OMAP4_STATE_C4;
+ omap4_power_states[OMAP4_STATE_C4].exit_latency =
+ cpuidle_params_table[OMAP4_STATE_C4].exit_latency;
+ omap4_power_states[OMAP4_STATE_C4].target_residency =
+ cpuidle_params_table[OMAP4_STATE_C4].target_residency;
+ omap4_power_states[OMAP4_STATE_C4].mpu_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C4].mpu_logic_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C4].core_state = PWRDM_POWER_RET;
+ omap4_power_states[OMAP4_STATE_C4].core_logic_state = PWRDM_POWER_OFF;
+ omap4_power_states[OMAP4_STATE_C4].desc = "CPUs OFF, MPU CSWR + CORE OSWR";
+
+}
+
+struct cpuidle_driver omap4_idle_driver = {
+ .name = "omap4_idle",
+ .owner = THIS_MODULE,
+};
+
+/**
+ * omap4_idle_init - Init routine for OMAP4 idle
+ *
+ * Registers the OMAP4 specific cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int __init omap4_idle_init(void)
+{
+ int cpu_id = 0, i, count = 0;
+ struct omap4_processor_cx *cx;
+ struct cpuidle_state *state;
+ struct cpuidle_device *dev;
+
+ mpu_pd = pwrdm_lookup("mpu_pwrdm");
+ BUG_ON(!mpu_pd);
+ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+ BUG_ON(!cpu1_pd);
+ cpu1_cd = clkdm_lookup("mpu1_clkdm");
+ BUG_ON(!cpu1_cd);
+ core_pd = pwrdm_lookup("core_pwrdm");
+ BUG_ON(!core_pd);
+
+ omap4_init_power_states();
+ cpuidle_register_driver(&omap4_idle_driver);
+
+ for_each_possible_cpu(cpu_id) {
+ dev = &per_cpu(omap4_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+ count = 0;
+ for (i = OMAP4_STATE_C1; i < OMAP4_MAX_STATES; i++) {
+ cx = &omap4_power_states[i];
+ state = &dev->states[count];
+
+ if (!cx->valid)
+ continue;
+ cpuidle_set_statedata(state, cx);
+ state->exit_latency = cx->exit_latency;
+ state->target_residency = cx->target_residency;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ if (cx->type == OMAP4_STATE_C1) {
+ dev->safe_state = state;
+ state->enter = omap4_enter_idle_wfi;
+ } else {
+ state->enter = omap4_enter_idle;
+ }
+
+ sprintf(state->name, "C%d", count+1);
+ strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ count++;
+ }
+
+ if (!count)
+ return -EINVAL;
+ dev->state_count = count;
+
+ if (cpuidle_register_device(dev)) {
+ pr_err("%s: CPUidle register device failed\n", __func__);
+ return -EIO;
+ }
+
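+		/*
+		 * Route this CPU's "poke" interrupt to this CPU only by
+		 * writing its CPU bit into the per-interrupt byte of
+		 * GIC_DIST_TARGET.
+		 */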
+ __raw_writeb(BIT(cpu_id), omap4_get_gic_dist_base() +
+ GIC_DIST_TARGET + omap4_poke_interrupt[cpu_id]);
+ }
+
+ return 0;
+}
+#else
+int __init omap4_idle_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_CPU_IDLE */
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 5b8ca68..cf7a0ba 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -29,13 +29,18 @@
#include <mach/gpio.h>
#include <plat/mmc.h>
#include <plat/dma.h>
+#include <plat/gpu.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap4-keypad.h>
+#include <plat/mcpdm.h>
+
+#include <sound/omap-abe-dsp.h>
#include "mux.h"
#include "control.h"
#include "devices.h"
+#include "dvfs.h"
#define L3_MODULES_MAX_LEN 12
#define L3_MODULES 3
@@ -292,6 +297,112 @@
static inline void omap_init_sti(void) {}
+#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
+ defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
+
+static struct omap_device_pm_latency omap_mcpdm_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static void omap_init_mcpdm(void)
+{
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ struct omap_mcpdm_platform_data *pdata;
+ char *oh_name = "mcpdm";
+ char *dev_name = "omap-mcpdm";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("%s: could not look up %s\n", __func__, oh_name);
+ return;
+ }
+
+ pdata = kzalloc(sizeof(struct omap_mcpdm_platform_data), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: could not allocate platform data\n", __func__);
+ return;
+ }
+
+ pdata->was_context_lost = omap_pm_was_context_lost;
+
+ od = omap_device_build(dev_name, -1, oh, pdata,
+ sizeof(struct omap_mcpdm_platform_data),
+ omap_mcpdm_latency,
+ ARRAY_SIZE(omap_mcpdm_latency), 0);
+ WARN(IS_ERR(od), "could not build omap_device for %s:%s\n",
+ oh_name, dev_name);
+}
+#else
+static inline void omap_init_mcpdm(void) {}
+#endif
+
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) || \
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+
+static struct omap_device_pm_latency omap_aess_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static void omap_init_aess(void)
+{
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ struct omap4_abe_dsp_pdata *pdata;
+ char *oh_name = "aess";
+ char *dev_name = "aess";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("%s: could not look up %s\n", __func__, oh_name);
+ return;
+ }
+
+ pdata = kzalloc(sizeof(struct omap4_abe_dsp_pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: could not allocate platform data\n", __func__);
+ return;
+ }
+
+ pdata->device_scale = omap_device_scale;
+ pdata->was_context_lost = omap_pm_was_context_lost;
+
+ od = omap_device_build(dev_name, -1, oh, pdata,
+ sizeof(struct omap4_abe_dsp_pdata),
+ omap_aess_latency,
+ ARRAY_SIZE(omap_aess_latency), 0);
+ WARN(IS_ERR(od), "could not build omap_device for %s:%s\n",
+ oh_name, dev_name);
+
+ kfree(pdata);
+}
+#else
+static inline void omap_init_aess(void) {}
+#endif
+
+#if defined CONFIG_ARCH_OMAP4
+
+static struct platform_device omap_abe_dai = {
+ .name = "omap-abe-dai",
+ .id = -1,
+};
+
+static inline void omap_init_abe(void)
+{
+ platform_device_register(&omap_abe_dai);
+}
+#else
+static inline void omap_init_abe(void) {}
+#endif
+
#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
static struct platform_device omap_pcm = {
@@ -300,6 +411,14 @@
};
/*
+ * Device for the ASoC OMAP4 HDMI machine driver
+ */
+static struct platform_device omap4_hdmi_audio = {
+ .name = "omap4-hdmi-audio",
+ .id = -1,
+};
+
+/*
* OMAP2420 has 2 McBSP ports
* OMAP2430 has 5 McBSP ports
* OMAP3 has 5 McBSP ports
@@ -313,6 +432,31 @@
static void omap_init_audio(void)
{
+ struct omap_hwmod *oh_hdmi;
+ struct omap_device *od_hdmi, *od_hdmi_codec;
+ char *oh_hdmi_name = "dss_hdmi";
+ char *dev_hdmi_name = "hdmi-audio-dai";
+ char *dev_hdmi_codec_name = "omap-hdmi-codec";
+
+ if (cpu_is_omap44xx()) {
+ oh_hdmi = omap_hwmod_lookup(oh_hdmi_name);
+ WARN(!oh_hdmi, "%s: could not find omap_hwmod for %s\n",
+ __func__, oh_hdmi_name);
+
+ od_hdmi = omap_device_build(dev_hdmi_name, -1, oh_hdmi, NULL, 0,
+ NULL, 0, false);
+ WARN(IS_ERR(od_hdmi), "%s: could not build omap_device for %s\n",
+ __func__, dev_hdmi_name);
+
+ od_hdmi_codec = omap_device_build(dev_hdmi_codec_name,
+ -1, oh_hdmi, NULL, 0, NULL, 0, false);
+
+ WARN(IS_ERR(od_hdmi_codec), "%s: could not build omap_device for %s\n",
+ __func__, dev_hdmi_codec_name);
+
+ platform_device_register(&omap4_hdmi_audio);
+ }
+
platform_device_register(&omap_mcbsp1);
platform_device_register(&omap_mcbsp2);
if (cpu_is_omap243x() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
@@ -329,6 +473,39 @@
static inline void omap_init_audio(void) {}
#endif
+#if defined(CONFIG_SND_OMAP_SOC_MCASP) || \
+ defined(CONFIG_SND_OMAP_SOC_MCASP_MODULE)
+static struct omap_device_pm_latency omap_mcasp_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static void omap_init_mcasp(void)
+{
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ char *oh_name = "omap-mcasp-dai";
+ char *dev_name = "omap-mcasp-dai";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("%s: could not look up %s\n", __func__, oh_name);
+ return;
+ }
+
+ od = omap_device_build(dev_name, -1, oh, NULL, 0,
+ omap_mcasp_latency,
+ ARRAY_SIZE(omap_mcasp_latency), 0);
+ WARN(IS_ERR(od), "could not build omap_device for %s:%s\n",
+ oh_name, dev_name);
+}
+#else
+static inline void omap_init_mcasp(void) {}
+#endif
+
#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
#include <plat/mcspi.h>
@@ -673,6 +850,68 @@
static inline void omap_init_vout(void) {}
#endif
+static struct omap_device_pm_latency omap_gpu_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static struct platform_device omap_omaplfb_device = {
+ .name = "omaplfb",
+ .id = -1,
+};
+
+
+static void omap_init_gpu(void)
+{
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ int max_omap_gpu_hwmod_name_len = 16;
+ char oh_name[max_omap_gpu_hwmod_name_len];
+ int l;
+ struct gpu_platform_data *pdata;
+ char *name = "pvrsrvkm";
+
+ l = snprintf(oh_name, max_omap_gpu_hwmod_name_len,
+ "gpu");
+ WARN(l >= max_omap_gpu_hwmod_name_len,
+ "String buffer overflow in GPU device setup\n");
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("omap_init_gpu: Could not look up %s\n", oh_name);
+ return;
+ }
+
+ pdata = kzalloc(sizeof(struct gpu_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ pr_err("omap_init_gpu: Platform data memory allocation failed\n");
+ return;
+ }
+
+ pdata->device_scale = omap_device_scale;
+ pdata->device_enable = omap_device_enable;
+ pdata->device_idle = omap_device_idle;
+ pdata->device_shutdown = omap_device_shutdown;
+
+ pdata->ovfreqs = 0;
+ if (cpu_is_omap446x())
+ pdata->ovfreqs = 1;
+
+ od = omap_device_build(name, 0, oh, pdata,
+ sizeof(struct gpu_platform_data),
+ omap_gpu_latency, ARRAY_SIZE(omap_gpu_latency), 0);
+ WARN(IS_ERR(od), "Could not build omap_device for %s %s\n",
+ name, oh_name);
+
+ kfree(pdata);
+ platform_device_register(&omap_omaplfb_device);
+}
+
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@@ -681,9 +920,13 @@
* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
+ omap_init_mcpdm();
+ omap_init_aess();
+ omap_init_abe();
omap_init_audio();
omap_init_camera();
omap_init_mbox();
+ omap_init_mcasp();
omap_init_mcspi();
omap_init_pmu();
omap_hdq_init();
@@ -691,6 +934,7 @@
omap_init_sham();
omap_init_aes();
omap_init_vout();
+ omap_init_gpu();
return 0;
}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 543fcb8..dd91c20 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -25,6 +25,7 @@
#include <video/omapdss.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
static struct platform_device omap_display_device = {
.name = "omapdss",
@@ -42,20 +43,6 @@
},
};
-/* oh_core is used for getting opt-clocks */
-static struct omap_hwmod *oh_core;
-
-static bool opt_clock_available(const char *clk_role)
-{
- int i;
-
- for (i = 0; i < oh_core->opt_clks_cnt; i++) {
- if (!strcmp(oh_core->opt_clks[i].role, clk_role))
- return true;
- }
- return false;
-}
-
struct omap_dss_hwmod_data {
const char *oh_name;
const char *dev_name;
@@ -109,16 +96,7 @@
oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
}
- /* opt_clks are always associated with dss hwmod */
- oh_core = omap_hwmod_lookup("dss_core");
- if (!oh_core) {
- pr_err("Could not look up dss_core.\n");
- return -ENODEV;
- }
-
pdata.board_data = board_data;
- pdata.board_data->get_last_off_on_transaction_id = NULL;
- pdata.opt_clock_available = opt_clock_available;
for (i = 0; i < oh_count; i++) {
oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
diff --git a/arch/arm/mach-omap2/dmtimer.c b/arch/arm/mach-omap2/dmtimer.c
new file mode 100644
index 0000000..e9cba71
--- /dev/null
+++ b/arch/arm/mach-omap2/dmtimer.c
@@ -0,0 +1,283 @@
+/**
+ * OMAP2+ Dual-Mode Timers - platform device registration
+ *
+ * Contains first level initialization routines which extracts timers
+ * information from hwmod database and registers with linux device model.
+ * It also has low level function to change the timer input clock source.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <plat/dmtimer.h>
+#include <plat/omap_device.h>
+#include <plat/cpu.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap-pm.h>
+
+#include "powerdomain.h"
+
+static u8 __initdata system_timer_id;
+
+/**
+ * omap2_dm_timer_set_src - change the timer input clock source
+ * @pdev: timer platform device pointer
+ * @source: array index of parent clock source
+ */
+static int omap2_dm_timer_set_src(struct platform_device *pdev, int source)
+{
+ int ret;
+ struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
+ struct clk *new_fclk;
+ char *fclk_name = "32k_ck"; /* default name */
+
+ struct clk *fclk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR_OR_NULL(fclk)) {
+ dev_err(&pdev->dev, "%s: %d: clk_get() FAILED\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ switch (source) {
+ case OMAP_TIMER_SRC_SYS_CLK:
+ fclk_name = "sys_ck";
+ break;
+
+ case OMAP_TIMER_SRC_32_KHZ:
+ fclk_name = "32k_ck";
+ break;
+
+ case OMAP_TIMER_SRC_EXT_CLK:
+ if (pdata->timer_ip_type == OMAP_TIMER_IP_VERSION_1) {
+ fclk_name = "alt_ck";
+ break;
+ }
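+		/* fall through: "alt_ck" exists only on timer IP version 1 */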
+ default:
+ dev_err(&pdev->dev, "%s: %d: invalid clk src.\n",
+ __func__, __LINE__);
+ clk_put(fclk);
+ return -EINVAL;
+ }
+
+ new_fclk = clk_get(&pdev->dev, fclk_name);
+ if (IS_ERR_OR_NULL(new_fclk)) {
+ dev_err(&pdev->dev, "%s: %d: clk_get() %s FAILED\n",
+ __func__, __LINE__, fclk_name);
+ clk_put(fclk);
+ return -EINVAL;
+ }
+
+ ret = clk_set_parent(fclk, new_fclk);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(&pdev->dev, "%s: clk_set_parent() to %s FAILED\n",
+ __func__, fclk_name);
+ ret = -EINVAL;
+ }
+
+ clk_put(new_fclk);
+ clk_put(fclk);
+
+ return ret;
+}
+
+struct omap_device_pm_latency omap2_dmtimer_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+/**
+ * omap_timer_init - build and register timer device with an
+ * associated timer hwmod
+ * @oh: timer hwmod pointer to be used to build timer device
+ * @user: parameter that can be passed from calling hwmod API
+ *
+ * Called by omap_hwmod_for_each_by_class to register each of the timer
+ * devices present in the system. The number of timer devices is known
+ * by parsing through the hwmod database for the given class name. At the
+ * end of the call, memory is allocated for the timer device and it is
+ * registered with the framework, ready to be probed by the driver.
+ */
+static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
+{
+ int id;
+ int ret = 0;
+ char *name = "omap_timer";
+ struct dmtimer_platform_data *pdata;
+ struct omap_device *od;
+ struct omap_secure_timer_dev_attr *secure_timer_dev_attr;
+ struct powerdomain *pwrdm;
+
+ /*
+	 * Extract the ID from the name field in the hwmod database
+	 * and use it to construct the id for the timer device. This
+	 * avoids keeping a static counter within the function.
+	 * CAUTION: if the name format in the hwmod database changes,
+	 * this parsing must be updated accordingly (or we must switch
+	 * back to a static counter).
+ */
+ sscanf(oh->name, "timer%2d", &id);
+ if (unlikely(id == system_timer_id))
+ return ret;
+
+ pr_debug("%s: %s\n", __func__, oh->name);
+
+ /* do not register secure timer */
+ secure_timer_dev_attr = oh->dev_attr;
+ if (secure_timer_dev_attr && secure_timer_dev_attr->is_secure_timer)
+ return ret;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: No memory for [%s]\n", __func__, oh->name);
+ return -ENOMEM;
+ }
+ pdata->set_timer_src = omap2_dm_timer_set_src;
+ pdata->timer_ip_type = oh->class->rev;
+ pwrdm = omap_hwmod_get_pwrdm(oh);
+	if (!pwrdm) {
+		pr_debug("%s: could not find pwrdm for (%s) in omap hwmod!\n",
+			__func__, oh->name);
+		kfree(pdata);
+		return -EINVAL;
+	}
+ pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
+
+ od = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
+ omap2_dmtimer_latency,
+ ARRAY_SIZE(omap2_dmtimer_latency),
+ pdata->is_early_init);
+
+ if (IS_ERR(od)) {
+ pr_err("%s: Can't build omap_device for %s: %s.\n",
+ __func__, name, oh->name);
+ ret = -EINVAL;
+ }
+
+ kfree(pdata);
+
+ return ret;
+}
+
+/**
+ * omap2_system_timer_init - top level system timer initialization
+ * called from omap2_gp_timer_init() in timer-gp.c
+ * @id : system timer id
+ *
+ * This function does hwmod setup for the system timer entry needed
+ * prior to building and registering the device. After the device is
+ * registered, the early platform probe is initiated.
+ */
+int __init omap2_system_timer_init(u8 id)
+{
+ int ret = 0;
+ char *name = "omap_timer";
+ struct dmtimer_platform_data *pdata;
+ struct omap_device *od;
+ struct omap_hwmod *oh;
+	char system_timer_name[8]; /* 8 = sizeof("timerXX") incl. '\0' */
+
+ system_timer_id = id;
+
+ sprintf(system_timer_name, "timer%d", id);
+ ret = omap_hwmod_setup_one(system_timer_name);
+ if (ret) {
+ pr_err("%s: omap_hwmod_setup_one(%s) failed.\n",
+ __func__, system_timer_name);
+ return ret;
+ }
+ oh = omap_hwmod_lookup(system_timer_name);
+ if (!oh) {
+ pr_debug("%s: could not find (%s) in omap_hwmod_list!\n",
+ __func__, system_timer_name);
+ return -EINVAL;
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: No memory for [%s]\n", __func__, oh->name);
+ return -ENOMEM;
+ }
+ pdata->is_early_init = 1;
+ pdata->set_timer_src = omap2_dm_timer_set_src;
+ pdata->timer_ip_type = oh->class->rev;
+ pdata->needs_manual_reset = 0;
+
+ od = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
+ omap2_dmtimer_latency,
+ ARRAY_SIZE(omap2_dmtimer_latency),
+ pdata->is_early_init);
+
+ if (IS_ERR(od)) {
+ pr_err("%s: Can't build omap_device for %s: %s.\n",
+ __func__, name, oh->name);
+ ret = -EINVAL;
+ }
+
+ kfree(pdata);
+
+ if (!ret) {
+ early_platform_driver_register_all("earlytimer");
+ early_platform_driver_probe("earlytimer", 1, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * omap2_system_timer_set_src - change the timer input clock source
+ * Allow system timer to program clock source before pm_runtime
+ * framework is available during system boot.
+ * @timer: pointer to struct omap_dm_timer
+ * @source: array index of parent clock source
+ */
+int __init omap2_system_timer_set_src(struct omap_dm_timer *timer, int source)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(timer) || IS_ERR_OR_NULL(timer->fclk))
+ return -EINVAL;
+
+ clk_disable(timer->fclk);
+ ret = omap2_dm_timer_set_src(timer->pdev, source);
+ clk_enable(timer->fclk);
+
+ return ret;
+}
+
+/**
+ * omap2_dm_timer_init - top level regular device initialization
+ *
+ * Uses dedicated hwmod api to parse through hwmod database for
+ * given class name and then build and register the timer device.
+ */
+static int __init omap2_dm_timer_init(void)
+{
+ int ret;
+
+ ret = omap_hwmod_for_each_by_class("timer", omap_timer_init, NULL);
+ if (unlikely(ret)) {
+ pr_err("%s: device registration failed.\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+arch_initcall(omap2_dm_timer_init);
diff --git a/arch/arm/mach-omap2/dmtimer.h b/arch/arm/mach-omap2/dmtimer.h
new file mode 100644
index 0000000..4cfd580
--- /dev/null
+++ b/arch/arm/mach-omap2/dmtimer.h
@@ -0,0 +1,32 @@
+/**
+ * OMAP Dual-Mode Timers - early initialization interface
+ *
+ * Function interface called first to start dmtimer early initialization.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ASM_ARCH_OMAP2_DMTIMER_H
+#define __ASM_ARCH_OMAP2_DMTIMER_H
+
+#include <plat/dmtimer.h>
+
+/*
+ * dmtimer is required during the early part of the boot sequence, even
+ * before the device model and pm_runtime are fully up and running. This
+ * function is called from the following sequence:
+ * start_kernel()->time_init()->timer->init()->omap2_gp_timer_init()
+ */
+extern int __init omap2_system_timer_init(u8 id);
+extern int __init omap2_system_timer_set_src(struct omap_dm_timer *, int);
+#endif
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index f77022b..7fb0d21 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -32,8 +32,10 @@
#include <plat/clock.h>
#include "clock.h"
+#include "cm2_44xx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
+#include "cm-regbits-44xx.h"
/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE 0x0
@@ -61,22 +63,76 @@
static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
{
const struct dpll_data *dd;
- int i = 0;
+ int i;
int ret = -EINVAL;
+ bool first_time = true;
+ u32 reg;
+ u32 orig_cm_div_m2_dpll_usb;
+ u32 orig_cm_clkdcoldo_dpll_usb;
+retry:
dd = clk->dpll_data;
state <<= __ffs(dd->idlest_mask);
+ i = 0;
while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
i < MAX_DPLL_WAIT_TRIES) {
i++;
udelay(1);
}
+ /* restore back old values if hit work-around */
+ if (!first_time) {
+ __raw_writel(orig_cm_div_m2_dpll_usb,
+ OMAP4430_CM_DIV_M2_DPLL_USB);
+ __raw_writel(orig_cm_clkdcoldo_dpll_usb,
+ OMAP4430_CM_CLKDCOLDO_DPLL_USB);
+ }
+
if (i == MAX_DPLL_WAIT_TRIES) {
printk(KERN_ERR "clock: %s failed transition to '%s'\n",
clk->name, (state) ? "locked" : "bypassed");
+
+ /* Try Error Recovery: for failing usbdpll locking */
+ if (!strcmp(clk->name, "dpll_usb_ck")) {
+
+ reg = __raw_readl(dd->mult_div1_reg);
+
+ /* Put in MN bypass */
+ _omap3_dpll_write_clken(clk, DPLL_MN_BYPASS);
+ i = 0;
+ while (!(__raw_readl(dd->idlest_reg) & (1 << OMAP4430_ST_MN_BYPASS_SHIFT)) &&
+ i < MAX_DPLL_WAIT_TRIES) {
+ i++;
+ udelay(1);
+ }
+
+			/* MN bypass loses contents of CM_CLKSEL_DPLL_USB */
+ __raw_writel(reg, dd->mult_div1_reg);
+
+ /* Force generate request to PRCM: put in Force mode */
+
+ /* a) CM_DIV_M2_DPLL_USB.DPLL_CLKOUT_GATE_CTRL = 1 */
+ orig_cm_div_m2_dpll_usb = __raw_readl(OMAP4430_CM_DIV_M2_DPLL_USB);
+ __raw_writel(orig_cm_div_m2_dpll_usb |
+ (1 << OMAP4430_DPLL_CLKOUT_GATE_CTRL_SHIFT),
+ OMAP4430_CM_DIV_M2_DPLL_USB);
+
+ /* b) CM_CLKDCOLDO_DPLL_USB.DPLL_CLKDCOLDO_GATE_CTRL = 1 */
+ orig_cm_clkdcoldo_dpll_usb = __raw_readl(OMAP4430_CM_CLKDCOLDO_DPLL_USB);
+ __raw_writel(orig_cm_clkdcoldo_dpll_usb |
+ (1 << OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT),
+ OMAP4430_CM_CLKDCOLDO_DPLL_USB);
+
+ /* Put back to locked mode */
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+ if (first_time) {
+ first_time = false;
+ goto retry;
+ }
+ }
} else {
pr_debug("clock: %s transition to '%s' in %d loops\n",
clk->name, (state) ? "locked" : "bypassed", i);
@@ -135,11 +191,20 @@
*/
static int _omap3_noncore_dpll_lock(struct clk *clk)
{
+ const struct dpll_data *dd;
u8 ai;
- int r;
+ u8 state = 1;
+ int r = 0;
pr_debug("clock: locking DPLL %s\n", clk->name);
+ dd = clk->dpll_data;
+ state <<= __ffs(dd->idlest_mask);
+
+ /* Check if already locked */
+ if ((__raw_readl(dd->idlest_reg) & dd->idlest_mask) == state)
+ goto done;
+
ai = omap3_dpll_autoidle_read(clk);
omap3_dpll_deny_idle(clk);
@@ -151,6 +216,7 @@
if (ai)
omap3_dpll_allow_idle(clk);
+done:
return r;
}
@@ -455,7 +521,7 @@
new_parent = dd->clk_bypass;
} else {
if (dd->last_rounded_rate != rate)
- omap2_dpll_round_rate(clk, rate);
+ rate = clk->round_rate(clk, rate);
if (dd->last_rounded_rate == 0)
return -EINVAL;
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index 4e4da61..d4cfe2a 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -14,12 +14,219 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
-
+#include <linux/spinlock.h>
#include <plat/cpu.h>
#include <plat/clock.h>
+#include <plat/common.h>
+
+#include <mach/emif.h>
+#include <mach/omap4-common.h>
#include "clock.h"
+#include "clock44xx.h"
+#include "cm.h"
+#include "cm44xx.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cminst44xx.h"
+#include "clock44xx.h"
+#include "clockdomain.h"
#include "cm-regbits-44xx.h"
+#include "prcm44xx.h"
+
+#define MAX_FREQ_UPDATE_TIMEOUT 100000
+
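+/*
+ * Cached EMIF clockdomain and a lock serializing shadow-register
+ * frequency update sequences against each other.
+ */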
+static struct clockdomain *l3_emif_clkdm;
+static DEFINE_SPINLOCK(l3_emif_lock);
+
+/**
+ * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
+ * @clk: struct clk * of DPLL to set
+ * @rate: rounded target rate
+ *
+ * Programs the CM shadow registers to update CORE DPLL M2
+ * divider. M2 divider is used to clock external DDR and its
+ * reconfiguration on frequency change is managed through a
+ * hardware sequencer. This is managed by the PRCM together with the
+ * EMIF using shadow registers.
+ * Returns -EINVAL/-1 on error and 0 on success.
+ */
+int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
+{
+ int i = 0;
+ u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
+ unsigned long flags;
+
+ if (!clk || !rate)
+ return -EINVAL;
+
+ validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
+ if (validrate != rate)
+ return -EINVAL;
+
+	/* Cache the clockdomain lookup so it is not repeated on every call */
+ if (!l3_emif_clkdm) {
+ l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
+ if (!l3_emif_clkdm) {
+ pr_err("%s: clockdomain lookup failed\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ spin_lock_irqsave(&l3_emif_lock, flags);
+
+ /* Configures MEMIF domain in SW_WKUP */
+ clkdm_wakeup(l3_emif_clkdm);
+
+ /*
+ * Errata ID: i728
+ *
+ * DESCRIPTION:
+ *
+ * If during a small window the following three events occur:
+ *
+ * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
+ * 2) Frequency change update is requested CM_SHADOW_FREQ_CONFIG1
+ * FREQ_UPDATE set to 1
+ * 3) OCP access is requested
+ *
+ * There will be clock instability on the DDR interface.
+ *
+ * WORKAROUND:
+ *
+ * Prevent event 1) while event 2) is happening.
+ *
+ * Disable the self-refresh when requesting a frequency change.
+ * Before requesting a frequency change, program
+ * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
+ * (omap_emif_frequency_pre_notify)
+ *
+ * When the frequency change is completed, reprogram
+ * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
+ * (omap_emif_frequency_post_notify)
+ */
+ omap_emif_frequency_pre_notify();
+
+ /*
+ * Program EMIF timing parameters in EMIF shadow registers
+	 * for the targeted DDR clock.
+ * DDR Clock = core_dpll_m2 / 2
+ */
+ omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);
+
+ /*
+ * FREQ_UPDATE sequence:
+ * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
+ * after CORE DPLL lock)
+ * - DLL_RESET=1 (DLL must be reset upon frequency change)
+ * - DPLL_CORE_M2_DIV with same value as the one already
+ * in direct register
+ * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
+ * - FREQ_UPDATE=1 (to start HW sequence)
+ */
+ shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
+ (new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
+ (DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
+ (1 << OMAP4430_FREQ_UPDATE_SHIFT);
+ shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
+ __raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);
+
+ /* wait for the configuration to be applied */
+ omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
+ & OMAP4430_FREQ_UPDATE_MASK) == 0),
+ MAX_FREQ_UPDATE_TIMEOUT, i);
+
+ /* Re-enable DDR self refresh */
+ omap_emif_frequency_post_notify();
+
+ /* Configures MEMIF domain back to HW_WKUP */
+ clkdm_allow_idle(l3_emif_clkdm);
+
+ spin_unlock_irqrestore(&l3_emif_lock, flags);
+
+ if (i == MAX_FREQ_UPDATE_TIMEOUT) {
+ pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
+ __func__);
+ return -1;
+ }
+
+ /* Update the clock change */
+ clk->rate = validrate;
+
+ return 0;
+}
+
+
+/**
+ * omap4_prcm_freq_update - set freq_update bit
+ *
+ * Programs the CM shadow registers to update the EMIF
+ * parameters. In a few use cases only a subset of registers needs to
+ * be updated using the PRCM frequency update sequence; for example,
+ * the EMIF read-idle control and zq-config need to be updated on
+ * temperature alerts and voltage changes.
+ * Returns -EINVAL/-1 on error and 0 on success.
+ */
+int omap4_prcm_freq_update(void)
+{
+ u32 shadow_freq_cfg1;
+ int i = 0;
+ unsigned long flags;
+
+ if (!l3_emif_clkdm) {
+ pr_err("%s: clockdomain lookup failed\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&l3_emif_lock, flags);
+ /* Configures MEMIF domain in SW_WKUP */
+ clkdm_wakeup(l3_emif_clkdm);
+
+ /* Disable DDR self refresh (Errata ID: i728) */
+ omap_emif_frequency_pre_notify();
+
+ /*
+ * FREQ_UPDATE sequence:
+ * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
+ * after CORE DPLL lock)
+ * - FREQ_UPDATE=1 (to start HW sequence)
+ */
+ shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
+ shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
+ (1 << OMAP4430_FREQ_UPDATE_SHIFT);
+ shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
+ __raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);
+
+ /* wait for the configuration to be applied */
+ omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
+ & OMAP4430_FREQ_UPDATE_MASK) == 0),
+ MAX_FREQ_UPDATE_TIMEOUT, i);
+
+ /* Re-enable DDR self refresh */
+ omap_emif_frequency_post_notify();
+
+ /* Configures MEMIF domain back to HW_WKUP */
+ clkdm_allow_idle(l3_emif_clkdm);
+
+ spin_unlock_irqrestore(&l3_emif_lock, flags);
+
+ if (i == MAX_FREQ_UPDATE_TIMEOUT) {
+ pr_err("%s: Frequency update failed (call from %pF)\n",
+ __func__, (void *)_RET_IP_);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Use a very high retry count - we should not hit this condition */
+#define MAX_DPLL_WAIT_TRIES 1000000
+
+#define OMAP_1_5GHz 1500000000
+#define OMAP_1_2GHz 1200000000
+#define OMAP_1GHz 1000000000
+#define OMAP_920MHz 920000000
+#define OMAP_748MHz 748000000
/* Supported only on OMAP4 */
int omap4_dpllmx_gatectrl_read(struct clk *clk)
@@ -82,3 +289,370 @@
.deny_idle = omap4_dpllmx_deny_gatectrl,
};
+static void omap4460_mpu_dpll_update_children(unsigned long rate)
+{
+ u32 v;
+
+ /*
+ * The interconnect frequency to EMIF should
+ * be switched between MPU clk divide by 4 (for
+ * frequencies higher than 920Mhz) and MPU clk divide
+ * by 2 (for frequencies lower than or equal to 920Mhz)
+ * Also the async bridge to ABE must be MPU clk divide
+ * by 8 for MPU clk > 748Mhz and MPU clk divide by 4
+ * for lower frequencies.
+ */
+ v = __raw_readl(OMAP4430_CM_MPU_MPU_CLKCTRL);
+ if (rate > OMAP_920MHz)
+ v |= OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK;
+ else
+ v &= ~OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK;
+
+ if (rate > OMAP_748MHz)
+ v |= OMAP4460_CLKSEL_ABE_DIV_MODE_MASK;
+ else
+ v &= ~OMAP4460_CLKSEL_ABE_DIV_MODE_MASK;
+ __raw_writel(v, OMAP4430_CM_MPU_MPU_CLKCTRL);
+}
+
+int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct dpll_data *dd;
+ u32 v;
+ unsigned long dpll_rate;
+
+ if (!clk || !rate || !clk->parent)
+ return -EINVAL;
+
+ dd = clk->parent->dpll_data;
+
+ if (!dd)
+ return -EINVAL;
+
+ if (!clk->parent->set_rate)
+ return -EINVAL;
+
+ if (rate > clk->rate)
+ omap4460_mpu_dpll_update_children(rate);
+
+ /*
+ * On OMAP4460, to obtain MPU DPLL frequency higher
+ * than 1GHz, DCC (Duty Cycle Correction) needs to
+ * be enabled.
+	 * It needs to be kept disabled below 1 GHz.
+ */
+ dpll_rate = omap2_get_dpll_rate(clk->parent);
+ if (rate <= OMAP_1_5GHz) {
+ /* If DCC is enabled, disable it */
+ v = __raw_readl(dd->mult_div1_reg);
+ if (v & OMAP4460_DCC_EN_MASK) {
+ v &= ~OMAP4460_DCC_EN_MASK;
+ __raw_writel(v, dd->mult_div1_reg);
+ }
+
+ if (rate != dpll_rate)
+ clk->parent->set_rate(clk->parent, rate);
+ } else {
+ /*
+ * On 4460, the MPU clk for frequencies higher than 1Ghz
+ * is sourced from CLKOUTX2_M3, instead of CLKOUT_M2, while
+ * value of M3 is fixed to 1. Hence for frequencies higher
+ * than 1 Ghz, lock the DPLL at half the rate so the
+ * CLKOUTX2_M3 then matches the requested rate.
+ */
+ if (rate != dpll_rate * 2)
+ clk->parent->set_rate(clk->parent, rate / 2);
+
+ v = __raw_readl(dd->mult_div1_reg);
+ v &= ~OMAP4460_DCC_COUNT_MAX_MASK;
+ v |= (5 << OMAP4460_DCC_COUNT_MAX_SHIFT);
+ __raw_writel(v, dd->mult_div1_reg);
+
+ v |= OMAP4460_DCC_EN_MASK;
+ __raw_writel(v, dd->mult_div1_reg);
+ }
+
+ if (rate < clk->rate)
+ omap4460_mpu_dpll_update_children(rate);
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+long omap4460_mpu_dpll_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (!clk || !rate || !clk->parent)
+ return -EINVAL;
+
+ if (clk->parent->round_rate)
+ return clk->parent->round_rate(clk->parent, rate);
+ else
+ return 0;
+}
+
+unsigned long omap4460_mpu_dpll_recalc(struct clk *clk)
+{
+ struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->parent)
+ return -EINVAL;
+
+ dd = clk->parent->dpll_data;
+
+ if (!dd)
+ return -EINVAL;
+
+ v = __raw_readl(dd->mult_div1_reg);
+ if (v & OMAP4460_DCC_EN_MASK)
+ return omap2_get_dpll_rate(clk->parent) * 2;
+ else
+ return omap2_get_dpll_rate(clk->parent);
+}
+
+unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk)
+{
+ u32 v;
+ unsigned long rate;
+ struct dpll_data *dd;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ rate = omap2_get_dpll_rate(clk);
+
+ /* regm4xen adds a multiplier of 4 to DPLL calculations */
+ v = __raw_readl(dd->control_reg);
+ if (v & OMAP4430_DPLL_REGM4XEN_MASK)
+ rate *= OMAP4430_REGM4XEN_MULT;
+
+ return rate;
+}
+
+long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate)
+{
+ u32 v;
+ struct dpll_data *dd;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ /* regm4xen adds a multiplier of 4 to DPLL calculations */
+ v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;
+
+ if (v)
+ target_rate = target_rate / OMAP4430_REGM4XEN_MULT;
+
+ omap2_dpll_round_rate(clk, target_rate);
+
+ if (v)
+ clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+
+ return clk->dpll_data->last_rounded_rate;
+}
+
+struct dpll_reg_tuple {
+ u16 addr;
+ u32 val;
+};
+
+struct omap4_dpll_regs {
+ char *name;
+ u32 mod_partition;
+ u32 mod_inst;
+ struct dpll_reg_tuple clkmode;
+ struct dpll_reg_tuple autoidle;
+ struct dpll_reg_tuple idlest;
+ struct dpll_reg_tuple clksel;
+ struct dpll_reg_tuple div_m2;
+ struct dpll_reg_tuple div_m3;
+ struct dpll_reg_tuple div_m4;
+ struct dpll_reg_tuple div_m5;
+ struct dpll_reg_tuple div_m6;
+ struct dpll_reg_tuple div_m7;
+ struct dpll_reg_tuple clkdcoldo;
+};
+
+static struct omap4_dpll_regs dpll_regs[] = {
+ /* MPU DPLL */
+ { .name = "mpu",
+ .mod_partition = OMAP4430_CM1_PARTITION,
+ .mod_inst = OMAP4430_CM1_CKGEN_INST,
+ .clkmode = {.addr = OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET},
+ .autoidle = {.addr = OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET},
+ .idlest = {.addr = OMAP4_CM_IDLEST_DPLL_MPU_OFFSET},
+ .clksel = {.addr = OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET},
+ .div_m2 = {.addr = OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET},
+ },
+ /* IVA DPLL */
+ { .name = "iva",
+ .mod_partition = OMAP4430_CM1_PARTITION,
+ .mod_inst = OMAP4430_CM1_CKGEN_INST,
+ .clkmode = {.addr = OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET},
+ .autoidle = {.addr = OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET},
+ .idlest = {.addr = OMAP4_CM_IDLEST_DPLL_IVA_OFFSET},
+ .clksel = {.addr = OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET},
+ .div_m4 = {.addr = OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET},
+ .div_m5 = {.addr = OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET},
+ },
+ /* ABE DPLL */
+ { .name = "abe",
+ .mod_partition = OMAP4430_CM1_PARTITION,
+ .mod_inst = OMAP4430_CM1_CKGEN_INST,
+ .clkmode = {.addr = OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET},
+ .autoidle = {.addr = OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET},
+ .idlest = {.addr = OMAP4_CM_IDLEST_DPLL_ABE_OFFSET},
+ .clksel = {.addr = OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET},
+ .div_m2 = {.addr = OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET},
+ .div_m3 = {.addr = OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET},
+ },
+ /* USB DPLL */
+ { .name = "usb",
+ .mod_partition = OMAP4430_CM2_PARTITION,
+ .mod_inst = OMAP4430_CM2_CKGEN_INST,
+ .clkmode = {.addr = OMAP4_CM_CLKMODE_DPLL_USB_OFFSET},
+ .autoidle = {.addr = OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET},
+ .idlest = {.addr = OMAP4_CM_IDLEST_DPLL_USB_OFFSET},
+ .clksel = {.addr = OMAP4_CM_CLKSEL_DPLL_USB_OFFSET},
+ .div_m2 = {.addr = OMAP4_CM_DIV_M2_DPLL_USB_OFFSET},
+ .clkdcoldo = {.addr = OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET},
+ },
+ /* PER DPLL */
+ { .name = "per",
+ .mod_partition = OMAP4430_CM2_PARTITION,
+ .mod_inst = OMAP4430_CM2_CKGEN_INST,
+ .clkmode = {.addr = OMAP4_CM_CLKMODE_DPLL_PER_OFFSET},
+ .autoidle = {.addr = OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET},
+ .idlest = {.addr = OMAP4_CM_IDLEST_DPLL_PER_OFFSET},
+ .clksel = {.addr = OMAP4_CM_CLKSEL_DPLL_PER_OFFSET},
+ .div_m2 = {.addr = OMAP4_CM_DIV_M2_DPLL_PER_OFFSET},
+ .div_m3 = {.addr = OMAP4_CM_DIV_M3_DPLL_PER_OFFSET},
+ .div_m4 = {.addr = OMAP4_CM_DIV_M4_DPLL_PER_OFFSET},
+ .div_m5 = {.addr = OMAP4_CM_DIV_M5_DPLL_PER_OFFSET},
+ .div_m6 = {.addr = OMAP4_CM_DIV_M6_DPLL_PER_OFFSET},
+ .div_m7 = {.addr = OMAP4_CM_DIV_M7_DPLL_PER_OFFSET},
+ },
+};
+
+static inline void omap4_dpll_store_reg(struct omap4_dpll_regs *dpll_reg,
+ struct dpll_reg_tuple *tuple)
+{
+ if (tuple->addr)
+ tuple->val =
+ omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
+ dpll_reg->mod_inst, tuple->addr);
+}
+
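+/*
+ * Save the configuration registers of the DPLLs listed in dpll_regs[]
+ * before device off, so omap4_dpll_resume_off() can restore them after
+ * the PRCM context has been lost.
+ */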
+void omap4_dpll_prepare_off(void)
+{
+ u32 i;
+ struct omap4_dpll_regs *dpll_reg = dpll_regs;
+
+ for (i = 0; i < ARRAY_SIZE(dpll_regs); i++, dpll_reg++) {
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->clkmode);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->autoidle);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->clksel);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m2);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m3);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m4);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m5);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m6);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m7);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->clkdcoldo);
+ omap4_dpll_store_reg(dpll_reg, &dpll_reg->idlest);
+ }
+}
+
+static void omap4_dpll_print_reg(struct omap4_dpll_regs *dpll_reg, char *name,
+ struct dpll_reg_tuple *tuple)
+{
+ if (tuple->addr)
+ pr_warn("%s - Address offset = 0x%08x, value=0x%08x\n", name,
+ tuple->addr, tuple->val);
+}
+
+static void omap4_dpll_dump_regs(struct omap4_dpll_regs *dpll_reg)
+{
+ pr_warn("%s: Unable to lock dpll %s[part=%x inst=%x]:\n",
+ __func__, dpll_reg->name, dpll_reg->mod_partition,
+ dpll_reg->mod_inst);
+ omap4_dpll_print_reg(dpll_reg, "clksel", &dpll_reg->clksel);
+ omap4_dpll_print_reg(dpll_reg, "div_m2", &dpll_reg->div_m2);
+ omap4_dpll_print_reg(dpll_reg, "div_m3", &dpll_reg->div_m3);
+ omap4_dpll_print_reg(dpll_reg, "div_m4", &dpll_reg->div_m4);
+ omap4_dpll_print_reg(dpll_reg, "div_m5", &dpll_reg->div_m5);
+ omap4_dpll_print_reg(dpll_reg, "div_m6", &dpll_reg->div_m6);
+ omap4_dpll_print_reg(dpll_reg, "div_m7", &dpll_reg->div_m7);
+ omap4_dpll_print_reg(dpll_reg, "clkdcoldo", &dpll_reg->clkdcoldo);
+ omap4_dpll_print_reg(dpll_reg, "clkmode", &dpll_reg->clkmode);
+ omap4_dpll_print_reg(dpll_reg, "autoidle", &dpll_reg->autoidle);
+ if (dpll_reg->idlest.addr)
+ pr_warn("idlest - Address offset = 0x%08x, before val=0x%08x"
+ " after = 0x%08x\n", dpll_reg->idlest.addr,
+ dpll_reg->idlest.val,
+ omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
+ dpll_reg->mod_inst,
+ dpll_reg->idlest.addr));
+}
+
+static void omap4_wait_dpll_lock(struct omap4_dpll_regs *dpll_reg)
+{
+ int j = 0;
+
+	/* Return if we don't need to lock. */
+	if ((dpll_reg->clkmode.val & OMAP4430_DPLL_EN_MASK) !=
+	     (DPLL_LOCKED << OMAP4430_DPLL_EN_SHIFT))
+		return;
+
+ while ((omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
+ dpll_reg->mod_inst,
+ dpll_reg->idlest.addr)
+ & OMAP4430_ST_DPLL_CLK_MASK) !=
+ 0x1 << OMAP4430_ST_DPLL_CLK_SHIFT
+ && j < MAX_DPLL_WAIT_TRIES) {
+ j++;
+ udelay(1);
+ }
+
+ /* if we are unable to lock, warn and move on.. */
+ if (j == MAX_DPLL_WAIT_TRIES)
+ omap4_dpll_dump_regs(dpll_reg);
+}
+
+static inline void omap4_dpll_restore_reg(struct omap4_dpll_regs *dpll_reg,
+ struct dpll_reg_tuple *tuple)
+{
+ if (tuple->addr)
+ omap4_cminst_write_inst_reg(tuple->val, dpll_reg->mod_partition,
+ dpll_reg->mod_inst, tuple->addr);
+}
+
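+/*
+ * Restore the DPLL registers saved by omap4_dpll_prepare_off(), relock
+ * each DPLL that was locked before off, and only then restore its
+ * autoidle setting.
+ */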
+void omap4_dpll_resume_off(void)
+{
+ u32 i;
+ struct omap4_dpll_regs *dpll_reg = dpll_regs;
+
+ for (i = 0; i < ARRAY_SIZE(dpll_regs); i++, dpll_reg++) {
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clksel);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m2);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m3);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m4);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m5);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m6);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m7);
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clkdcoldo);
+
+ /* Restore clkmode after the above registers are restored */
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clkmode);
+
+ omap4_wait_dpll_lock(dpll_reg);
+
+ /* Restore autoidle settings after the dpll is locked */
+ omap4_dpll_restore_reg(dpll_reg, &dpll_reg->autoidle);
+ }
+}
diff --git a/arch/arm/mach-omap2/dvfs.c b/arch/arm/mach-omap2/dvfs.c
new file mode 100644
index 0000000..e00032b
--- /dev/null
+++ b/arch/arm/mach-omap2/dvfs.c
@@ -0,0 +1,1314 @@
+/*
+ * OMAP3/OMAP4 DVFS Management Routines
+ *
+ * Author: Vishwanath BS <vishwanath.bs@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Vishwanath BS <vishwanath.bs@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/plist.h>
+#include <linux/slab.h>
+#include <linux/opp.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+#include <plat/clock.h>
+#include "dvfs.h"
+#include "smartreflex.h"
+#include "powerdomain.h"
+#include "pm.h"
+
+/**
+ * DOC: Introduction
+ * =================
+ * DVFS is a technique that uses the optimal operating frequency and voltage to
+ * allow a task to be performed in the required amount of time.
+ * OMAP processors have voltage domains whose voltage can be scaled to
+ * various levels depending on which the operating frequencies of certain
+ * devices belonging to the domain will also need to be scaled. This voltage
+ * frequency tuple is known as Operating Performance Point (OPP). A device
+ * can have multiple OPP's. Also a voltage domain could be shared between
+ * multiple devices. Also there could be dependencies between various
+ * voltage domains for maintaining system performance like VDD<X>
+ * should be at voltage v1 when VDD<Y> is at voltage v2.
+ *
+ * The design of this framework takes into account all the above mentioned
+ * points. To summarize the basic design of DVFS framework:-
+ *
+ * 1. Have device opp tables for each device whose operating frequency can be
+ * scaled. This is easy now due to the existance of hwmod layer which
+ * allow storing of device specific info. The device opp tables contain
+ * the opp pairs (frequency voltage tuples), the voltage domain pointer
+ * to which the device belongs to, the device specific set_rate and
+ * get_rate API's which will do the actual scaling of the device frequency
+ * and retrieve the current device frequency.
+ * 2. Introduce use counting on a per VDD basis. This is to take care multiple
+ * requests to scale a VDD. The VDD will be scaled to the maximum of the
+ * voltages requested.
+ * 3. Keep track of all scalable devices belonging to a particular voltage
+ * domain the voltage layer.
+ * 4. Keep track of frequency requests for each of the device. This will enable
+ * to scale individual devices to different frequency (even w/o scaling
+ * voltage aka frequency throttling)
+ * 5. Generic dvfs API that can be called by anybody to scale a device opp.
+ * This API takes the device pointer and frequency to which the device
+ * needs to be scaled to. This API then internally finds out the voltage
+ * domain to which the device belongs to and the voltage to which the voltage
+ * domain needs to be put to for the device to be scaled to the new frequency
+ * from the device opp table. Then this API will add requested frequency into
+ * the corresponding target device frequency list and add voltage request to
+ * the corresponding vdd. Subsequently it calls voltage scale function which
+ * will find out the highest requested voltage for the given vdd and scales
+ * the voltage to the required one and also adds corresponding frequency
+ * request for that voltage. It also runs through the list of all
+ * scalable devices belonging to this voltage domain and scale them to the
+ * appropriate frequencies using the set_rate pointer in the device opp
+ * tables.
+ * 6. Handle inter VDD dependecies. This will take care of scaling domain's voltage
+ * and frequency together.
+ *
+ *
+ * DOC: The Core DVFS data structure:
+ * ==================================
+ * Structure Name Example Tree
+ * ---------
+ * /|\ +-------------------+ +-------------------+
+ * | |User2 (dev2, freq2)+---\ |User4 (dev4, freq4)+---\
+ * | +-------------------+ | +-------------------+ |
+ * (struct omap_dev_user_list) | |
+ * | +-------------------+ | +-------------------+ |
+ * | |User1 (dev1, freq1)+---| |User3 (dev3, freq3)+---|
+ * \|/ +-------------------+ | +-------------------+ |
+ * --------- | |
+ * /|\ +------------+------+ +---------------+--+
+ * | | DEV1 (dev, | | DEV2 (dev) |
+ * (struct omap_vdd_dev_list)|omap_dev_user_list)| |omap_dev_user_list|
+ * | +------------+------+ +--+---------------+
+ * \|/ /|\ /-----+-------------+------> others..
+ * --------- Frequency |
+ * /|\ +--+------------------+
+ * | | VDD_n |
+ * | | (omap_vdd_dev_list, |
+ * (struct omap_vdd_dvfs_info)** | omap_vdd_user_list) |
+ * | +--+------------------+
+ * | | (ROOT NODE: omap_dvfs_info_list)
+ * \|/ |
+ * --------- Voltage \---+-------------+----------> others..
+ * /|\ \|/ +-------+----+ +-----+--------+
+ * | | vdd_user2 | | vdd_user3 |
+ * (struct omap_vdd_user_list) | (dev, volt)| | (dev, volt) |
+ * \|/ +------------+ +--------------+
+ * ---------
+ * Key: ** -> Root of the tree.
+ * NOTE: we use the priority to store the voltage/frequency
+ *
+ * For voltage dependency description, see: struct dependency:
+ * voltagedomain -> (description of the voltagedomain)
+ * omap_vdd_info -> (vdd information)
+ * omap_vdd_dep_info[]-> (stores array of dependency info)
+ * omap_vdd_dep_volt[] -> (stores array of maps)
+ * (main_volt -> dep_volt) (a singular map)
+ */
+
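+/*
+ * Illustrative usage sketch (not part of the framework itself): a client
+ * driver holding a pointer to its own device and to the device it wants
+ * scaled would typically do something like the following. The my_dev and
+ * mpu_dev pointers and the 600 MHz rate are hypothetical values chosen only
+ * for the example:
+ *
+ *	ret = omap_device_scale(my_dev, mpu_dev, 600000000);
+ *	if (ret)
+ *		dev_err(my_dev, "scaling request failed: %d\n", ret);
+ *
+ * Internally this adds a frequency request for mpu_dev, a voltage request
+ * on the vdd it belongs to, scans dependent vdds and finally calls the
+ * dvfs scale helper (see omap_device_scale() below).
+ */
+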
+/* Macros to give idea about scaling directions */
+#define DVFS_VOLT_SCALE_DOWN 0
+#define DVFS_VOLT_SCALE_NONE 1
+#define DVFS_VOLT_SCALE_UP 2
+
+/**
+ * struct omap_dev_user_list - Structure maintaining the user list per device
+ * @dev: The device requesting for a particular frequency
+ * @node: The list head entry
+ *
+ * Using this structure, user list (requesting dev * and frequency) for
+ * each device is maintained. This is how we can have different devices
+ * at different frequencies (to support frequency locking and throttling).
+ * Even if one of the devices in a given vdd has locked its frequency,
+ * others can still scale their frequency using this list.
+ * If no one has placed a frequency request for a device, then the device is
+ * set to the frequency from its opp table.
+ */
+struct omap_dev_user_list {
+ struct device *dev;
+ struct plist_node node;
+};
+
+/**
+ * struct omap_vdd_dev_list - Device list per vdd
+ * @dev: The device belonging to a particular vdd
+ * @node: The list head entry
+ * @freq_user_list: The list of frequency users for this device
+ * @clk: frequency control clock for this dev
+ * @user_lock: The lock for plist manipulation
+ */
+struct omap_vdd_dev_list {
+ struct device *dev;
+ struct list_head node;
+ struct plist_head freq_user_list;
+ struct clk *clk;
+ spinlock_t user_lock; /* spinlock for plist */
+};
+
+/**
+ * struct omap_vdd_user_list - The per vdd user list
+ * @dev: The device asking for the vdd to be set at a particular
+ * voltage
+ * @node: The list head entry
+ */
+struct omap_vdd_user_list {
+ struct device *dev;
+ struct plist_node node;
+};
+
+/**
+ * struct omap_vdd_dvfs_info - The per vdd dvfs info
+ * @node: list node for vdd_dvfs_info list
+ * @user_lock: spinlock for plist operations
+ * @vdd_user_list: The vdd user list
+ * @voltdm: Voltage domain for which dvfs info is stored
+ * @dev_list: Device list maintained per domain
+ *
+ * This is a fundamental structure used to store all the required
+ * DVFS related information for a vdd.
+ */
+struct omap_vdd_dvfs_info {
+ struct list_head node;
+
+ spinlock_t user_lock; /* spin lock */
+ struct plist_head vdd_user_list;
+ struct voltagedomain *voltdm;
+ struct list_head dev_list;
+};
+
+static LIST_HEAD(omap_dvfs_info_list);
+DEFINE_MUTEX(omap_dvfs_lock);
+
+/* Dvfs scale helper function */
+static int _dvfs_scale(struct device *req_dev, struct device *target_dev,
+ struct omap_vdd_dvfs_info *tdvfs_info);
+
+/* A few search functions to traverse and find pointers of interest */
+
+/**
+ * _dvfs_info_to_dev() - Locate the parent device associated with a dvfs_info
+ * @dvfs_info: dvfs_info to search for
+ *
+ * Returns NULL on failure.
+ */
+static struct device *_dvfs_info_to_dev(struct omap_vdd_dvfs_info *dvfs_info)
+{
+ struct omap_vdd_dev_list *tmp_dev;
+ if (IS_ERR_OR_NULL(dvfs_info))
+ return NULL;
+ if (list_empty(&dvfs_info->dev_list))
+ return NULL;
+ tmp_dev = list_first_entry(&dvfs_info->dev_list,
+ struct omap_vdd_dev_list, node);
+ return tmp_dev->dev;
+}
+
+/**
+ * _dev_to_dvfs_info() - Locate the dvfs_info for a device
+ * @dev: dev to search for
+ *
+ * Returns NULL on failure.
+ */
+static struct omap_vdd_dvfs_info *_dev_to_dvfs_info(struct device *dev)
+{
+ struct omap_vdd_dvfs_info *dvfs_info;
+ struct omap_vdd_dev_list *temp_dev;
+
+ if (IS_ERR_OR_NULL(dev))
+ return NULL;
+
+ list_for_each_entry(dvfs_info, &omap_dvfs_info_list, node) {
+ list_for_each_entry(temp_dev, &dvfs_info->dev_list, node) {
+ if (temp_dev->dev == dev)
+ return dvfs_info;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * _voltdm_to_dvfs_info() - Locate a dvfs_info given a voltdm pointer
+ * @voltdm: voltdm to search for
+ *
+ * Returns NULL on failure.
+ */
+static
+struct omap_vdd_dvfs_info *_voltdm_to_dvfs_info(struct voltagedomain *voltdm)
+{
+ struct omap_vdd_dvfs_info *dvfs_info;
+
+ if (IS_ERR_OR_NULL(voltdm))
+ return NULL;
+
+ list_for_each_entry(dvfs_info, &omap_dvfs_info_list, node) {
+ if (dvfs_info->voltdm == voltdm)
+ return dvfs_info;
+ }
+
+ return NULL;
+}
+
+/**
+ * _volt_to_opp() - Find OPP corresponding to a given voltage
+ * @dev: device pointer associated with the OPP list
+ * @volt: voltage to search for in uV
+ *
+ * Searches for exact match in the OPP list and returns handle to the matching
+ * OPP if found, else return the max available OPP.
+ * If there are multiple opps with same voltage, it will return
+ * the first available entry. Return pointer should be checked against IS_ERR.
+ *
+ * NOTE: since this uses OPP functions, call under rcu_read_lock(). This function also
+ * assumes that the cpufreq table and OPP table are in sync - any modifications
+ * to either should be synchronized.
+ */
+static struct opp *_volt_to_opp(struct device *dev, unsigned long volt)
+{
+ struct opp *opp = ERR_PTR(-ENODEV);
+ unsigned long f = 0;
+
+ do {
+ opp = opp_find_freq_ceil(dev, &f);
+ if (IS_ERR(opp)) {
+ /*
+ * if there is no OPP for corresponding volt
+ * then return max available instead
+ */
+ opp = opp_find_freq_floor(dev, &f);
+ break;
+ }
+ if (opp_get_voltage(opp) >= volt)
+ break;
+ f++;
+ } while (1);
+
+ return opp;
+}
+
+/* rest of the helper functions */
+/**
+ * _add_vdd_user() - Add a voltage request
+ * @dvfs_info: omap_vdd_dvfs_info pointer for the required vdd
+ * @dev: device making the request
+ * @volt: requested voltage in uV
+ *
+ * Adds the given device's voltage request into corresponding
+ * vdd's omap_vdd_dvfs_info user list (plist). This list is used
+ * to find the maximum voltage request for a given vdd.
+ *
+ * Returns 0 on success.
+ */
+static int _add_vdd_user(struct omap_vdd_dvfs_info *dvfs_info,
+ struct device *dev, unsigned long volt)
+{
+ struct omap_vdd_user_list *user = NULL, *temp_user;
+
+ if (!dvfs_info || IS_ERR(dvfs_info)) {
+ dev_warn(dev, "%s: VDD specified does not exist!\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&dvfs_info->user_lock);
+ plist_for_each_entry(temp_user, &dvfs_info->vdd_user_list, node) {
+ if (temp_user->dev == dev) {
+ user = temp_user;
+ break;
+ }
+ }
+
+ if (!user) {
+ user = kzalloc(sizeof(struct omap_vdd_user_list), GFP_ATOMIC);
+ if (!user) {
+ dev_err(dev,
+ "%s: Unable to creat a new user for vdd_%s\n",
+ __func__, dvfs_info->voltdm->name);
+ spin_unlock(&dvfs_info->user_lock);
+ return -ENOMEM;
+ }
+ user->dev = dev;
+ } else {
+ plist_del(&user->node, &dvfs_info->vdd_user_list);
+ }
+
+ plist_node_init(&user->node, volt);
+ plist_add(&user->node, &dvfs_info->vdd_user_list);
+
+ spin_unlock(&dvfs_info->user_lock);
+ return 0;
+}
+
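+/*
+ * Note on how the list above is consumed: because requests are kept in a
+ * priority list keyed by the requested voltage, the highest pending request
+ * can be read back directly, e.g. (mirroring what _dvfs_scale() does):
+ *
+ *	node = plist_last(&dvfs_info->vdd_user_list);
+ *	new_volt = node->prio;
+ */
+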
+/**
+ * _remove_vdd_user() - Remove a voltage request
+ * @dvfs_info: omap_vdd_dvfs_info pointer for the required vdd
+ * @dev: device making the request
+ *
+ * Removes the given device's voltage request from corresponding
+ * vdd's omap_vdd_dvfs_info user list (plist).
+ *
+ * Returns 0 on success.
+ */
+static int _remove_vdd_user(struct omap_vdd_dvfs_info *dvfs_info,
+ struct device *dev)
+{
+ struct omap_vdd_user_list *user = NULL, *temp_user;
+ int ret = 0;
+
+ if (!dvfs_info || IS_ERR(dvfs_info)) {
+ dev_err(dev, "%s: VDD specified does not exist!\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&dvfs_info->user_lock);
+ plist_for_each_entry(temp_user, &dvfs_info->vdd_user_list, node) {
+ if (temp_user->dev == dev) {
+ user = temp_user;
+ break;
+ }
+ }
+
+ if (user)
+ plist_del(&user->node, &dvfs_info->vdd_user_list);
+ else {
+ dev_err(dev, "%s: Unable to find the user for vdd_%s\n",
+ __func__, dvfs_info->voltdm->name);
+ ret = -ENOENT;
+ }
+
+ spin_unlock(&dvfs_info->user_lock);
+ kfree(user);
+
+ return ret;
+}
+
+/**
+ * _add_freq_request() - Add a requested device frequency
+ * @dvfs_info: omap_vdd_dvfs_info pointer for the required vdd
+ * @req_dev: device making the request
+ * @target_dev: target device for which frequency request is being made
+ * @freq: target device frequency
+ *
+ * This adds a requested frequency into target device's frequency list.
+ *
+ * Returns 0 on success.
+ */
+static int _add_freq_request(struct omap_vdd_dvfs_info *dvfs_info,
+ struct device *req_dev, struct device *target_dev, unsigned long freq)
+{
+ struct omap_dev_user_list *dev_user = NULL, *tmp_user;
+ struct omap_vdd_dev_list *temp_dev;
+
+ if (!dvfs_info || IS_ERR(dvfs_info)) {
+ dev_warn(target_dev, "%s: VDD specified does not exist!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(temp_dev, &dvfs_info->dev_list, node) {
+ if (temp_dev->dev == target_dev)
+ break;
+ }
+
+ if (temp_dev->dev != target_dev) {
+ dev_warn(target_dev, "%s: target_dev does not exist!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&temp_dev->user_lock);
+ plist_for_each_entry(tmp_user, &temp_dev->freq_user_list, node) {
+ if (tmp_user->dev == req_dev) {
+ dev_user = tmp_user;
+ break;
+ }
+ }
+
+ if (!dev_user) {
+ dev_user = kzalloc(sizeof(struct omap_dev_user_list),
+ GFP_ATOMIC);
+ if (!dev_user) {
+ dev_err(target_dev,
+ "%s: Unable to creat a new user for vdd_%s\n",
+ __func__, dvfs_info->voltdm->name);
+ spin_unlock(&temp_dev->user_lock);
+ return -ENOMEM;
+ }
+ dev_user->dev = req_dev;
+ } else {
+ plist_del(&dev_user->node, &temp_dev->freq_user_list);
+ }
+
+ plist_node_init(&dev_user->node, freq);
+ plist_add(&dev_user->node, &temp_dev->freq_user_list);
+ spin_unlock(&temp_dev->user_lock);
+ return 0;
+}
+
+/**
+ * _remove_freq_request() - Remove the requested device frequency
+ *
+ * @dvfs_info: omap_vdd_dvfs_info pointer for the required vdd
+ * @req_dev: device removing the request
+ * @target_dev: target device from which frequency request is being removed
+ *
+ * This removes a requested frequency from target device's frequency list.
+ *
+ * Returns 0 on success.
+ */
+static int _remove_freq_request(struct omap_vdd_dvfs_info *dvfs_info,
+ struct device *req_dev, struct device *target_dev)
+{
+ struct omap_dev_user_list *dev_user = NULL, *tmp_user;
+ int ret = 0;
+ struct omap_vdd_dev_list *temp_dev;
+
+ if (!dvfs_info || IS_ERR(dvfs_info)) {
+ dev_warn(target_dev, "%s: VDD specified does not exist!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ list_for_each_entry(temp_dev, &dvfs_info->dev_list, node) {
+ if (temp_dev->dev == target_dev)
+ break;
+ }
+
+ if (temp_dev->dev != target_dev) {
+ dev_warn(target_dev, "%s: target_dev does not exist!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&temp_dev->user_lock);
+ plist_for_each_entry(tmp_user, &temp_dev->freq_user_list, node) {
+ if (tmp_user->dev == req_dev) {
+ dev_user = tmp_user;
+ break;
+ }
+ }
+
+ if (dev_user) {
+ plist_del(&dev_user->node, &temp_dev->freq_user_list);
+ } else {
+ dev_err(target_dev,
+ "%s: Unable to remove the user for vdd_%s\n",
+ __func__, dvfs_info->voltdm->name);
+ ret = -EINVAL;
+ }
+
+ spin_unlock(&temp_dev->user_lock);
+ kfree(dev_user);
+
+ return ret;
+}
+
+/**
+ * _dep_scan_table() - Scan a dependency table and mark for scaling
+ * @dev: device requesting the dependency scan (req_dev)
+ * @dep_info: dependency information (contains the table)
+ * @main_volt: voltage dependency to search for
+ *
+ * This runs down the table provided to find the match for main_volt
+ * provided and sets up a scale request for the dependent domain
+ * for the dependent voltage.
+ *
+ * Returns 0 if all went well.
+ */
+static int _dep_scan_table(struct device *dev,
+ struct omap_vdd_dep_info *dep_info, unsigned long main_volt)
+{
+ struct omap_vdd_dep_volt *dep_table = dep_info->dep_table;
+ struct device *target_dev;
+ struct omap_vdd_dvfs_info *tdvfs_info;
+ struct opp *opp;
+ int i, ret;
+ unsigned long dep_volt = 0, new_freq = 0;
+
+ if (!dep_table) {
+ dev_err(dev, "%s: deptable not present for vdd%s\n",
+ __func__, dep_info->name);
+ return -EINVAL;
+ }
+
+ /* Now scan through the dep table for a match */
+ for (i = 0; i < dep_info->nr_dep_entries; i++) {
+ if (dep_table[i].main_vdd_volt == main_volt) {
+ dep_volt = dep_table[i].dep_vdd_volt;
+ break;
+ }
+ }
+ if (!dep_volt) {
+ dev_warn(dev, "%s: %ld volt map missing in vdd_%s\n",
+ __func__, main_volt, dep_info->name);
+ return -EINVAL;
+ }
+
+ /* populate voltdm if it is not present */
+ if (!dep_info->_dep_voltdm) {
+ dep_info->_dep_voltdm = voltdm_lookup(dep_info->name);
+ if (!dep_info->_dep_voltdm) {
+ dev_warn(dev, "%s: unable to get vdm%s\n",
+ __func__, dep_info->name);
+ return -ENODEV;
+ }
+ }
+
+ /* See if dep_volt is possible for the vdd*/
+ ret = _add_vdd_user(_voltdm_to_dvfs_info(dep_info->_dep_voltdm),
+ dev, dep_volt);
+ if (ret)
+ dev_err(dev, "%s: Failed to add dep to domain %s volt=%ld\n",
+ __func__, dep_info->name, dep_volt);
+
+ /* And also add corresponding freq request */
+ tdvfs_info = _voltdm_to_dvfs_info(dep_info->_dep_voltdm);
+ if (!tdvfs_info) {
+ dev_warn(dev, "%s: no dvfs_info\n",
+ __func__);
+ return -ENODEV;
+ }
+ target_dev = _dvfs_info_to_dev(tdvfs_info);
+ if (!target_dev) {
+ dev_warn(dev, "%s: no target_dev\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ rcu_read_lock();
+ opp = _volt_to_opp(target_dev, dep_volt);
+ if (!IS_ERR(opp))
+ new_freq = opp_get_freq(opp);
+ rcu_read_unlock();
+
+ if (new_freq) {
+ ret = _add_freq_request(tdvfs_info, dev, target_dev, new_freq);
+ if (ret) {
+ dev_err(target_dev, "%s: freqadd(%s) failed %d[f=%ld,"
+ "v=%ld]\n", __func__, dev_name(dev),
+ i, new_freq, dep_volt);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
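+/*
+ * For illustration, a dependency table entry simply maps a voltage of the
+ * main vdd to the voltage required on the dependent vdd. The voltages below
+ * are hypothetical example values in uV:
+ *
+ *	static struct omap_vdd_dep_volt example_dep_table[] = {
+ *		{.main_vdd_volt = 1100000, .dep_vdd_volt = 1025000},
+ *		{.main_vdd_volt = 1250000, .dep_vdd_volt = 1200000},
+ *	};
+ *
+ * _dep_scan_table() above looks up main_volt in such a table and raises a
+ * voltage plus frequency request on the dependent domain accordingly.
+ */
+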
+/**
+ * _dep_scan_domains() - Scan dependency domains for a device
+ * @dev: device requesting the scan
+ * @vdd: vdd_info corresponding to the device
+ * @main_volt: voltage to scan for
+ *
+ * Since each domain *may* have multiple dependent domains, we scan
+ * through each of the dependent domains and invoke _dep_scan_table to
+ * scan each table for dependent domain for dependency scaling.
+ *
+ * This assumes that the dependent domain information is NULL entry terminated.
+ * Returns 0 if all went well.
+ */
+static int _dep_scan_domains(struct device *dev,
+ struct omap_vdd_info *vdd, unsigned long main_volt)
+{
+ struct omap_vdd_dep_info *dep_info = vdd->dep_vdd_info;
+ int ret = 0, r;
+
+ if (!dep_info) {
+ dev_dbg(dev, "%s: No dependent VDD\n", __func__);
+ return 0;
+ }
+
+ /* First scan through the mydomain->dep_domain list */
+ while (dep_info->nr_dep_entries) {
+ r = _dep_scan_table(dev, dep_info, main_volt);
+ /* Store last failed value */
+ ret = (r) ? r : ret;
+ dep_info++;
+ }
+
+ return ret;
+}
+
+/**
+ * _dep_scale_domains() - Cause a scale of all dependent domains
+ * @req_dev: device requesting the scale
+ * @req_vdd: vdd_info corresponding to the requesting device.
+ *
+ * This walks through every dependent domain and triggers a scale
+ * It is assumed that the corresponding scale handling for the
+ * domain translates this to freq and voltage scale operations as
+ * needed.
+ *
+ * Note: This uses _dvfs_scale and one should be careful not to
+ * create a circular dependency (e.g. vdd_mpu->vdd_core->vdd_mpu)
+ * which can create deadlocks. No protection is provided to prevent
+ * this condition and a tree organization is assumed.
+ *
+ * Returns 0 if all went fine.
+ */
+static int _dep_scale_domains(struct device *req_dev,
+ struct omap_vdd_info *req_vdd)
+{
+ struct omap_vdd_dep_info *dep_info = req_vdd->dep_vdd_info;
+ int ret = 0, r;
+
+ if (!dep_info) {
+ dev_dbg(req_dev, "%s: No dependent VDD\n", __func__);
+ return 0;
+ }
+
+ /* First scan through the mydomain->dep_domain list */
+ while (dep_info->nr_dep_entries) {
+ struct voltagedomain *tvoltdm = dep_info->_dep_voltdm;
+
+ r = 0;
+ /* Scale it only if I have a voltdm mapped up for the dep */
+ if (tvoltdm) {
+ struct omap_vdd_dvfs_info *tdvfs_info;
+ struct device *target_dev;
+ tdvfs_info = _voltdm_to_dvfs_info(tvoltdm);
+ if (!tdvfs_info) {
+ dev_warn(req_dev, "%s: no dvfs_info\n",
+ __func__);
+ goto next;
+ }
+ target_dev = _dvfs_info_to_dev(tdvfs_info);
+ if (!target_dev) {
+ dev_warn(req_dev, "%s: no target_dev\n",
+ __func__);
+ goto next;
+ }
+ r = _dvfs_scale(req_dev, target_dev, tdvfs_info);
+next:
+ if (r)
+ dev_err(req_dev, "%s: dvfs_scale to %s =%d\n",
+ __func__, dev_name(target_dev), r);
+ }
+ /* Store last failed value */
+ ret = (r) ? r : ret;
+ dep_info++;
+ }
+
+ return ret;
+}
+
+/**
+ * _dvfs_scale() : Scale the devices associated with a voltage domain
+ * @req_dev: Device requesting the scale
+ * @target_dev: Device requesting to be scaled
+ * @tdvfs_info: omap_vdd_dvfs_info pointer for the target domain
+ *
+ * This runs through the list of devices associated with the
+ * voltage domain and scales the device rates to the one requested
+ * by the user or those corresponding to the new voltage of the
+ * voltage domain. Target voltage is the highest voltage in the vdd_user_list.
+ *
+ * Returns 0 on success else the error value.
+ */
+static int _dvfs_scale(struct device *req_dev, struct device *target_dev,
+ struct omap_vdd_dvfs_info *tdvfs_info)
+{
+ unsigned long curr_volt, new_volt;
+ int volt_scale_dir = DVFS_VOLT_SCALE_DOWN;
+ struct omap_vdd_dev_list *temp_dev;
+ struct plist_node *node;
+ int ret = 0;
+ struct voltagedomain *voltdm;
+ struct omap_vdd_info *vdd;
+ struct omap_volt_data *new_vdata;
+ struct omap_volt_data *curr_vdata;
+
+ voltdm = tdvfs_info->voltdm;
+ if (IS_ERR_OR_NULL(voltdm)) {
+ dev_err(target_dev, "%s: bad voltdm\n", __func__);
+ return -EINVAL;
+ }
+ vdd = voltdm->vdd;
+
+ /* Find the highest voltage being requested */
+ node = plist_last(&tdvfs_info->vdd_user_list);
+ new_volt = node->prio;
+
+ new_vdata = omap_voltage_get_voltdata(voltdm, new_volt);
+ if (IS_ERR_OR_NULL(new_vdata)) {
+ pr_err("%s:%s: Bad New voltage data for %ld\n",
+ __func__, voltdm->name, new_volt);
+ return PTR_ERR(new_vdata);
+ }
+ new_volt = omap_get_operation_voltage(new_vdata);
+ curr_vdata = omap_voltage_get_curr_vdata(voltdm);
+ if (IS_ERR_OR_NULL(curr_vdata)) {
+ pr_err("%s:%s: Bad Current voltage data\n",
+ __func__, voltdm->name);
+ return PTR_ERR(curr_vdata);
+ }
+
+ /* Disable smartreflex module across voltage and frequency scaling */
+ omap_sr_disable(voltdm);
+
+ /* Pick up the current voltage ONLY after ensuring no changes occur */
+ curr_volt = omap_vp_get_curr_volt(voltdm);
+ if (!curr_volt)
+ curr_volt = omap_get_operation_voltage(curr_vdata);
+
+ /* Make a decision to scale dependent domain based on nominal voltage */
+ if (omap_get_nominal_voltage(new_vdata) >
+ omap_get_nominal_voltage(curr_vdata)) {
+ ret = _dep_scale_domains(target_dev, vdd);
+ if (ret) {
+ dev_err(target_dev,
+ "%s: Error(%d)scale dependent with %ld volt\n",
+ __func__, ret, new_volt);
+ goto fail;
+ }
+ }
+
+ if (voltdm->abb && omap_get_nominal_voltage(new_vdata) >
+ omap_get_nominal_voltage(curr_vdata)) {
+ ret = omap_ldo_abb_pre_scale(voltdm, new_vdata);
+ if (ret) {
+ pr_err("%s: ABB prescale failed for vdd%s: %d\n",
+ __func__, voltdm->name, ret);
+ goto fail;
+ }
+ }
+
+ /* Now decide on switching OPP */
+ if (curr_volt == new_volt) {
+ volt_scale_dir = DVFS_VOLT_SCALE_NONE;
+ } else if (curr_volt < new_volt) {
+ ret = voltdm_scale(voltdm, new_vdata);
+ if (ret) {
+ dev_err(target_dev,
+ "%s: Unable to scale the %s to %ld volt\n",
+ __func__, voltdm->name, new_volt);
+ goto fail;
+ }
+ volt_scale_dir = DVFS_VOLT_SCALE_UP;
+ }
+
+ if (voltdm->abb && omap_get_nominal_voltage(new_vdata) >
+ omap_get_nominal_voltage(curr_vdata)) {
+ ret = omap_ldo_abb_post_scale(voltdm, new_vdata);
+ if (ret) {
+ pr_err("%s: ABB prescale failed for vdd%s: %d\n",
+ __func__, voltdm->name, ret);
+ goto fail;
+ }
+ }
+
+ /* Move all devices in list to the required frequencies */
+ list_for_each_entry(temp_dev, &tdvfs_info->dev_list, node) {
+ struct device *dev;
+ struct opp *opp;
+ unsigned long freq = 0;
+ int r;
+
+ dev = temp_dev->dev;
+ if (!plist_head_empty(&temp_dev->freq_user_list)) {
+ node = plist_last(&temp_dev->freq_user_list);
+ freq = node->prio;
+ } else {
+ /*
+ * Is this dev the target_device of a dependent domain?
+ * If so we would likely have a voltage request without a
+ * corresponding frequency request, so scale to the
+ * frequency matching the new voltage if none is pending.
+ */
+ if (target_dev == dev) {
+ rcu_read_lock();
+ opp = _volt_to_opp(dev, new_volt);
+ if (!IS_ERR(opp))
+ freq = opp_get_freq(opp);
+ rcu_read_unlock();
+ }
+ if (!freq)
+ continue;
+ }
+
+ if (freq == clk_get_rate(temp_dev->clk)) {
+ dev_dbg(dev, "%s: Already at the requested"
+ "rate %ld\n", __func__, freq);
+ continue;
+ }
+
+ r = clk_set_rate(temp_dev->clk, freq);
+ if (r < 0) {
+ dev_err(dev, "%s: clk set rate frq=%ld failed(%d)\n",
+ __func__, freq, r);
+ ret = r;
+ }
+ }
+
+ if (ret)
+ goto fail;
+
+ if (voltdm->abb && omap_get_nominal_voltage(new_vdata) <
+ omap_get_nominal_voltage(curr_vdata)) {
+ ret = omap_ldo_abb_pre_scale(voltdm, new_vdata);
+ if (ret) {
+ pr_err("%s: ABB prescale failed for vdd%s: %d\n",
+ __func__, voltdm->name, ret);
+ goto fail;
+ }
+ }
+
+ if (DVFS_VOLT_SCALE_DOWN == volt_scale_dir)
+ voltdm_scale(voltdm, new_vdata);
+
+ if (voltdm->abb && omap_get_nominal_voltage(new_vdata) <
+ omap_get_nominal_voltage(curr_vdata)) {
+ ret = omap_ldo_abb_post_scale(voltdm, new_vdata);
+ if (ret)
+ pr_err("%s: ABB postscale failed for vdd%s: %d\n",
+ __func__, voltdm->name, ret);
+ }
+
+ /* Make a decision to scale dependent domain based on nominal voltage */
+ if (omap_get_nominal_voltage(new_vdata) <
+ omap_get_nominal_voltage(curr_vdata)) {
+ _dep_scale_domains(target_dev, vdd);
+ }
+
+ /* Ensure that current voltage data pointer points to new volt */
+ if (curr_volt == new_volt && omap_get_nominal_voltage(new_vdata) !=
+ omap_get_nominal_voltage(curr_vdata)) {
+ voltdm->curr_volt = new_vdata;
+ omap_vp_update_errorgain(voltdm, new_vdata);
+ }
+
+ /* All clear.. go out gracefully */
+ goto out;
+
+fail:
+ pr_warning("%s: domain%s: No clean recovery available! could be bad!\n",
+ __func__, voltdm->name);
+out:
+ /* Re-enable Smartreflex module */
+ omap_sr_enable(voltdm, new_vdata);
+
+ return ret;
+}
+
+/* Public functions */
+
+/**
+ * omap_device_scale() - Set a new rate at which the device is to operate
+ * @req_dev: pointer to the device requesting the scaling.
+ * @target_dev: pointer to the device that is to be scaled
+ * @rate: the new rate for the device.
+ *
+ * This API gets the device opp table associated with this device and
+ * tries putting the device to the requested rate and the voltage domain
+ * associated with the device to the voltage corresponding to the
+ * requested rate. Since multiple devices can be associated with a
+ * voltage domain this API finds out the possible voltage the
+ * voltage domain can enter and then decides on the final device
+ * rate.
+ *
+ * Return 0 on success else the error value
+ */
+int omap_device_scale(struct device *req_dev, struct device *target_dev,
+ unsigned long rate)
+{
+ struct opp *opp;
+ unsigned long volt, freq = rate, new_freq = 0;
+ struct omap_vdd_dvfs_info *tdvfs_info;
+ struct platform_device *pdev;
+ struct omap_device *od;
+ struct device *dev;
+ int ret = 0;
+
+ pdev = container_of(target_dev, struct platform_device, dev);
+ if (IS_ERR_OR_NULL(pdev)) {
+ pr_err("%s: pdev is null!\n", __func__);
+ return -EINVAL;
+ }
+
+ od = container_of(pdev, struct omap_device, pdev);
+ if (IS_ERR_OR_NULL(od)) {
+ pr_err("%s: od is null!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!omap_pm_is_ready()) {
+ dev_dbg(target_dev, "%s: pm is not ready yet\n", __func__);
+ return -EBUSY;
+ }
+
+ /* Lock me to ensure cross domain scaling is secure */
+ mutex_lock(&omap_dvfs_lock);
+
+ rcu_read_lock();
+ opp = opp_find_freq_ceil(target_dev, &freq);
+ /* If we dont find a max, try a floor at least */
+ if (IS_ERR(opp))
+ opp = opp_find_freq_floor(target_dev, &freq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(target_dev, "%s: Unable to find OPP for freq%ld\n",
+ __func__, rate);
+ ret = -ENODEV;
+ goto out;
+ }
+ volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ tdvfs_info = _dev_to_dvfs_info(target_dev);
+ if (IS_ERR_OR_NULL(tdvfs_info)) {
+ dev_err(target_dev, "%s: (req=%s) no vdd![f=%ld, v=%ld]\n",
+ __func__, dev_name(req_dev), freq, volt);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = _add_freq_request(tdvfs_info, req_dev, target_dev, freq);
+ if (ret) {
+ dev_err(target_dev, "%s: freqadd(%s) failed %d[f=%ld, v=%ld]\n",
+ __func__, dev_name(req_dev), ret, freq, volt);
+ goto out;
+ }
+
+ ret = _add_vdd_user(tdvfs_info, req_dev, volt);
+ if (ret) {
+ dev_err(target_dev, "%s: vddadd(%s) failed %d[f=%ld, v=%ld]\n",
+ __func__, dev_name(req_dev), ret, freq, volt);
+ _remove_freq_request(tdvfs_info, req_dev,
+ target_dev);
+ goto out;
+ }
+
+ /* Check for any dep domains and add the user request */
+ ret = _dep_scan_domains(target_dev, tdvfs_info->voltdm->vdd, volt);
+ if (ret) {
+ dev_err(target_dev,
+ "%s: Error in scan domains for vdd_%s\n",
+ __func__, tdvfs_info->voltdm->name);
+ goto out;
+ }
+
+ dev = _dvfs_info_to_dev(tdvfs_info);
+ if (!dev) {
+ dev_warn(dev, "%s: no target_dev\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (dev != target_dev) {
+ rcu_read_lock();
+ opp = _volt_to_opp(dev, volt);
+ if (!IS_ERR(opp))
+ new_freq = opp_get_freq(opp);
+ rcu_read_unlock();
+ if (new_freq) {
+ ret = _add_freq_request(tdvfs_info, req_dev, dev,
+ new_freq);
+ if (ret) {
+ dev_err(target_dev, "%s: freqadd(%s) failed %d"
+ "[f=%ld, v=%ld]\n", __func__,
+ dev_name(req_dev), ret, freq, volt);
+ goto out;
+ }
+ }
+ }
+
+ /* Do the actual scaling */
+ ret = _dvfs_scale(req_dev, target_dev, tdvfs_info);
+ if (ret) {
+ dev_err(target_dev, "%s: scale by %s failed %d[f=%ld, v=%ld]\n",
+ __func__, dev_name(req_dev), ret, freq, volt);
+ _remove_freq_request(tdvfs_info, req_dev,
+ target_dev);
+ _remove_vdd_user(tdvfs_info, req_dev);
+ /* Fall through */
+ }
+ /* Fall through */
+out:
+ mutex_unlock(&omap_dvfs_lock);
+ return ret;
+}
+EXPORT_SYMBOL(omap_device_scale);
+
+#ifdef CONFIG_PM_DEBUG
+static int dvfs_dump_vdd(struct seq_file *sf, void *unused)
+{
+ int k;
+ struct omap_vdd_dvfs_info *dvfs_info;
+ struct omap_vdd_dev_list *tdev;
+ struct omap_dev_user_list *duser;
+ struct omap_vdd_user_list *vuser;
+ struct omap_vdd_info *vdd;
+ struct omap_vdd_dep_info *dep_info;
+ struct voltagedomain *voltdm;
+ struct omap_volt_data *volt_data;
+ int anyreq;
+ int anyreq2;
+
+ dvfs_info = (struct omap_vdd_dvfs_info *)sf->private;
+ if (IS_ERR_OR_NULL(dvfs_info)) {
+ pr_err("%s: NO DVFS?\n", __func__);
+ return -EINVAL;
+ }
+
+ voltdm = dvfs_info->voltdm;
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s: NO voltdm?\n", __func__);
+ return -EINVAL;
+ }
+
+ vdd = voltdm->vdd;
+ if (IS_ERR_OR_NULL(vdd)) {
+ pr_err("%s: NO vdd data?\n", __func__);
+ return -EINVAL;
+ }
+
+ seq_printf(sf, "vdd_%s\n", voltdm->name);
+ mutex_lock(&omap_dvfs_lock);
+ spin_lock(&dvfs_info->user_lock);
+
+ seq_printf(sf, "|- voltage requests\n| |\n");
+ anyreq = 0;
+ plist_for_each_entry(vuser, &dvfs_info->vdd_user_list, node) {
+ seq_printf(sf, "| |-%d: %s:%s\n",
+ vuser->node.prio,
+ dev_driver_string(vuser->dev), dev_name(vuser->dev));
+ anyreq = 1;
+ }
+
+ spin_unlock(&dvfs_info->user_lock);
+
+ if (!anyreq)
+ seq_printf(sf, "| `-none\n");
+ else
+ seq_printf(sf, "| X\n");
+ seq_printf(sf, "|\n");
+
+ seq_printf(sf, "|- frequency requests\n| |\n");
+ anyreq2 = 0;
+ list_for_each_entry(tdev, &dvfs_info->dev_list, node) {
+ anyreq = 0;
+ seq_printf(sf, "| |- %s:%s\n",
+ dev_driver_string(tdev->dev), dev_name(tdev->dev));
+ spin_lock(&tdev->user_lock);
+ plist_for_each_entry(duser, &tdev->freq_user_list, node) {
+ seq_printf(sf, "| | |-%d: %s:%s\n",
+ duser->node.prio,
+ dev_driver_string(duser->dev),
+ dev_name(duser->dev));
+ anyreq = 1;
+ }
+
+ spin_unlock(&tdev->user_lock);
+
+ if (!anyreq)
+ seq_printf(sf, "| | `-none\n");
+ else
+ seq_printf(sf, "| | X\n");
+ anyreq2 = 1;
+ }
+ if (!anyreq2)
+ seq_printf(sf, "| `-none\n");
+ else
+ seq_printf(sf, "| X\n");
+
+ volt_data = vdd->volt_data;
+ seq_printf(sf, "|- Supported voltages\n| |\n");
+ anyreq = 0;
+ while (volt_data && volt_data->volt_nominal) {
+ seq_printf(sf, "| |-%d\n", volt_data->volt_nominal);
+ anyreq = 1;
+ volt_data++;
+ }
+ if (!anyreq)
+ seq_printf(sf, "| `-none\n");
+ else
+ seq_printf(sf, "| X\n");
+
+ dep_info = vdd->dep_vdd_info;
+ seq_printf(sf, "`- voltage dependencies\n |\n");
+ anyreq = 0;
+ while (dep_info && dep_info->nr_dep_entries) {
+ struct omap_vdd_dep_volt *dep_table = dep_info->dep_table;
+
+ seq_printf(sf, " |-on vdd_%s\n", dep_info->name);
+
+ for (k = 0; k < dep_info->nr_dep_entries; k++) {
+ seq_printf(sf, " | |- %d => %d\n",
+ dep_table[k].main_vdd_volt,
+ dep_table[k].dep_vdd_volt);
+ }
+
+ anyreq = 1;
+ dep_info++;
+ }
+
+ if (!anyreq)
+ seq_printf(sf, " `- none\n");
+ else
+ seq_printf(sf, " X X\n");
+
+ mutex_unlock(&omap_dvfs_lock);
+ return 0;
+}
+
+static int dvfs_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dvfs_dump_vdd, inode->i_private);
+}
+
+static struct file_operations debugdvfs_fops = {
+ .open = dvfs_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry __initdata *dvfsdebugfs_dir;
+
+static void __init dvfs_dbg_init(struct omap_vdd_dvfs_info *dvfs_info)
+{
+ struct dentry *ddir;
+
+ /* create a base dir */
+ if (!dvfsdebugfs_dir)
+ dvfsdebugfs_dir = debugfs_create_dir("dvfs", NULL);
+ if (IS_ERR_OR_NULL(dvfsdebugfs_dir)) {
+ WARN_ONCE("%s: Unable to create base DVFS dir\n", __func__);
+ return;
+ }
+
+ if (IS_ERR_OR_NULL(dvfs_info->voltdm)) {
+ pr_err("%s: no voltdm\n", __func__);
+ return;
+ }
+
+ ddir = debugfs_create_dir(dvfs_info->voltdm->name, dvfsdebugfs_dir);
+ if (IS_ERR_OR_NULL(ddir)) {
+ pr_warning("%s: unable to create subdir %s\n", __func__,
+ dvfs_info->voltdm->name);
+ return;
+ }
+
+ debugfs_create_file("info", S_IRUGO, ddir,
+ (void *)dvfs_info, &debugdvfs_fops);
+}
+#else /* CONFIG_PM_DEBUG */
+static inline void dvfs_dbg_init(struct omap_vdd_dvfs_info *dvfs_info)
+{
+ return;
+}
+#endif /* CONFIG_PM_DEBUG */
+
+/**
+ * omap_dvfs_register_device - Add a parent device into dvfs managed list
+ * @dev: Device to be added
+ * @voltdm_name: Name of the voltage domain for the device
+ * @clk_name: Name of the clock for the device
+ *
+ * This function adds a given device into the device list of the corresponding
+ * vdd's omap_vdd_dvfs_info structure. This list is traversed to scale
+ * frequencies of all the devices on a given vdd.
+ *
+ * Returns 0 on success.
+ */
+int __init omap_dvfs_register_device(struct device *dev, char *voltdm_name,
+ char *clk_name)
+{
+ struct omap_vdd_dev_list *temp_dev;
+ struct omap_vdd_dvfs_info *dvfs_info;
+ struct clk *clk = NULL;
+ struct voltagedomain *voltdm;
+ int ret = 0;
+
+ if (!voltdm_name) {
+ dev_err(dev, "%s: Bad voltdm name!\n", __func__);
+ return -EINVAL;
+ }
+ if (!clk_name) {
+ dev_err(dev, "%s: Bad clk name!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Lock me to secure structure changes */
+ mutex_lock(&omap_dvfs_lock);
+
+ voltdm = voltdm_lookup(voltdm_name);
+ if (!voltdm) {
+ dev_warn(dev, "%s: unable to find voltdm %s!\n",
+ __func__, voltdm_name);
+ ret = -EINVAL;
+ goto out;
+ }
+ dvfs_info = _voltdm_to_dvfs_info(voltdm);
+ if (!dvfs_info) {
+ dvfs_info = kzalloc(sizeof(struct omap_vdd_dvfs_info),
+ GFP_KERNEL);
+ if (!dvfs_info) {
+ dev_warn(dev, "%s: unable to alloc memory!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+ dvfs_info->voltdm = voltdm;
+
+ /* Init the plist */
+ spin_lock_init(&dvfs_info->user_lock);
+ plist_head_init(&dvfs_info->vdd_user_list);
+ /* Init the device list */
+ INIT_LIST_HEAD(&dvfs_info->dev_list);
+
+ list_add(&dvfs_info->node, &omap_dvfs_info_list);
+
+ dvfs_dbg_init(dvfs_info);
+ }
+
+ /* If the device is already added, we don't need to do more */
+ list_for_each_entry(temp_dev, &dvfs_info->dev_list, node) {
+ if (temp_dev->dev == dev)
+ goto out;
+ }
+
+ temp_dev = kzalloc(sizeof(struct omap_vdd_dev_list), GFP_KERNEL);
+ if (!temp_dev) {
+ dev_err(dev, "%s: Unable to creat a new device for vdd_%s\n",
+ __func__, dvfs_info->voltdm->name);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ clk = clk_get(dev, clk_name);
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_warn(dev, "%s: Bad clk pointer!\n", __func__);
+ kfree(temp_dev);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize priority ordered list */
+ spin_lock_init(&temp_dev->user_lock);
+ plist_head_init(&temp_dev->freq_user_list);
+
+ temp_dev->dev = dev;
+ temp_dev->clk = clk;
+ list_add_tail(&temp_dev->node, &dvfs_info->dev_list);
+
+ /* Fall through */
+out:
+ mutex_unlock(&omap_dvfs_lock);
+ return ret;
+}
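+
+/*
+ * Illustrative registration sketch: board or OMAP init code would register
+ * each scalable device against its voltage domain and functional clock, for
+ * example (the domain and clock names here are hypothetical placeholders):
+ *
+ *	ret = omap_dvfs_register_device(&pdev->dev, "mpu", "dpll_mpu_ck");
+ *	if (ret)
+ *		dev_warn(&pdev->dev, "dvfs registration failed: %d\n", ret);
+ *
+ * After registration the device shows up in the per-vdd dev_list and can be
+ * a target of omap_device_scale().
+ */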
diff --git a/arch/arm/mach-omap2/dvfs.h b/arch/arm/mach-omap2/dvfs.h
new file mode 100644
index 0000000..b0a0cd6
--- /dev/null
+++ b/arch/arm/mach-omap2/dvfs.h
@@ -0,0 +1,47 @@
+/*
+ * OMAP3/OMAP4 DVFS Management Routines
+ *
+ * Author: Vishwanath BS <vishwanath.bs@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Vishwanath BS <vishwanath.bs@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DVFS_H
+#define __ARCH_ARM_MACH_OMAP2_DVFS_H
+#include <plat/omap_hwmod.h>
+#include "voltage.h"
+
+#ifdef CONFIG_PM
+#include <linux/mutex.h>
+extern struct mutex omap_dvfs_lock;
+int omap_dvfs_register_device(struct device *dev, char *voltdm_name,
+ char *clk_name);
+int omap_device_scale(struct device *req_dev, struct device *target_dev,
+ unsigned long rate);
+
+static inline bool omap_dvfs_is_any_dev_scaling(void)
+{
+ return mutex_is_locked(&omap_dvfs_lock);
+}
+#else
+static inline int omap_dvfs_register_device(struct device *dev,
+ char *voltdm_name, char *clk_name)
+{
+ return -EINVAL;
+}
+static inline int omap_device_scale(struct device *req_dev,
+ struct device *target_dev, unsigned long rate)
+{
+ return -EINVAL;
+}
+static inline bool omap_dvfs_is_any_dev_scaling(void)
+{
+ return false;
+}
+#endif
+#endif
diff --git a/arch/arm/mach-omap2/emif.c b/arch/arm/mach-omap2/emif.c
new file mode 100644
index 0000000..8f4ec5f
--- /dev/null
+++ b/arch/arm/mach-omap2/emif.c
@@ -0,0 +1,1514 @@
+/*
+ * OMAP4 EMIF platform driver
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Aneesh V <aneesh@ti.com>
+ * Vibhore Vardhan <vvardhan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+#include <mach/emif-44xx.h>
+#include <mach/emif.h>
+#include <mach/lpddr2-jedec.h>
+#include <mach/omap4-common.h>
+
+#include "voltage.h"
+
+/* Utility macro for masking and setting a field in a register/variable */
+#define mask_n_set(reg, shift, msk, val) \
+ (reg) = (((reg) & ~(msk))|(((val) << (shift)) & msk))
+
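+/*
+ * For example, with a 4-bit field at bit 8 (shift = 8, msk = 0xf00),
+ * mask_n_set(reg, 8, 0xf00, 0x5) clears bits [11:8] of reg and then ORs in
+ * 0x500, leaving every other bit of reg untouched. The register-building
+ * helpers below use it this way for each SDRAM config/timing field.
+ */
+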
+struct emif_instance {
+ void __iomem *base;
+ u16 irq;
+ struct platform_device *pdev;
+ bool ddr_refresh_disabled;
+};
+static struct emif_instance emif[EMIF_NUM_INSTANCES];
+static struct emif_regs *emif_curr_regs[EMIF_NUM_INSTANCES];
+static struct emif_regs *emif1_regs_cache[EMIF_MAX_NUM_FREQUENCIES];
+static struct emif_regs *emif2_regs_cache[EMIF_MAX_NUM_FREQUENCIES];
+static struct emif_device_details *emif_devices[2];
+static u32 emif_temperature_level[EMIF_NUM_INSTANCES] = { SDRAM_TEMP_NOMINAL,
+ SDRAM_TEMP_NOMINAL
+};
+
+static u32 emif_notify_pending;
+static u32 emif_thermal_handling_pending;
+static u32 T_den, T_num;
+
+static struct omap_device_pm_latency omap_emif_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static void do_cancel_out(u32 *num, u32 *den, u32 factor)
+{
+ while (1) {
+ if (((*num) / factor * factor == (*num)) &&
+ ((*den) / factor * factor == (*den))) {
+ (*num) /= factor;
+ (*den) /= factor;
+ } else
+ break;
+ }
+}
+
+static void cancel_out(u32 *num, u32 *den)
+{
+ do_cancel_out(num, den, 2);
+ do_cancel_out(num, den, 3);
+ do_cancel_out(num, den, 5);
+ do_cancel_out(num, den, 7);
+ do_cancel_out(num, den, 11);
+ do_cancel_out(num, den, 13);
+ do_cancel_out(num, den, 17);
+}
+
+/*
+ * Get the period in ns (in fraction form) for a given frequency:
+ * Getting it in fraction form gives better accuracy in integer arithmetic.
+ * freq_hz - input: frequency in Hertz
+ * den_limit - input: upper limit for denominator. see the description of
+ * EMIF_PERIOD_DEN_LIMIT for more details
+ * period_den - output: pointer to denominator of period in ns
+ * period_num - output: pointer to numerator of period in ns
+ */
+static void get_period(u32 freq_hz, u32 den_limit, u32 *period_num,
+ u32 *period_den)
+{
+ *period_num = 1000000000; /* 10^9 to convert the period to 'ns' */
+ *period_den = freq_hz;
+ cancel_out(period_num, period_den);
+ /* make sure den <= den_limit at the cost of some accuracy */
+ while ((*period_den) > den_limit) {
+ *period_num /= 2;
+ *period_den /= 2;
+ }
+}
+
+/*
+ * Calculate the period of DDR clock from frequency value and set the
+ * denominator and numerator in global variables for easy access later
+ */
+static void set_ddr_clk_period(u32 freq)
+{
+ get_period(freq, EMIF_PERIOD_DEN_LIMIT, &T_num, &T_den);
+}
+
+/*
+ * Convert time in nano seconds to number of cycles of DDR clock
+ */
+static u32 ns_2_cycles(u32 ns)
+{
+ return ((ns * T_den) + T_num - 1) / T_num;
+}
+
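+/*
+ * Worked example: with a DDR clock of 400 MHz (an arbitrary example value),
+ * get_period() reduces 1000000000/400000000 to T_num = 5, T_den = 2,
+ * i.e. a period of 2.5 ns. ns_2_cycles(10) then evaluates to
+ * ((10 * 2) + 5 - 1) / 5 = 4 cycles, the ceiling of 10 ns / 2.5 ns.
+ */
+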
+/*
+ * Like ns_2_cycles, with the difference that the time passed in is 2 times the
+ * actual value (to avoid fractions). The number of cycles returned is for the
+ * original value of the timing parameter.
+ */
+static u32 ns_x2_2_cycles(u32 ns)
+{
+ return ((ns * T_den) + T_num * 2 - 1) / (T_num * 2);
+}
+
+/*
+ * Find addressing table index based on the device's type(S2 or S4) and
+ * density
+ */
+static s8 addressing_table_index(u8 type, u8 density, u8 width)
+{
+ u8 index;
+ if (unlikely((density > LPDDR2_DENSITY_8Gb) ||
+ (width == LPDDR2_IO_WIDTH_8)))
+ return -1;
+
+ /*
+ * Look at the way ADDR_TABLE_INDEX* values have been defined
+ * in emif.h compared to LPDDR2_DENSITY_* values
+ * The table is laid out in the increasing order of density
+ * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
+ * at the end
+ */
+ if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
+ index = ADDR_TABLE_INDEX1GS2;
+ else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
+ index = ADDR_TABLE_INDEX2GS2;
+ else
+ index = density;
+
+ pr_debug("emif: addressing table index %d", index);
+
+ return index;
+}
+
+/*
+ * Find the right timing table from the array of timing
+ * tables of the device using DDR clock frequency
+ */
+static const struct lpddr2_timings *get_timings_table(
+ const struct lpddr2_timings * const *device_timings, u32 freq)
+{
+ u32 i, temp, freq_nearest;
+ const struct lpddr2_timings *timings = NULL;
+
+ emif_assert(freq <= MAX_LPDDR2_FREQ);
+ emif_assert(device_timings);
+
+ /*
+ * Start with the maximum allowed frequency - that is always safe
+ */
+ freq_nearest = MAX_LPDDR2_FREQ;
+ /*
+ * Find the timings table that has the max frequency value:
+ * i. Above or equal to the DDR frequency - safe
+ * ii. The lowest that satisfies condition (i) - optimal
+ */
+ for (i = 0; i < MAX_NUM_SPEEDBINS; i++) {
+ if (device_timings[i]) {
+ temp = device_timings[i]->max_freq;
+ if ((temp >= freq) && (temp <= freq_nearest)) {
+ freq_nearest = temp;
+ timings = device_timings[i];
+ }
+ }
+ }
+ pr_debug("emif: timings table: %d", freq_nearest);
+ return timings;
+}
+
+/*
+ * Finds the value of emif_sdram_config_reg
+ * All parameters are programmed based on the device on CS0.
+ * If there is a device on CS1, it will be the same as that on CS0 or
+ * it will be NVM. We don't support NVM yet.
+ * If cs1_device pointer is NULL it is assumed that there is no device
+ * on CS1
+ */
+static u32 get_sdram_config_reg(const struct lpddr2_device_info *cs0_device,
+ const struct lpddr2_device_info *cs1_device,
+ const struct lpddr2_addressing *addressing,
+ u8 RL)
+{
+ u32 config_reg = 0;
+
+ mask_n_set(config_reg, OMAP44XX_REG_SDRAM_TYPE_SHIFT,
+ OMAP44XX_REG_SDRAM_TYPE_MASK, cs0_device->type + 4);
+
+ mask_n_set(config_reg, OMAP44XX_REG_IBANK_POS_SHIFT,
+ OMAP44XX_REG_IBANK_POS_MASK,
+ EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING);
+
+ mask_n_set(config_reg, OMAP44XX_REG_NARROW_MODE_SHIFT,
+ OMAP44XX_REG_NARROW_MODE_MASK, cs0_device->io_width);
+
+ mask_n_set(config_reg, OMAP44XX_REG_CL_SHIFT, OMAP44XX_REG_CL_MASK, RL);
+
+ mask_n_set(config_reg, OMAP44XX_REG_ROWSIZE_SHIFT,
+ OMAP44XX_REG_ROWSIZE_MASK,
+ addressing->row_sz[cs0_device->io_width]);
+
+ mask_n_set(config_reg, OMAP44XX_REG_IBANK_SHIFT,
+ OMAP44XX_REG_IBANK_MASK, addressing->num_banks);
+
+ mask_n_set(config_reg, OMAP44XX_REG_EBANK_SHIFT,
+ OMAP44XX_REG_EBANK_MASK,
+ (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS));
+
+ mask_n_set(config_reg, OMAP44XX_REG_PAGESIZE_SHIFT,
+ OMAP44XX_REG_PAGESIZE_MASK,
+ addressing->col_sz[cs0_device->io_width]);
+
+ return config_reg;
+}
+
+static u32 get_sdram_ref_ctrl(u32 freq,
+ const struct lpddr2_addressing *addressing)
+{
+ u32 ref_ctrl = 0, val = 0, freq_khz;
+ freq_khz = freq / 1000;
+ /*
+ * refresh rate to be set is 'tREFI in us * freq in MHz';
+ * division by 10000 accounts for the kHz units and the x10 in t_REFI_us_x10
+ */
+ val = addressing->t_REFI_us_x10 * freq_khz / 10000;
+ mask_n_set(ref_ctrl, OMAP44XX_REG_REFRESH_RATE_SHIFT,
+ OMAP44XX_REG_REFRESH_RATE_MASK, val);
+
+ /* enable refresh */
+ mask_n_set(ref_ctrl, OMAP44XX_REG_INITREF_DIS_SHIFT,
+ OMAP44XX_REG_INITREF_DIS_MASK, 1);
+ return ref_ctrl;
+}
+
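+/*
+ * Worked example: for a part with tREFI = 3.9 us (t_REFI_us_x10 = 39, a
+ * typical LPDDR2 value) running at 400 MHz, freq_khz = 400000 and the
+ * programmed refresh rate is 39 * 400000 / 10000 = 1560 clock cycles
+ * between refreshes.
+ */
+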
+static u32 get_sdram_tim_1_reg(const struct lpddr2_timings *timings,
+ const struct lpddr2_min_tck *min_tck,
+ const struct lpddr2_addressing *addressing)
+{
+ u32 tim1 = 0, val = 0;
+ val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_WTR_SHIFT, OMAP44XX_REG_T_WTR_MASK,
+ val);
+
+ if (addressing->num_banks == BANKS8)
+ val = (timings->tFAW * T_den + 4 * T_num - 1) / (4 * T_num) - 1;
+ else
+ val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
+
+ mask_n_set(tim1, OMAP44XX_REG_T_RRD_SHIFT, OMAP44XX_REG_T_RRD_MASK,
+ val);
+
+ val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RC_SHIFT, OMAP44XX_REG_T_RC_MASK, val);
+
+ val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RAS_SHIFT, OMAP44XX_REG_T_RAS_MASK,
+ val);
+
+ val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_WR_SHIFT, OMAP44XX_REG_T_WR_MASK, val);
+
+ val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RCD_SHIFT, OMAP44XX_REG_T_RCD_MASK,
+ val);
+ val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RP_SHIFT, OMAP44XX_REG_T_RP_MASK, val);
+
+ return tim1;
+}
+
+/*
+ * Finds the de-rated value for EMIF_SDRAM_TIM1 register
+ * All the de-rated timings are limited to this register
+ * Adds 2ns instead of 1.875ns to the affected timings as
+ * we can not use float.
+ */
+static u32 get_sdram_tim_1_reg_derated(const struct lpddr2_timings *timings,
+ const struct lpddr2_min_tck *min_tck,
+ const struct lpddr2_addressing
+ *addressing)
+{
+ u32 tim1 = 0, val = 0;
+ val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_WTR_SHIFT, OMAP44XX_REG_T_WTR_MASK,
+ val);
+
+ if (addressing->num_banks == BANKS8)
+ /*
+ * tFAW is approximately 4 times tRRD. So add 1.875*4 = 7.5 ~ 8
+ * to tFAW for de-rating
+ */
+ val = ((timings->tFAW + 8) * T_den + 4 * T_num - 1)
+ / (4 * T_num) - 1;
+ else
+ val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD + 2)) - 1;
+
+ mask_n_set(tim1, OMAP44XX_REG_T_RRD_SHIFT, OMAP44XX_REG_T_RRD_MASK,
+ val);
+
+ val = ns_2_cycles(timings->tRASmin + timings->tRPab + 2) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RC_SHIFT, OMAP44XX_REG_T_RC_MASK, val);
+
+ val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin + 2)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RAS_SHIFT, OMAP44XX_REG_T_RAS_MASK,
+ val);
+
+ val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_WR_SHIFT, OMAP44XX_REG_T_WR_MASK, val);
+
+ val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD + 2)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RCD_SHIFT, OMAP44XX_REG_T_RCD_MASK,
+ val);
+ val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab + 2)) - 1;
+ mask_n_set(tim1, OMAP44XX_REG_T_RP_SHIFT, OMAP44XX_REG_T_RP_MASK, val);
+
+ return tim1;
+}
+
+static u32 get_sdram_tim_2_reg(const struct lpddr2_timings *timings,
+ const struct lpddr2_min_tck *min_tck)
+{
+ u32 tim2 = 0, val = 0;
+ val = max(min_tck->tCKE, timings->tCKE) - 1;
+ mask_n_set(tim2, OMAP44XX_REG_T_CKE_SHIFT, OMAP44XX_REG_T_CKE_MASK,
+ val);
+
+ val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
+ mask_n_set(tim2, OMAP44XX_REG_T_RTP_SHIFT, OMAP44XX_REG_T_RTP_MASK,
+ val);
+
+ /*
+ * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
+ * same value
+ */
+ val = ns_2_cycles(timings->tXSR) - 1;
+ mask_n_set(tim2, OMAP44XX_REG_T_XSRD_SHIFT, OMAP44XX_REG_T_XSRD_MASK,
+ val);
+ mask_n_set(tim2, OMAP44XX_REG_T_XSNR_SHIFT, OMAP44XX_REG_T_XSNR_MASK,
+ val);
+
+ val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
+ mask_n_set(tim2, OMAP44XX_REG_T_XP_SHIFT, OMAP44XX_REG_T_XP_MASK, val);
+
+ return tim2;
+}
+
+static u32 get_sdram_tim_3_reg(const struct lpddr2_timings *timings,
+ const struct lpddr2_min_tck *min_tck,
+ const struct lpddr2_addressing *addressing)
+{
+ u32 tim3 = 0, val = 0;
+ val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
+ mask_n_set(tim3, OMAP44XX_REG_T_RAS_MAX_SHIFT,
+ OMAP44XX_REG_T_RAS_MAX_MASK, val);
+
+ val = ns_2_cycles(timings->tRFCab) - 1;
+ mask_n_set(tim3, OMAP44XX_REG_T_RFC_SHIFT, OMAP44XX_REG_T_RFC_MASK,
+ val);
+
+ val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
+ mask_n_set(tim3, OMAP44XX_REG_T_TDQSCKMAX_SHIFT,
+ OMAP44XX_REG_T_TDQSCKMAX_MASK, val);
+
+ val = ns_2_cycles(timings->tZQCS) - 1;
+ mask_n_set(tim3, OMAP44XX_REG_ZQ_ZQCS_SHIFT,
+ OMAP44XX_REG_ZQ_ZQCS_MASK, val);
+
+ val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
+ mask_n_set(tim3, OMAP44XX_REG_T_CKESR_SHIFT,
+ OMAP44XX_REG_T_CKESR_MASK, val);
+
+ return tim3;
+}
+
+static u32 get_zq_config_reg(const struct lpddr2_device_info *cs1_device,
+ const struct lpddr2_addressing *addressing,
+ bool volt_ramp)
+{
+ u32 zq = 0, val = 0;
+ if (volt_ramp)
+ val =
+ EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
+ addressing->t_REFI_us_x10;
+ else
+ val =
+ EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
+ addressing->t_REFI_us_x10;
+ mask_n_set(zq, OMAP44XX_REG_ZQ_REFINTERVAL_SHIFT,
+ OMAP44XX_REG_ZQ_REFINTERVAL_MASK, val);
+
+ mask_n_set(zq, OMAP44XX_REG_ZQ_ZQCL_MULT_SHIFT,
+ OMAP44XX_REG_ZQ_ZQCL_MULT_MASK, REG_ZQ_ZQCL_MULT - 1);
+
+ mask_n_set(zq, OMAP44XX_REG_ZQ_ZQINIT_MULT_SHIFT,
+ OMAP44XX_REG_ZQ_ZQINIT_MULT_MASK, REG_ZQ_ZQINIT_MULT - 1);
+
+ mask_n_set(zq, OMAP44XX_REG_ZQ_SFEXITEN_SHIFT,
+ OMAP44XX_REG_ZQ_SFEXITEN_MASK, REG_ZQ_SFEXITEN_ENABLE);
+
+ /*
+ * Assuming that two chipselects have a single calibration resistor
+ * If there are indeed two calibration resistors, then this flag should
+ * be enabled to take advantage of dual calibration feature.
+ * This data should ideally come from board files. But considering
+ * that none of the boards today have calibration resistors per CS,
+ * it would be an unnecessary overhead.
+ */
+ mask_n_set(zq, OMAP44XX_REG_ZQ_DUALCALEN_SHIFT,
+ OMAP44XX_REG_ZQ_DUALCALEN_MASK, REG_ZQ_DUALCALEN_DISABLE);
+
+ mask_n_set(zq, OMAP44XX_REG_ZQ_CS0EN_SHIFT,
+ OMAP44XX_REG_ZQ_CS0EN_MASK, REG_ZQ_CS0EN_ENABLE);
+
+ mask_n_set(zq, OMAP44XX_REG_ZQ_CS1EN_SHIFT,
+ OMAP44XX_REG_ZQ_CS1EN_MASK, (cs1_device ? 1 : 0));
+
+ return zq;
+}
+
+static u32 get_temp_alert_config(const struct lpddr2_device_info *cs1_device,
+ const struct lpddr2_addressing *addressing,
+ bool is_derated)
+{
+ u32 alert = 0, interval;
+ interval =
+ TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
+ if (is_derated)
+ interval *= 4;
+ mask_n_set(alert, OMAP44XX_REG_TA_REFINTERVAL_SHIFT,
+ OMAP44XX_REG_TA_REFINTERVAL_MASK, interval);
+
+ mask_n_set(alert, OMAP44XX_REG_TA_DEVCNT_SHIFT,
+ OMAP44XX_REG_TA_DEVCNT_MASK, TEMP_ALERT_CONFIG_DEVCT_1);
+
+ mask_n_set(alert, OMAP44XX_REG_TA_DEVWDT_SHIFT,
+ OMAP44XX_REG_TA_DEVWDT_MASK, TEMP_ALERT_CONFIG_DEVWDT_32);
+
+ mask_n_set(alert, OMAP44XX_REG_TA_SFEXITEN_SHIFT,
+ OMAP44XX_REG_TA_SFEXITEN_MASK, 1);
+
+ mask_n_set(alert, OMAP44XX_REG_TA_CS0EN_SHIFT,
+ OMAP44XX_REG_TA_CS0EN_MASK, 1);
+
+ mask_n_set(alert, OMAP44XX_REG_TA_CS1EN_SHIFT,
+ OMAP44XX_REG_TA_CS1EN_MASK, (cs1_device ? 1 : 0));
+
+ return alert;
+}
+
+static u32 get_read_idle_ctrl_reg(bool volt_ramp)
+{
+ u32 idle = 0, val = 0;
+ if (volt_ramp)
+ val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
+ else
+ /* Maximum value in normal conditions - suggested by hw team */
+ val = 0x1FF;
+ mask_n_set(idle, OMAP44XX_REG_READ_IDLE_INTERVAL_SHIFT,
+ OMAP44XX_REG_READ_IDLE_INTERVAL_MASK, val);
+
+ mask_n_set(idle, OMAP44XX_REG_READ_IDLE_LEN_SHIFT,
+ OMAP44XX_REG_READ_IDLE_LEN_MASK, EMIF_REG_READ_IDLE_LEN_VAL);
+
+ return idle;
+}
+
+static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
+{
+ u32 phy = 0, val = 0;
+
+ mask_n_set(phy, OMAP44XX_REG_READ_LATENCY_SHIFT,
+ OMAP44XX_REG_READ_LATENCY_MASK, RL + 2);
+
+ if (freq <= 100000000)
+ val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
+ else if (freq <= 200000000)
+ val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
+ else
+ val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
+ mask_n_set(phy, OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_SHIFT,
+ OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_MASK, val);
+
+ /* Other fields are constant magic values. Hardcode them together */
+ mask_n_set(phy, OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT,
+ OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_MASK,
+ EMIF_DDR_PHY_CTRL_1_BASE_VAL);
+
+ phy >>= OMAP44XX_REG_DDR_PHY_CTRL_1_SHIFT;
+
+ return phy;
+}
+
+/*
+ * get_lp_mode - Get the LP Mode of an EMIF instance.
+ *
+ * It returns the REG_LP_MODE field of EMIF_PWR_MGMT_CTRL[10:8]
+ * for an EMIF.
+ *
+ */
+static u32 get_lp_mode(u32 emif_nr)
+{
+ u32 temp, lpmode;
+ void __iomem *base = emif[emif_nr].base;
+
+ temp = readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+ lpmode = (temp & OMAP44XX_REG_LP_MODE_MASK) >>
+ OMAP44XX_REG_LP_MODE_SHIFT;
+
+ return lpmode;
+}
+
+/*
+ * set_lp_mode - Set the LP Mode of an EMIF instance.
+ *
+ * It replaces the REG_LP_MODE field of EMIF_PWR_MGMT_CTRL[10:8]
+ * with the new value for an EMIF.
+ *
+ */
+static void set_lp_mode(u32 emif_nr, u32 lpmode)
+{
+ u32 temp;
+ void __iomem *base = emif[emif_nr].base;
+
+ /* Extract current lp mode value */
+ temp = readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+
+ /* Write out the new lp mode value */
+ temp &= ~OMAP44XX_REG_LP_MODE_MASK;
+ temp |= lpmode << OMAP44XX_REG_LP_MODE_SHIFT;
+ writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+
+}
+
+/*
+ * Get the temperature level of the EMIF instance:
+ * Reads the MR4 register of attached SDRAM parts to find out the temperature
+ * level. If there are two parts attached (one on each CS), then the temperature
+ * level for the EMIF instance is the higher of the two temperatures.
+ */
+static u32 get_temperature_level(u32 emif_nr)
+{
+ u32 temp, tmp_temperature_level;
+ bool cs1_used;
+ void __iomem *base;
+
+ base = emif[emif_nr].base;
+
+ temp = __raw_readl(base + OMAP44XX_EMIF_SDRAM_CONFIG);
+ cs1_used = (temp & OMAP44XX_REG_EBANK_MASK) ? true : false;
+
+ /* Read mode register 4 */
+ __raw_writel(LPDDR2_MR4, base + OMAP44XX_EMIF_LPDDR2_MODE_REG_CFG);
+ tmp_temperature_level = __raw_readl(base +
+ OMAP44XX_EMIF_LPDDR2_MODE_REG_DATA);
+
+ tmp_temperature_level = (tmp_temperature_level &
+ MR4_SDRAM_REF_RATE_MASK) >>
+ MR4_SDRAM_REF_RATE_SHIFT;
+
+ if (cs1_used) {
+ __raw_writel(LPDDR2_MR4 | OMAP44XX_REG_CS_MASK,
+ base + OMAP44XX_EMIF_LPDDR2_MODE_REG_CFG);
+ temp = __raw_readl(base + OMAP44XX_EMIF_LPDDR2_MODE_REG_DATA);
+ temp = (temp & MR4_SDRAM_REF_RATE_MASK)
+ >> MR4_SDRAM_REF_RATE_SHIFT;
+ tmp_temperature_level = max(temp, tmp_temperature_level);
+ }
+
+ /* treat everything less than nominal (3) in MR4 as nominal */
+ if (unlikely(tmp_temperature_level < SDRAM_TEMP_NOMINAL))
+ tmp_temperature_level = SDRAM_TEMP_NOMINAL;
+
+ /* if we get reserved value in MR4 persist with the existing value */
+ if (unlikely(tmp_temperature_level == SDRAM_TEMP_RESERVED_4))
+ tmp_temperature_level = emif_temperature_level[emif_nr];
+
+ return tmp_temperature_level;
+}
+
+/*
+ * Program EMIF shadow registers:
+ * Sets the shadow registers using pre-calculated register values.
+ * When volt_state indicates that this function is called just before
+ * a voltage scaling, set only the registers relevant for voltage scaling
+ * Otherwise, set all the registers relevant for a frequency change
+ */
+static void setup_registers(u32 emif_nr, struct emif_regs *regs, u32 volt_state)
+{
+	u32 temp, read_idle;
+ void __iomem *base = emif[emif_nr].base;
+
+ __raw_writel(regs->ref_ctrl, base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW);
+
+ __raw_writel(regs->sdram_tim2, base + OMAP44XX_EMIF_SDRAM_TIM_2_SHDW);
+ __raw_writel(regs->sdram_tim3, base + OMAP44XX_EMIF_SDRAM_TIM_3_SHDW);
+ /*
+ * Do not change the RL part in PHY CTRL register
+ * RL is not changed during DVFS
+ */
+ temp = __raw_readl(base + OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW);
+ mask_n_set(temp, OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_SHIFT,
+ OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_MASK,
+ regs->emif_ddr_phy_ctlr_1_final);
+ __raw_writel(temp, base + OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW);
+
+ __raw_writel(regs->temp_alert_config,
+ base + OMAP44XX_EMIF_TEMP_ALERT_CONFIG);
+
+ /*
+	 * When the voltage is ramping, forced read idle should
+	 * happen more often.
+ */
+ if (volt_state == LPDDR2_VOLTAGE_RAMPING)
+ read_idle = regs->read_idle_ctrl_volt_ramp;
+ else
+ read_idle = regs->read_idle_ctrl_normal;
+ __raw_writel(read_idle, base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW);
+
+ /*
+ * Reading back the last written register to ensure all writes are
+ * complete
+ */
+ temp = __raw_readl(base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW);
+}
+
+/*
+ * setup_temperature_sensitive_regs() - set the timings for temperature
+ * sensitive registers. This happens once at initialization time based
+ * on the temperature at boot time and subsequently based on the temperature
+ * alert interrupt. Temperature alert can happen when the temperature
+ * increases or drops. So this function can have the effect of either
+ * derating the timings or going back to nominal values.
+ */
+static void setup_temperature_sensitive_regs(u32 emif_nr,
+ struct emif_regs *regs)
+{
+ u32 tim1, ref_ctrl, temp_alert_cfg;
+ void __iomem *base = emif[emif_nr].base;
+ u32 temperature = emif_temperature_level[emif_nr];
+
+ if (unlikely(temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH)) {
+ tim1 = regs->sdram_tim1;
+ ref_ctrl = regs->ref_ctrl_derated;
+ temp_alert_cfg = regs->temp_alert_config_derated;
+ } else if (unlikely(temperature ==
+ SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS)) {
+ tim1 = regs->sdram_tim1_derated;
+ ref_ctrl = regs->ref_ctrl_derated;
+ temp_alert_cfg = regs->temp_alert_config_derated;
+ } else {
+ /*
+ * Nominal timings - you may switch back to the
+ * nominal timings if the temperature falls
+ */
+ tim1 = regs->sdram_tim1;
+ ref_ctrl = regs->ref_ctrl;
+ temp_alert_cfg = regs->temp_alert_config;
+ }
+
+ __raw_writel(tim1, base + OMAP44XX_EMIF_SDRAM_TIM_1_SHDW);
+ __raw_writel(temp_alert_cfg, base + OMAP44XX_EMIF_TEMP_ALERT_CONFIG);
+ __raw_writel(ref_ctrl, base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW);
+
+ /* read back last written register to ensure write is complete */
+ __raw_readl(base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW);
+}
+
+static irqreturn_t handle_temp_alert(void __iomem *base, u32 emif_nr)
+{
+ u32 old_temperature_level;
+ old_temperature_level = emif_temperature_level[emif_nr];
+ emif_temperature_level[emif_nr] = get_temperature_level(emif_nr);
+
+ if (unlikely(emif_temperature_level[emif_nr] == old_temperature_level))
+ return IRQ_HANDLED;
+
+ emif_notify_pending |= (1 << emif_nr);
+ if (likely(emif_temperature_level[emif_nr] < old_temperature_level)) {
+ /* Temperature coming down - defer handling to thread */
+ emif_thermal_handling_pending |= (1 << emif_nr);
+ } else if (likely(emif_temperature_level[emif_nr] !=
+ SDRAM_TEMP_VERY_HIGH_SHUTDOWN)) {
+ /* Temperature is going up - handle immediately */
+ setup_temperature_sensitive_regs(emif_nr,
+ emif_curr_regs[emif_nr]);
+ /*
+		 * The EMIF derated timings registers need to be set up
+		 * using the freq update method only
+ */
+ omap4_prcm_freq_update();
+ }
+ return IRQ_WAKE_THREAD;
+}
+
+static void setup_volt_sensitive_registers(u32 emif_nr, struct emif_regs *regs,
+ u32 volt_state)
+{
+ u32 read_idle;
+ void __iomem *base = emif[emif_nr].base;
+ /*
+	 * When the voltage is ramping, forced read idle should
+	 * happen more often.
+ */
+ if (volt_state == LPDDR2_VOLTAGE_RAMPING)
+ read_idle = regs->read_idle_ctrl_volt_ramp;
+ else
+ read_idle = regs->read_idle_ctrl_normal;
+
+ __raw_writel(read_idle, base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW);
+
+ /* read back last written register to ensure write is complete */
+ __raw_readl(base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW);
+
+ return;
+}
+
+/*
+ * Interrupt Handler for EMIF1 and EMIF2
+ */
+static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
+{
+ void __iomem *base;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 sys, ll;
+ u8 emif_nr = EMIF1;
+
+ if (emif[EMIF2].irq == irq)
+ emif_nr = EMIF2;
+
+ base = emif[emif_nr].base;
+
+ /* Save the status and clear it */
+ sys = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_SYS);
+ ll = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_LL);
+ __raw_writel(sys, base + OMAP44XX_EMIF_IRQSTATUS_SYS);
+ __raw_writel(ll, base + OMAP44XX_EMIF_IRQSTATUS_LL);
+ /*
+ * Handle temperature alert
+	 * The temperature alert should be the same for both ports,
+	 * so it's enough to process it for only one of them
+ */
+ if (sys & OMAP44XX_REG_TA_SYS_MASK)
+ ret = handle_temp_alert(base, emif_nr);
+
+ if (sys & OMAP44XX_REG_ERR_SYS_MASK)
+ pr_err("EMIF: Access error from EMIF%d SYS port - %x",
+ emif_nr, sys);
+
+ if (ll & OMAP44XX_REG_ERR_LL_MASK)
+ pr_err("EMIF Error: Access error from EMIF%d LL port - %x",
+ emif_nr, ll);
+
+ return ret;
+}
+
+static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
+{
+ u8 emif_nr = EMIF1;
+ if (emif[EMIF2].irq == irq)
+ emif_nr = EMIF2;
+
+ if (emif_thermal_handling_pending & (1 << emif_nr)) {
+ setup_temperature_sensitive_regs(emif_nr,
+ emif_curr_regs[emif_nr]);
+ /*
+		 * The EMIF derated timings registers need to be set up
+		 * using the freq update method only
+ */
+ omap4_prcm_freq_update();
+ /* clear the bit */
+ emif_thermal_handling_pending &= ~(1 << emif_nr);
+ }
+ if (emif_notify_pending & (1 << emif_nr)) {
+ sysfs_notify(&(emif[emif_nr].pdev->dev.kobj), NULL,
+ "temperature");
+ kobject_uevent(&(emif[emif_nr].pdev->dev.kobj), KOBJ_CHANGE);
+ /* clear the bit */
+ emif_notify_pending &= ~(1 << emif_nr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init setup_emif_interrupts(u32 emif_nr)
+{
+ u32 temp;
+ void __iomem *base = emif[emif_nr].base;
+ int r;
+
+	/* Clear any pending interrupts */
+ __raw_writel(0xFFFFFFFF, base + OMAP44XX_EMIF_IRQSTATUS_SYS);
+ __raw_writel(0xFFFFFFFF, base + OMAP44XX_EMIF_IRQSTATUS_LL);
+
+ /* Enable the relevant interrupts for both LL and SYS */
+ temp = OMAP44XX_REG_EN_TA_SYS_MASK | OMAP44XX_REG_EN_ERR_SYS_MASK;
+ __raw_writel(temp, base + OMAP44XX_EMIF_IRQENABLE_SET_SYS);
+ __raw_writel(temp, base + OMAP44XX_EMIF_IRQENABLE_SET_LL);
+
+ /* Dummy read to make sure writes are complete */
+ __raw_readl(base + OMAP44XX_EMIF_IRQENABLE_SET_LL);
+
+ /* setup IRQ handlers */
+ r = request_threaded_irq(emif[emif_nr].irq,
+ emif_interrupt_handler,
+ emif_threaded_isr,
+ IRQF_SHARED, emif[emif_nr].pdev->name,
+ emif[emif_nr].pdev);
+ if (r) {
+ pr_err("%s: Failed: request_irq emif[%d] IRQ%d:%d\n",
+ __func__, emif_nr, emif[emif_nr].irq, r);
+ return r;
+ }
+
+ /*
+	 * Even if we fail to make the IRQ wakeup capable, the only risk is
+	 * while going to suspend, where the device is cooler anyway: a pending
+	 * interrupt may keep the core from hitting a low-power state and cost
+	 * a bit of power, but we can still handle events in active use
+	 * cases. So don't free the interrupt if marking it wakeup capable
+	 * fails; just warn and continue.
+ */
+ if (enable_irq_wake(emif[emif_nr].irq))
+		pr_err("%s: Failed to enable wakeup on emif[%d] IRQ%d\n", __func__,
+ emif_nr, emif[emif_nr].irq);
+
+ return 0;
+}
+
+static ssize_t emif_temperature_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 temperature;
+ if (dev == &(emif[EMIF1].pdev->dev))
+ temperature = emif_temperature_level[EMIF1];
+ else if (dev == &(emif[EMIF2].pdev->dev))
+ temperature = emif_temperature_level[EMIF2];
+ else
+ return 0;
+
+ return snprintf(buf, 20, "%u\n", temperature);
+}
+static DEVICE_ATTR(temperature, S_IRUGO, emif_temperature_show, NULL);
+
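+/*
+ * Note (illustrative): the attribute above exposes the cached MR4
+ * temperature level through sysfs; the exact path depends on the platform
+ * device name and id, e.g. something like:
+ *
+ *	cat /sys/devices/platform/omap_emif.0/temperature
+ *
+ * Userspace is notified of changes via sysfs_notify() and KOBJ_CHANGE
+ * uevents from the threaded interrupt handler above.
+ */
+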
+static int __devinit omap_emif_probe(struct platform_device *pdev)
+{
+ int id;
+ struct resource *res;
+
+ if (!pdev)
+ return -EINVAL;
+
+ id = pdev->id;
+ emif[id].pdev = pdev;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ pr_err("EMIF %i Invalid IRQ resource\n", id);
+ return -ENODEV;
+ }
+
+ emif[id].irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("EMIF%i Invalid mem resource\n", id);
+ return -ENODEV;
+ }
+
+ emif[id].base = ioremap(res->start, SZ_1M);
+ if (!emif[id].base) {
+ pr_err("Could not ioremap EMIF%i\n", id);
+ return -ENOMEM;
+ }
+
+ pr_info("EMIF%d is enabled with IRQ%d\n", id, emif[id].irq);
+
+ emif[id].ddr_refresh_disabled = false;
+
+ return 0;
+}
+
+static int emif_init(struct omap_hwmod *oh, void *user)
+{
+ char *name = "omap_emif";
+ struct omap_device *od;
+ static int id;
+
+ od = omap_device_build(name, id, oh, NULL, 0, omap_emif_latency,
+ ARRAY_SIZE(omap_emif_latency), false);
+ WARN(IS_ERR(od), "Can't build omap_device for %s:%s.\n",
+ name, oh->name);
+ id++;
+ return 0;
+
+}
+
+static void emif_calculate_regs(const struct emif_device_details *devices,
+ u32 freq, struct emif_regs *regs)
+{
+ u32 temp;
+ const struct lpddr2_addressing *addressing;
+ const struct lpddr2_timings *timings;
+ const struct lpddr2_min_tck *min_tck;
+ const struct lpddr2_device_info *cs0_device = devices->cs0_device;
+ const struct lpddr2_device_info *cs1_device = devices->cs1_device;
+
+ emif_assert(devices);
+ emif_assert(regs);
+ /*
+	 * You cannot have a device on CS1 without one on CS0.
+ * So configuring EMIF without a device on CS0 doesn't
+ * make sense
+ */
+ emif_assert(cs0_device);
+ emif_assert(cs0_device->type != LPDDR2_TYPE_NVM);
+ /*
+	 * If there is a device on CS1 it should be the same type as the one
+	 * on CS0 (or NVM, but NVM is not supported by this driver yet)
+ */
+ emif_assert((cs1_device == NULL) ||
+ (cs1_device->type == LPDDR2_TYPE_NVM) ||
+ (cs0_device->type == cs1_device->type));
+ emif_assert(freq <= MAX_LPDDR2_FREQ);
+
+ set_ddr_clk_period(freq);
+
+ /*
+ * The device on CS0 is used for all timing calculations
+ * There is only one set of registers for timings per EMIF. So, if the
+ * second CS(CS1) has a device, it should have the same timings as the
+ * device on CS0
+ */
+ timings = get_timings_table(cs0_device->device_timings, freq);
+ emif_assert(timings);
+ min_tck = cs0_device->min_tck;
+
+ temp =
+ addressing_table_index(cs0_device->type, cs0_device->density,
+ cs0_device->io_width);
+ emif_assert((temp >= 0));
+ addressing = &(lpddr2_jedec_addressing_table[temp]);
+ emif_assert(addressing);
+
+ regs->RL_final = timings->RL;
+ /*
+ * Initial value of EMIF_SDRAM_CONFIG corresponds to the base
+ * frequency - 19.2 MHz
+ */
+ regs->sdram_config_init =
+ get_sdram_config_reg(cs0_device, cs1_device, addressing,
+ RL_19_2_MHZ);
+
+ regs->sdram_config_final = regs->sdram_config_init;
+ mask_n_set(regs->sdram_config_final, OMAP44XX_REG_CL_SHIFT,
+ OMAP44XX_REG_CL_MASK, timings->RL);
+
+ regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);
+ regs->ref_ctrl_derated = regs->ref_ctrl / 4;
+
+ regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);
+
+ regs->sdram_tim1_derated =
+ get_sdram_tim_1_reg_derated(timings, min_tck, addressing);
+
+ regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);
+
+ regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);
+
+ regs->read_idle_ctrl_normal =
+ get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);
+
+ regs->read_idle_ctrl_volt_ramp =
+ get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_RAMPING);
+
+ regs->zq_config_normal =
+ get_zq_config_reg(cs1_device, addressing, LPDDR2_VOLTAGE_STABLE);
+
+ regs->zq_config_volt_ramp =
+ get_zq_config_reg(cs1_device, addressing, LPDDR2_VOLTAGE_RAMPING);
+
+ regs->temp_alert_config =
+ get_temp_alert_config(cs1_device, addressing, false);
+
+ regs->temp_alert_config_derated =
+ get_temp_alert_config(cs1_device, addressing, true);
+
+ regs->emif_ddr_phy_ctlr_1_init =
+ get_ddr_phy_ctrl_1(EMIF_FREQ_19_2_MHZ, RL_19_2_MHZ);
+
+ regs->emif_ddr_phy_ctlr_1_final =
+ get_ddr_phy_ctrl_1(freq, regs->RL_final);
+
+ /* save the frequency in the struct to act as a tag when cached */
+ regs->freq = freq;
+
+ pr_debug("Calculated EMIF configuration register values "
+ "for %d MHz", freq / 1000000);
+ pr_debug("sdram_config_init\t\t: 0x%08x\n", regs->sdram_config_init);
+ pr_debug("sdram_config_final\t\t: 0x%08x\n", regs->sdram_config_final);
+ pr_debug("sdram_ref_ctrl\t\t: 0x%08x\n", regs->ref_ctrl);
+ pr_debug("sdram_ref_ctrl_derated\t\t: 0x%08x\n",
+ regs->ref_ctrl_derated);
+ pr_debug("sdram_tim_1_reg\t\t: 0x%08x\n", regs->sdram_tim1);
+ pr_debug("sdram_tim_1_reg_derated\t\t: 0x%08x\n",
+ regs->sdram_tim1_derated);
+ pr_debug("sdram_tim_2_reg\t\t: 0x%08x\n", regs->sdram_tim2);
+ pr_debug("sdram_tim_3_reg\t\t: 0x%08x\n", regs->sdram_tim3);
+ pr_debug("emif_read_idle_ctrl_normal\t: 0x%08x\n",
+ regs->read_idle_ctrl_normal);
+ pr_debug("emif_read_idle_ctrl_dvfs\t: 0x%08x\n",
+ regs->read_idle_ctrl_volt_ramp);
+ pr_debug("zq_config_reg_normal\t: 0x%08x\n", regs->zq_config_normal);
+ pr_debug("zq_config_reg_dvfs\t\t: 0x%08x\n", regs->zq_config_volt_ramp);
+ pr_debug("temp_alert_config\t: 0x%08x\n", regs->temp_alert_config);
+ pr_debug("emif_ddr_phy_ctlr_1_init\t: 0x%08x\n",
+ regs->emif_ddr_phy_ctlr_1_init);
+ pr_debug("emif_ddr_phy_ctlr_1_final\t: 0x%08x\n",
+ regs->emif_ddr_phy_ctlr_1_final);
+}
+
+/*
+ * get_regs() - gets the cached emif_regs structure for a given EMIF instance
+ * (emif_nr) for a given frequency (freq).
+ *
+ * As an optimization, only one cache array (that of EMIF1) is used if EMIF1
+ * and EMIF2 have identical devices.
+ *
+ * If we do not have an entry corresponding to the frequency given, we
+ * allocate a new entry and calculate the values
+ */
+static struct emif_regs *get_regs(u32 emif_nr, u32 freq)
+{
+ int i;
+ struct emif_regs **regs_cache;
+ struct emif_regs *regs = NULL;
+
+ /*
+ * If EMIF2 has the same devices as EMIF1 use the register
+ * cache of EMIF1
+ */
+ if ((emif_nr == EMIF1) ||
+ ((emif_nr == EMIF2)
+ && (emif_devices[EMIF1] == emif_devices[EMIF2])))
+ regs_cache = emif1_regs_cache;
+ else
+ regs_cache = emif2_regs_cache;
+
+ for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
+ if (regs_cache[i]->freq == freq) {
+ regs = regs_cache[i];
+ break;
+ }
+ }
+
+ /*
+	 * If we don't have an entry for this frequency in the cache, create one
+ * and calculate the values
+ */
+ if (!regs) {
+ regs = kmalloc(sizeof(struct emif_regs), GFP_ATOMIC);
+ if (!regs)
+ return NULL;
+ emif_calculate_regs(emif_devices[emif_nr], freq, regs);
+
+ /*
+		 * Now look for an unused entry in the cache and save the
+		 * newly created struct. If there are no free entries,
+		 * overwrite the last entry.
+ */
+ for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
+ ;
+
+ if (i >= EMIF_MAX_NUM_FREQUENCIES) {
+			pr_warning("emif: regs_cache full - more frequencies"
+				" used than expected!\n");
+ i = EMIF_MAX_NUM_FREQUENCIES - 1;
+ kfree(regs_cache[i]);
+ }
+ regs_cache[i] = regs;
+ }
+ return regs;
+}
+
+static int do_emif_setup_registers(u32 emif_nr, u32 freq, u32 volt_state)
+{
+ struct emif_regs *regs;
+ regs = get_regs(emif_nr, freq);
+ if (!regs)
+ return -ENOMEM;
+
+ emif_curr_regs[emif_nr] = regs;
+ setup_registers(emif_nr, regs, volt_state);
+ setup_temperature_sensitive_regs(emif_nr, regs);
+
+ return 0;
+}
+
+static int do_setup_device_details(u32 emif_nr,
+ const struct emif_device_details *devices)
+{
+ if (!emif_devices[emif_nr]) {
+ emif_devices[emif_nr] =
+ kmalloc(sizeof(struct emif_device_details), GFP_KERNEL);
+ if (!emif_devices[emif_nr])
+ return -ENOMEM;
+ *emif_devices[emif_nr] = *devices;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize the temperature level and set up the sysfs nodes
+ * and uevent for temperature monitoring
+ */
+static void init_temperature(u32 emif_nr)
+{
+ if (!emif_devices[emif_nr])
+ return;
+
+ emif_temperature_level[emif_nr] = get_temperature_level(emif_nr);
+ WARN_ON(device_create_file(&(emif[emif_nr].pdev->dev),
+ &dev_attr_temperature));
+ kobject_uevent(&(emif[emif_nr].pdev->dev.kobj), KOBJ_ADD);
+
+ if (emif_temperature_level[emif_nr] == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
+		pr_emerg("EMIF %d: SDRAM temperature exceeds operating"
+			" limit. Needs shutdown!\n", emif_nr + 1);
+}
+
+/*
+ * omap_emif_device_init() needs to run before the DDR
+ * reconfiguration call, hence it is a postcore_initcall.
+ */
+static int __init omap_emif_device_init(void)
+{
+ /*
+	 * Avoid running this code on other OMAPs
+	 * in multi-OMAP builds
+ */
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+
+ return omap_hwmod_for_each_by_class("emif", emif_init, NULL);
+}
+postcore_initcall(omap_emif_device_init);
+
+
+/* We need to disable the EMIF module interrupts because, in a warm
+ * reboot scenario, there may be a pending IRQ that is not serviced and
+ * the EMIF is stuck in transition. On the next boot the hwmod code then
+ * fails EMIF initialization with a timeout.
+ */
+void emif_clear_irq(int emif_id)
+{
+ u32 irq_mask = 0;
+ u32 base = 0;
+ u32 reg = 0;
+
+ if (emif_id == 0)
+ base = OMAP44XX_EMIF1_VIRT;
+ else
+ base = OMAP44XX_EMIF2_VIRT;
+
+ /* Disable the relevant interrupts for both LL and SYS */
+ irq_mask = OMAP44XX_REG_EN_TA_SYS_MASK | OMAP44XX_REG_EN_ERR_SYS_MASK
+ | OMAP44XX_REG_EN_DNV_SYS_MASK;
+ __raw_writel(irq_mask, base + OMAP44XX_EMIF_IRQENABLE_CLR_SYS);
+ __raw_writel(irq_mask, base + OMAP44XX_EMIF_IRQENABLE_CLR_LL);
+
+	/* Clear any pending interrupts without overwriting reserved bits */
+ reg = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_SYS);
+ reg |= irq_mask;
+ __raw_writel(reg, base + OMAP44XX_EMIF_IRQSTATUS_SYS);
+
+ reg = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_LL);
+ reg |= irq_mask;
+ __raw_writel(reg, base + OMAP44XX_EMIF_IRQSTATUS_LL);
+
+ /* Dummy read to make sure writes are complete */
+ __raw_readl(base + OMAP44XX_EMIF_IRQENABLE_SET_LL);
+
+ return;
+}
+
+void emif_driver_shutdown(struct platform_device *pdev)
+{
+ emif_clear_irq(pdev->id);
+}
+
+static struct platform_driver omap_emif_driver = {
+ .probe = omap_emif_probe,
+ .driver = {
+ .name = "omap_emif",
+ },
+
+ .shutdown = emif_driver_shutdown,
+};
+
+static int __init omap_emif_register(void)
+{
+ return platform_driver_register(&omap_emif_driver);
+}
+postcore_initcall(omap_emif_register);
+
+/*
+ * omap_emif_notify_voltage - set up the voltage-sensitive registers
+ * based on the voltage situation (voltage ramping or stable).
+ * read_idle_ctrl and zq_config are the voltage-sensitive registers.
+ * They need a very safe value (more frequent ZQ calibration and
+ * read idle forcing) while the voltage is scaling and can have a more
+ * relaxed, frequency-dependent nominal value when the voltage is stable.
+ */
+int omap_emif_notify_voltage(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ u32 volt_state;
+
+ if (val == OMAP_VOLTAGE_PRECHANGE)
+ volt_state = LPDDR2_VOLTAGE_RAMPING;
+ else
+ volt_state = LPDDR2_VOLTAGE_STABLE;
+
+ if (likely(emif_curr_regs[EMIF1]))
+ setup_volt_sensitive_registers(EMIF1, emif_curr_regs[EMIF1],
+ volt_state);
+
+ if (likely(emif_curr_regs[EMIF2]))
+ setup_volt_sensitive_registers(EMIF2, emif_curr_regs[EMIF2],
+ volt_state);
+
+ if (unlikely(!emif_curr_regs[EMIF1] && !emif_curr_regs[EMIF2])) {
+ pr_err_once("emif: voltage state notification came before the"
+ " initial setup - ignoring the notification");
+ return -EINVAL;
+ }
+
+ /*
+	 * EMIF read-idle control needs to be set up using
+	 * the freq update method only
+ */
+ return omap4_prcm_freq_update();
+}
+
+static struct notifier_block emif_volt_notifier_block = {
+ .notifier_call = omap_emif_notify_voltage,
+};
+
+static int __init omap_emif_late_init(void)
+{
+ struct voltagedomain *voltdm = voltdm_lookup("core");
+
+ if (!voltdm) {
+ pr_err("CORE voltage domain lookup failed\n");
+ return -EINVAL;
+ }
+
+ voltdm_register_notifier(voltdm, &emif_volt_notifier_block);
+
+ return 0;
+}
+late_initcall(omap_emif_late_init);
+
+/*
+ * omap_emif_setup_registers - set up the shadow registers for a given
+ * frequency. This is typically followed by a FREQ_UPDATE procedure
+ * to lock to the new frequency, which updates the EMIF main registers
+ * with the shadow register values
+ */
+int omap_emif_setup_registers(u32 freq, u32 volt_state)
+{
+ int err = 0;
+ if (likely(emif_devices[EMIF1]))
+ err = do_emif_setup_registers(EMIF1, freq, volt_state);
+ if (likely(!err && emif_devices[EMIF2]))
+ err = do_emif_setup_registers(EMIF2, freq, volt_state);
+ return err;
+}
+
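+/*
+ * Usage sketch (illustrative only, not part of this patch): a DVFS sequence
+ * would typically program the shadow registers for the target frequency
+ * (new_rate below is a hypothetical variable) and then trigger the PRCM
+ * frequency update so the EMIF latches the new values:
+ *
+ *	err = omap_emif_setup_registers(new_rate, LPDDR2_VOLTAGE_STABLE);
+ *	if (!err)
+ *		err = omap4_prcm_freq_update();
+ */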
+
+/*
+ * omap_emif_frequency_pre_notify - Disable DDR self refresh of both EMIFs
+ *
+ * It disables the LP mode if the LP mode of EMIFs was LP_MODE_SELF_REFRESH.
+ *
+ * It should be called before any PRCM frequency update sequence.
+ * After the frequency update sequence, omap_emif_frequency_post_notify
+ * should be called to restore the original LP MODE setting of the EMIFs.
+ *
+ */
+void omap_emif_frequency_pre_notify(void)
+{
+ int emif_num;
+
+ for (emif_num = EMIF1; emif_num < EMIF_NUM_INSTANCES; emif_num++) {
+
+ /*
+ * Only disable ddr self-refresh
+ * if ddr self-refresh was enabled
+ */
+ if (likely(LP_MODE_SELF_REFRESH == get_lp_mode(emif_num))) {
+
+ set_lp_mode(emif_num, LP_MODE_DISABLE);
+ emif[emif_num].ddr_refresh_disabled = true;
+ }
+
+ }
+}
+
+/*
+ * omap_emif_frequency_post_notify - Enable DDR self refresh of both EMIFs
+ *
+ * It restores the LP mode of the EMIFs back to LP_MODE_SELF_REFRESH if it
+ * was previously disabled by omap_emif_frequency_pre_notify()
+ *
+ */
+void omap_emif_frequency_post_notify(void)
+{
+ int emif_num;
+
+ for (emif_num = EMIF1; emif_num < EMIF_NUM_INSTANCES; emif_num++) {
+
+ /*
+ * Only re-enable ddr self-refresh
+ * if ddr self-refresh was disabled
+ */
+ if (likely(emif[emif_num].ddr_refresh_disabled)) {
+
+ set_lp_mode(emif_num, LP_MODE_SELF_REFRESH);
+ emif[emif_num].ddr_refresh_disabled = false;
+ }
+ }
+}
+
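+/*
+ * Call-order sketch (illustrative only, not part of this patch): the two
+ * helpers above are meant to bracket a PRCM frequency update so that DDR
+ * self-refresh is disabled while the sequence runs:
+ *
+ *	omap_emif_frequency_pre_notify();
+ *	omap4_prcm_freq_update();
+ *	omap_emif_frequency_post_notify();
+ */
+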
+/*
+ * omap_emif_setup_device_details - save the SDRAM device details passed
+ * from the board file
+ */
+int omap_emif_setup_device_details(const struct emif_device_details
+ *emif1_devices,
+ const struct emif_device_details
+ *emif2_devices)
+{
+ if (emif1_devices)
+ BUG_ON(do_setup_device_details(EMIF1, emif1_devices));
+
+ /*
+	 * If the memory devices connected to both EMIFs are identical
+	 * (which is normally the case), there is no need to calculate the
+	 * registers again or allocate another register structure for EMIF2
+ */
+ if (emif2_devices && (emif1_devices != emif2_devices))
+ BUG_ON(do_setup_device_details(EMIF2, emif2_devices));
+ else if (emif2_devices) {
+ emif_devices[EMIF2] = emif_devices[EMIF1];
+ /* call for temperature related setup */
+ BUG_ON(do_setup_device_details(EMIF2, emif2_devices));
+ }
+
+ return 0;
+}
+
+static void __init setup_lowpower_regs(u32 emif_nr,
+ struct emif_device_details *emif_dev)
+{
+ u32 temp;
+ void __iomem *base = emif[emif_nr].base;
+ const struct lpddr2_device_info *dev;
+
+ if (!emif_dev) {
+ pr_err("%s: no emif %d\n", __func__, emif_nr);
+ return;
+ }
+
+ /*
+	 * All devices on this specific EMIF should have the same self-refresh
+	 * timing, so use CS0
+ */
+ dev = emif_dev->cs0_device;
+ if (!dev) {
+ pr_err("%s: no CS0 device in emif %d\n", __func__, emif_nr);
+ return;
+ }
+ if (dev->emif_ddr_selfrefresh_cycles >= 0) {
+ u32 num_cycles, ddr_sr_timer;
+
+ /* Enable self refresh if not already configured */
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL) &
+ OMAP44XX_REG_LP_MODE_MASK;
+ /*
+		 * Configure the self-refresh timing:
+		 * the base value starts at 16 cycles, mapped to 1 (__fls(16) = 4)
+ */
+ num_cycles = dev->emif_ddr_selfrefresh_cycles;
+ if (num_cycles >= 16)
+ ddr_sr_timer = __fls(num_cycles) - 3;
+ else
+ ddr_sr_timer = 0;
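+		/*
+		 * Worked example (illustrative, assuming __fls() returns the
+		 * bit position of the most significant set bit, as noted
+		 * above): num_cycles = 16 gives __fls(16) = 4, so
+		 * ddr_sr_timer = 1; num_cycles = 512 gives __fls(512) = 9,
+		 * so ddr_sr_timer = 6.
+		 */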
+
+ /* Program the idle delay */
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW);
+ mask_n_set(temp, OMAP44XX_REG_SR_TIM_SHDW_SHIFT,
+ OMAP44XX_REG_SR_TIM_SHDW_MASK, ddr_sr_timer);
+ /*
+		 * A magic value for a field which shouldn't matter,
+		 * but seems to be needed to make this work.
+ */
+ mask_n_set(temp, OMAP44XX_REG_CS_TIM_SHDW_SHIFT,
+ OMAP44XX_REG_CS_TIM_SHDW_MASK, 0xf);
+ __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW);
+
+ /* Enable Self Refresh */
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+ mask_n_set(temp, OMAP44XX_REG_LP_MODE_SHIFT,
+ OMAP44XX_REG_LP_MODE_MASK, LP_MODE_SELF_REFRESH);
+ __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+ } else {
+		/* Disable automatic power management when the cycle count is negative */
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL) &
+ OMAP44XX_REG_LP_MODE_MASK;
+
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW);
+ mask_n_set(temp, OMAP44XX_REG_SR_TIM_SHDW_SHIFT,
+ OMAP44XX_REG_SR_TIM_SHDW_MASK, 0x0);
+ __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW);
+
+ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+ mask_n_set(temp, OMAP44XX_REG_LP_MODE_SHIFT,
+ OMAP44XX_REG_LP_MODE_MASK, LP_MODE_DISABLE);
+ __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL);
+ }
+}
+
+/*
+ * omap_init_emif_timings - reprogram EMIF timing parameters
+ *
+ * Sets the CORE DPLL M2 divider to the same value that it is at
+ * currently. This has the effect of setting the EMIF DDR AC timing
+ * registers to the values currently defined by the kernel.
+ */
+static int __init omap_init_emif_timings(void)
+{
+ struct clk *dpll_core_m2_clk;
+ int ret;
+ long rate;
+
+ /*
+	 * Set up the initial temperature, sysfs nodes, etc.
+	 * Subsequent temperature updates are done through interrupts
+ */
+ init_temperature(EMIF1);
+ init_temperature(EMIF2);
+
+	/* FREQ_UPDATE sequence isn't supported on early versions */
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return -EINVAL;
+
+	dpll_core_m2_clk = clk_get(NULL, "dpll_core_m2_ck");
+	if (IS_ERR(dpll_core_m2_clk)) {
+		pr_err("Could not get LPDDR2 clock - dpll_core_m2_ck\n");
+		return PTR_ERR(dpll_core_m2_clk);
+	}
+
+ rate = clk_get_rate(dpll_core_m2_clk);
+ pr_info("Reprogramming LPDDR2 timings to %ld Hz\n", rate >> 1);
+
+ ret = clk_set_rate(dpll_core_m2_clk, rate);
+ if (ret)
+ pr_err("Unable to set LPDDR2 rate to %ld:\n", rate);
+
+ /* registers are setup correctly - now enable interrupts */
+ if (emif_devices[EMIF1]) {
+ ret = setup_emif_interrupts(EMIF1);
+ setup_lowpower_regs(EMIF1, emif_devices[EMIF1]);
+ }
+ if (!ret && emif_devices[EMIF2]) {
+ ret = setup_emif_interrupts(EMIF2);
+ setup_lowpower_regs(EMIF2, emif_devices[EMIF2]);
+ }
+
+ clk_put(dpll_core_m2_clk);
+
+ return ret;
+}
+late_initcall(omap_init_emif_timings);
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9529842..1045c72 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -23,6 +23,9 @@
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
+
+#include "powerdomain.h"
static struct omap_device_pm_latency omap_gpio_latency[] = {
[0] = {
@@ -39,6 +42,7 @@
struct omap_gpio_dev_attr *dev_attr;
char *name = "omap_gpio";
int id;
+ struct powerdomain *pwrdm;
/*
* extract the device id from name field available in the
@@ -58,16 +62,77 @@
dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
pdata->bank_width = dev_attr->bank_width;
+ pdata->suspend_support = true;
pdata->dbck_flag = dev_attr->dbck_flag;
pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
+ pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
+	if (!pdata->regs) {
+ pr_err("gpio%d: Memory allocation failed\n", id);
+ return -ENOMEM;
+ }
+
+ pdata->regs->irqctrl = USHRT_MAX;
+ pdata->regs->edgectrl1 = USHRT_MAX;
+ pdata->regs->edgectrl2 = USHRT_MAX;
+
switch (oh->class->rev) {
case 0:
+ if (id == 1)
+ /* non-wakeup GPIO pins for OMAP2 Bank1 */
+ pdata->non_wakeup_gpios = 0xe203ffc0;
+ else if (id == 2)
+ /* non-wakeup GPIO pins for OMAP2 Bank2 */
+ pdata->non_wakeup_gpios = 0x08700040;
+ /* fall through */
+
case 1:
- pdata->bank_type = METHOD_GPIO_24XX;
+ pdata->regs->revision = OMAP24XX_GPIO_REVISION;
+ pdata->regs->direction = OMAP24XX_GPIO_OE;
+ pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
+ pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
+ pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
+ pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
+ pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
+ pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
+ pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
+ pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
+ pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
+ pdata->regs->wkup_status = OMAP24XX_GPIO_WAKE_EN;
+ pdata->regs->wkup_clear = OMAP24XX_GPIO_CLEARWKUENA;
+ pdata->regs->wkup_set = OMAP24XX_GPIO_SETWKUENA;
+ pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
+ pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
+ pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
+ pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
break;
case 2:
- pdata->bank_type = METHOD_GPIO_44XX;
+ pdata->regs->revision = OMAP4_GPIO_REVISION;
+ pdata->regs->direction = OMAP4_GPIO_OE;
+ pdata->regs->datain = OMAP4_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
+ pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
+ pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
+ pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
+ pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
+ pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
+ pdata->regs->ctrl = OMAP4_GPIO_CTRL;
+ pdata->regs->wkup_status = OMAP4_GPIO_IRQWAKEN0;
+ pdata->regs->wkup_clear = OMAP4_GPIO_IRQWAKEN0;
+ pdata->regs->wkup_set = OMAP4_GPIO_IRQWAKEN0;
+ pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
+ pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
+ pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
+ pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
break;
default:
WARN(1, "Invalid gpio bank_type\n");
@@ -75,6 +140,9 @@
return -EINVAL;
}
+ pwrdm = omap_hwmod_get_pwrdm(oh);
+ pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
+
od = omap_device_build(name, id - 1, oh, pdata,
sizeof(*pdata), omap_gpio_latency,
ARRAY_SIZE(omap_gpio_latency),
@@ -87,7 +155,6 @@
return PTR_ERR(od);
}
- gpio_bank_count++;
return 0;
}
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index dfffbbf..5610d10 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -78,9 +78,9 @@
/*
* Structure to save/restore gpmc context
- * to support core off on OMAP3
+ * to support core off.
*/
-struct omap3_gpmc_regs {
+struct omap_gpmc_regs {
u32 sysconfig;
u32 irqenable;
u32 timeout_ctrl;
@@ -776,10 +776,9 @@
return IRQ_HANDLED;
}
-#ifdef CONFIG_ARCH_OMAP3
-static struct omap3_gpmc_regs gpmc_context;
+static struct omap_gpmc_regs gpmc_context;
-void omap3_gpmc_save_context(void)
+void omap_gpmc_save_context(void)
{
int i;
@@ -811,7 +810,7 @@
}
}
-void omap3_gpmc_restore_context(void)
+void omap_gpmc_restore_context(void)
{
int i;
@@ -841,7 +840,6 @@
}
}
}
-#endif /* CONFIG_ARCH_OMAP3 */
/**
* gpmc_enable_hwecc - enable hardware ecc functionality
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 66868c5..0201705 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -31,16 +31,6 @@
#define HSMMC_NAME_LEN 9
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
-
-static int hsmmc_get_context_loss(struct device *dev)
-{
- return omap_pm_get_dev_context_loss_count(dev);
-}
-
-#else
-#define hsmmc_get_context_loss NULL
-#endif
static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
int power_on, int vdd)
@@ -318,8 +308,6 @@
else
mmc->reg_offset = 0;
- mmc->get_context_loss_count = hsmmc_get_context_loss;
-
mmc->slots[0].switch_pin = c->gpio_cd;
mmc->slots[0].gpio_wp = c->gpio_wp;
@@ -344,6 +332,20 @@
if (c->vcc_aux_disable_is_sleep)
mmc->slots[0].vcc_aux_disable_is_sleep = 1;
+ if (cpu_is_omap44xx()) {
+ if (omap_rev() > OMAP4430_REV_ES1_0)
+ mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
+ if (c->mmc >= 3 && c->mmc <= 5)
+ mmc->slots[0].features |= HSMMC_HAS_48MHZ_MASTER_CLK;
+ }
+
+ if (c->mmc_data) {
+ memcpy(&mmc->slots[0].mmc_data, c->mmc_data,
+ sizeof(struct mmc_platform_data));
+ mmc->slots[0].card_detect =
+ (mmc_card_detect_func)c->mmc_data->status;
+ }
+
/*
* NOTE: MMC slots should have a Vcc regulator set up.
* This may be from a TWL4030-family chip, another
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index f757e78..2cbe0f9 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -6,8 +6,12 @@
* published by the Free Software Foundation.
*/
+#include <asm/mach/mmc.h>
+
struct mmc_card;
+typedef int (*mmc_card_detect_func)(struct device *dev, int slot);
+
struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
@@ -25,6 +29,7 @@
char *name; /* or NULL for default */
struct device *dev; /* returned: pointer to mmc adapter */
int ocr_mask; /* temporary HACK */
+ struct mmc_platform_data *mmc_data;
/* Remux (pad configuration) when powering on/off */
void (*remux)(struct device *dev, int slot, int power_on);
/* init some special card */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2537090..a90630d 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -32,6 +32,7 @@
static unsigned int omap_revision;
u32 omap3_features;
+u32 omap4_features;
unsigned int omap_rev(void)
{
@@ -88,6 +89,8 @@
#define OMAP_TAP_DIE_ID_44XX_1 0x0208
#define OMAP_TAP_DIE_ID_44XX_2 0x020c
#define OMAP_TAP_DIE_ID_44XX_3 0x0210
+#define OMAP_TAP_PROD_ID_44XX_0 0x0214
+#define OMAP_TAP_PROD_ID_44XX_1 0x0218
#define read_tap_reg(reg) __raw_readl(tap_base + (reg))
@@ -126,6 +129,16 @@
odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
}
+void omap_get_production_id(struct omap_die_id *odi)
+{
+ if (cpu_is_omap44xx()) {
+ odi->id_0 = read_tap_reg(OMAP_TAP_PROD_ID_44XX_0);
+ odi->id_1 = read_tap_reg(OMAP_TAP_PROD_ID_44XX_1);
+ odi->id_2 = 0;
+ odi->id_3 = 0;
+ }
+}
+
static void __init omap24xx_check_revision(void)
{
int i, j;
@@ -212,6 +225,34 @@
*/
}
+static void __init omap4_check_features(void)
+{
+ u32 si_type;
+
+ omap4_features = 0;
+
+ if (cpu_is_omap443x())
+ omap4_features |= OMAP4_HAS_MPU_1GHZ;
+
+
+ if (cpu_is_omap446x()) {
+ si_type =
+ read_tap_reg(OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1);
+ switch ((si_type & (3 << 16)) >> 16) {
+ case 2:
+ /* High performance device */
+ omap4_features |= OMAP4_HAS_MPU_1_5GHZ;
+ omap4_features |= OMAP4_HAS_MPU_1_2GHZ;
+ break;
+ case 1:
+ default:
+ /* Standard device */
+ omap4_features |= OMAP4_HAS_MPU_1_2GHZ;
+ break;
+ }
+ }
+}
+
static void __init ti816x_check_features(void)
{
omap3_features = OMAP3_HAS_NEON;
@@ -331,8 +372,13 @@
static void __init omap4_check_revision(void)
{
u32 idcode;
- u16 hawkeye;
u8 rev;
+ /*
+	 * NOTE: OMAP4460+ uses the ramp system for identification and the
+	 * hawkeye variable is reused for it. Since the values are unique
+	 * we continue to use the current scheme
+ */
+ u16 hawkeye;
/*
* The IC rev detection is done with hawkeye and rev.
@@ -344,10 +390,10 @@
rev = (idcode >> 28) & 0xf;
/*
- * Few initial ES2.0 samples IDCODE is same as ES1.0
+	 * A few initial 4430 ES2.0 samples have the same IDCODE as ES1.0
* Use ARM register to detect the correct ES version
*/
- if (!rev) {
+ if (!rev && (hawkeye != 0xb94e)) {
idcode = read_cpuid(CPUID_ID);
rev = (idcode & 0xf) - 1;
}
@@ -377,6 +423,19 @@
omap_chip.oc |= CHIP_IS_OMAP4430ES2_2;
}
break;
+ case 0xb94e:
+ switch (rev) {
+ case 0:
+ omap_revision = OMAP4460_REV_ES1_0;
+ omap_chip.oc |= CHIP_IS_OMAP4460ES1_0;
+ break;
+ case 2:
+ default:
+ omap_revision = OMAP4460_REV_ES1_1;
+ omap_chip.oc |= CHIP_IS_OMAP4460ES1_1;
+ break;
+ }
+ break;
default:
/* Unknown default to latest silicon rev as default */
omap_revision = OMAP4430_REV_ES2_2;
@@ -518,6 +577,7 @@
return;
} else if (cpu_is_omap44xx()) {
omap4_check_revision();
+ omap4_check_features();
return;
} else {
pr_err("OMAP revision unknown, please fix!\n");
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
new file mode 100644
index 0000000..aa72a33
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/barriers.h
@@ -0,0 +1,48 @@
+/*
+ * OMAP memory barrier header.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __MACH_BARRIERS_H
+#define __MACH_BARRIERS_H
+
+#include <linux/types.h>
+
+/* provide a func ptr to allow safe calling at any point */
+struct omap_bus_post_fns {
+ void (*sync)(void);
+};
+
+extern struct omap_bus_post_fns omap_bus_post;
+
+#ifdef CONFIG_ARCH_OMAP4
+static inline void bus_sync(void)
+{
+ omap_bus_post.sync();
+}
+#else
+static inline void bus_sync(void)
+{ }
+#endif
+
+#define rmb() dsb()
+#define wmb() do { dsb(); outer_sync(); bus_sync(); } while (0)
+#define mb() wmb()
+
+#endif /* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
index 2f7ac70..58983a1 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
@@ -163,6 +163,7 @@
/* STD_FUSE_OPP_BGAP */
#define OMAP4_STD_FUSE_OPP_BGAP_SHIFT 0
#define OMAP4_STD_FUSE_OPP_BGAP_MASK (0xffffffff << 0)
+#define OMAP4_STD_FUSE_OPP_BGAP_MASK_LSB (0xffff << 16)
/* STD_FUSE_OPP_DPLL_0 */
#define OMAP4_STD_FUSE_OPP_DPLL_0_SHIFT 0
@@ -257,18 +258,50 @@
#define OMAP4_LDOSRAMCORE_ACTMODE_VSET_OUT_MASK (0x1f << 0)
/* TEMP_SENSOR */
-#define OMAP4_BGAP_TEMPSOFF_SHIFT 12
-#define OMAP4_BGAP_TEMPSOFF_MASK (1 << 12)
-#define OMAP4_BGAP_TSHUT_SHIFT 11
-#define OMAP4_BGAP_TSHUT_MASK (1 << 11)
-#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_SHIFT 10
-#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_MASK (1 << 10)
-#define OMAP4_BGAP_TEMP_SENSOR_SOC_SHIFT 9
-#define OMAP4_BGAP_TEMP_SENSOR_SOC_MASK (1 << 9)
-#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_SHIFT 8
-#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_MASK (1 << 8)
-#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_SHIFT 0
-#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK (0xff << 0)
+#define OMAP4_BGAP_TEMPSOFF_SHIFT 13
+#define OMAP4_BGAP_TEMPSOFF_MASK (1 << 13)
+#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_SHIFT 12
+#define OMAP4_BGAP_TEMP_SENSOR_CONTCONV_MASK (1 << 12)
+#define OMAP4_BGAP_TEMP_SENSOR_SOC_SHIFT 11
+#define OMAP4_BGAP_TEMP_SENSOR_SOC_MASK (1 << 11)
+#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_SHIFT 10
+#define OMAP4_BGAP_TEMP_SENSOR_EOCZ_MASK (1 << 10)
+#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_SHIFT 0
+#define OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
+
+/* BANDGAP_CTRL */
+#define OMAP4_SINGLE_MODE_SHIFT 31
+#define OMAP4_SINGLE_MODE_MASK (1 << 31)
+#define OMAP4_MASK_HOT_SHIFT 1
+#define OMAP4_MASK_HOT_MASK (1 << 1)
+#define OMAP4_MASK_COLD_SHIFT 0
+#define OMAP4_MASK_COLD_MASK (1 << 0)
+
+/* BANDGAP_COUNTER */
+#define OMAP4_COUNTER_SHIFT 0
+#define OMAP4_COUNTER_MASK (0xffffff << 0)
+
+/* BANDGAP_THRESHOLD */
+#define OMAP4_T_HOT_SHIFT 16
+#define OMAP4_T_HOT_MASK (0x3ff << 16)
+#define OMAP4_T_COLD_SHIFT 0
+#define OMAP4_T_COLD_MASK (0x3ff << 0)
+
+/* TSHUT_THRESHOLD */
+#define OMAP4_TSHUT_HOT_SHIFT 16
+#define OMAP4_TSHUT_HOT_MASK (0x3ff << 16)
+#define OMAP4_TSHUT_COLD_SHIFT 0
+#define OMAP4_TSHUT_COLD_MASK (0x3ff << 0)
+
+/* BANDGAP_STATUS */
+#define OMAP4_CLEAN_STOP_SHIFT 3
+#define OMAP4_CLEAN_STOP_MASK (1 << 3)
+#define OMAP4_BGAP_ALERT_SHIFT 2
+#define OMAP4_BGAP_ALERT_MASK (1 << 2)
+#define OMAP4_HOT_FLAG_SHIFT 1
+#define OMAP4_HOT_FLAG_MASK (1 << 1)
+#define OMAP4_COLD_FLAG_SHIFT 0
+#define OMAP4_COLD_FLAG_MASK (1 << 0)
/* DPLL_NWELL_TRIM_0 */
#define OMAP4_DPLL_ABE_NWELL_TRIM_MUX_CTRL_SHIFT 29
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
index c88420d..852a3a9 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
@@ -1185,6 +1185,14 @@
#define OMAP4_LPDDR21_VREF_DQ_TAP0_MASK (1 << 13)
#define OMAP4_LPDDR21_VREF_DQ_TAP1_SHIFT 12
#define OMAP4_LPDDR21_VREF_DQ_TAP1_MASK (1 << 12)
+#define OMAP4_LPDDR21_VREF_EN_CA_SHIFT 3
+#define OMAP4_LPDDR21_VREF_EN_CA_MASK (1 << 3)
+#define OMAP4_LPDDR21_VREF_EN_DQ_SHIFT 2
+#define OMAP4_LPDDR21_VREF_EN_DQ_MASK (1 << 2)
+#define OMAP4_LPDDR21_VREF_AUTO_EN_CA_SHIFT 1
+#define OMAP4_LPDDR21_VREF_AUTO_EN_CA_MASK (1 << 1)
+#define OMAP4_LPDDR21_VREF_AUTO_EN_DQ_SHIFT 0
+#define OMAP4_LPDDR21_VREF_AUTO_EN_DQ_MASK (1 << 0)
/* CONTROL_LPDDR2IO2_0 */
#define OMAP4_LPDDR2IO2_GR4_SR_SHIFT 30
diff --git a/arch/arm/mach-omap2/include/mach/dmm-44xx.h b/arch/arm/mach-omap2/include/mach/dmm-44xx.h
new file mode 100644
index 0000000..c25dda5
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/dmm-44xx.h
@@ -0,0 +1,363 @@
+/*
+ * OMAP44xx DMM_CORE registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DMM_CORE_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_DMM_CORE_44XX_H
+
+
+/* Base address */
+#define OMAP44XX_DMM__DMM 0x4e000000
+
+/* Registers offset */
+#define OMAP44XX_DMM_REVISION 0x0000
+#define OMAP44XX_DMM_HWINFO 0x0004
+#define OMAP44XX_DMM_LISA_HWINFO 0x0008
+#define OMAP44XX_DMM_SYSCONFIG 0x0010
+#define OMAP44XX_DMM_LISA_LOCK 0x001c
+#define OMAP44XX_DMM_LISA_MAP 0x0040
+#define OMAP44XX_DMM_TILER_HWINFO 0x0208
+#define OMAP44XX_DMM_TILER_OR 0x0220
+#define OMAP44XX_DMM_PAT_HWINFO 0x0408
+#define OMAP44XX_DMM_PAT_GEOMETRY 0x040c
+#define OMAP44XX_DMM_PAT_CONFIG 0x0410
+#define OMAP44XX_DMM_PAT_VIEW 0x0420
+#define OMAP44XX_DMM_PAT_VIEW_MAP 0x0440
+#define OMAP44XX_DMM_PAT_VIEW_MAP_BASE 0x0460
+#define OMAP44XX_DMM_PAT_IRQ_EOI 0x0478
+#define OMAP44XX_DMM_PAT_IRQSTATUS_RAW 0x0480
+#define OMAP44XX_DMM_PAT_IRQSTATUS 0x0490
+#define OMAP44XX_DMM_PAT_IRQENABLE_SET 0x04a0
+#define OMAP44XX_DMM_PAT_IRQENABLE_CLR 0x04b0
+#define OMAP44XX_DMM_PAT_STATUS 0x04c0
+#define OMAP44XX_DMM_PAT_DESCR 0x0500
+#define OMAP44XX_DMM_PAT_AREA 0x0504
+#define OMAP44XX_DMM_PAT_CTRL 0x0508
+#define OMAP44XX_DMM_PAT_DATA 0x050c
+#define OMAP44XX_DMM_PEG_HWINFO 0x0608
+#define OMAP44XX_DMM_PEG_PRIO 0x0620
+#define OMAP44XX_DMM_PEG_PRIO_PAT 0x0640
+
+/* Registers shifts and masks */
+
+/* DMM_REVISION */
+#define OMAP44XX_SCHEME_SHIFT 30
+#define OMAP44XX_SCHEME_MASK (0x3 << 30)
+#define OMAP44XX_FUNC_SHIFT 16
+#define OMAP44XX_FUNC_MASK (0xfff << 16)
+#define OMAP44XX_R_RTL_SHIFT 11
+#define OMAP44XX_R_RTL_MASK (0x1f << 11)
+#define OMAP44XX_X_MAJOR_SHIFT 8
+#define OMAP44XX_X_MAJOR_MASK (0x7 << 8)
+#define OMAP44XX_CUSTOM_SHIFT 6
+#define OMAP44XX_CUSTOM_MASK (0x3 << 6)
+#define OMAP44XX_Y_MINOR_SHIFT 0
+#define OMAP44XX_Y_MINOR_MASK (0x3f << 0)
+
+/* DMM_HWINFO */
+#define OMAP44XX_ROBIN_CNT_SHIFT 16
+#define OMAP44XX_ROBIN_CNT_MASK (0xf << 16)
+#define OMAP44XX_ELLA_CNT_SHIFT 8
+#define OMAP44XX_ELLA_CNT_MASK (0xf << 8)
+#define OMAP44XX_TILER_CNT_SHIFT 0
+#define OMAP44XX_TILER_CNT_MASK (0xf << 0)
+
+/* DMM_LISA_HWINFO */
+#define OMAP44XX_SDRC_CNT_SHIFT 8
+#define OMAP44XX_SDRC_CNT_MASK (0xf << 8)
+#define OMAP44XX_SECTION_CNT_SHIFT 0
+#define OMAP44XX_SECTION_CNT_MASK (0x1f << 0)
+
+/* DMM_SYSCONFIG */
+#define OMAP44XX_IDLE_MODE_SHIFT 2
+#define OMAP44XX_IDLE_MODE_MASK (0x3 << 2)
+
+/* DMM_LISA_LOCK */
+#define OMAP44XX_LOCK_SHIFT 0
+#define OMAP44XX_LOCK_MASK (1 << 0)
+
+/* DMM_LISA_MAP */
+#define OMAP44XX_SYS_ADDR_SHIFT 24
+#define OMAP44XX_SYS_ADDR_MASK (0xff << 24)
+#define OMAP44XX_SYS_SIZE_SHIFT 20
+#define OMAP44XX_SYS_SIZE_MASK (0x7 << 20)
+#define OMAP44XX_SDRC_INTL_SHIFT 18
+#define OMAP44XX_SDRC_INTL_MASK (0x3 << 18)
+#define OMAP44XX_SDRC_ADDRSPC_SHIFT 16
+#define OMAP44XX_SDRC_ADDRSPC_MASK (0x3 << 16)
+#define OMAP44XX_SDRC_MAP_SHIFT 8
+#define OMAP44XX_SDRC_MAP_MASK (0x3 << 8)
+#define OMAP44XX_SDRC_ADDR_SHIFT 0
+#define OMAP44XX_SDRC_ADDR_MASK (0xff << 0)
+
+/* DMM_TILER_HWINFO */
+#define OMAP44XX_OR_CNT_SHIFT 0
+#define OMAP44XX_OR_CNT_MASK (0x7f << 0)
+
+/* DMM_TILER_OR */
+#define OMAP44XX_W7_SHIFT 31
+#define OMAP44XX_W7_MASK (1 << 31)
+#define OMAP44XX_OR7_SHIFT 28
+#define OMAP44XX_OR7_MASK (0x7 << 28)
+#define OMAP44XX_W6_SHIFT 27
+#define OMAP44XX_W6_MASK (1 << 27)
+#define OMAP44XX_OR6_SHIFT 24
+#define OMAP44XX_OR6_MASK (0x7 << 24)
+#define OMAP44XX_W5_SHIFT 23
+#define OMAP44XX_W5_MASK (1 << 23)
+#define OMAP44XX_OR5_SHIFT 20
+#define OMAP44XX_OR5_MASK (0x7 << 20)
+#define OMAP44XX_W4_SHIFT 19
+#define OMAP44XX_W4_MASK (1 << 19)
+#define OMAP44XX_OR4_SHIFT 16
+#define OMAP44XX_OR4_MASK (0x7 << 16)
+#define OMAP44XX_W3_SHIFT 15
+#define OMAP44XX_W3_MASK (1 << 15)
+#define OMAP44XX_OR3_SHIFT 12
+#define OMAP44XX_OR3_MASK (0x7 << 12)
+#define OMAP44XX_W2_SHIFT 11
+#define OMAP44XX_W2_MASK (1 << 11)
+#define OMAP44XX_OR2_SHIFT 8
+#define OMAP44XX_OR2_MASK (0x7 << 8)
+#define OMAP44XX_W1_SHIFT 7
+#define OMAP44XX_W1_MASK (1 << 7)
+#define OMAP44XX_OR1_SHIFT 4
+#define OMAP44XX_OR1_MASK (0x7 << 4)
+#define OMAP44XX_W0_SHIFT 3
+#define OMAP44XX_W0_MASK (1 << 3)
+#define OMAP44XX_OR0_SHIFT 0
+#define OMAP44XX_OR0_MASK (0x7 << 0)
+
+/* DMM_PAT_HWINFO */
+#define OMAP44XX_ENGINE_CNT_SHIFT 24
+#define OMAP44XX_ENGINE_CNT_MASK (0x1f << 24)
+#define OMAP44XX_LUT_CNT_SHIFT 16
+#define OMAP44XX_LUT_CNT_MASK (0x1f << 16)
+#define OMAP44XX_VIEW_MAP_CNT_SHIFT 8
+#define OMAP44XX_VIEW_MAP_CNT_MASK (0xf << 8)
+#define OMAP44XX_VIEW_CNT_SHIFT 0
+#define OMAP44XX_VIEW_CNT_MASK (0x7f << 0)
+
+/* DMM_PAT_GEOMETRY */
+#define OMAP44XX_CONT_HGHT_SHIFT 24
+#define OMAP44XX_CONT_HGHT_MASK (0x7 << 24)
+#define OMAP44XX_CONT_WDTH_SHIFT 16
+#define OMAP44XX_CONT_WDTH_MASK (0xf << 16)
+#define OMAP44XX_ADDR_RANGE_SHIFT 8
+#define OMAP44XX_ADDR_RANGE_MASK (0x3f << 8)
+#define OMAP44XX_PAGE_SZ_SHIFT 0
+#define OMAP44XX_PAGE_SZ_MASK (0x1f << 0)
+
+/* DMM_PAT_CONFIG */
+#define OMAP44XX_MODE3_SHIFT 3
+#define OMAP44XX_MODE3_MASK (1 << 3)
+#define OMAP44XX_MODE2_SHIFT 2
+#define OMAP44XX_MODE2_MASK (1 << 2)
+#define OMAP44XX_MODE1_SHIFT 1
+#define OMAP44XX_MODE1_MASK (1 << 1)
+#define OMAP44XX_MODE0_SHIFT 0
+#define OMAP44XX_MODE0_MASK (1 << 0)
+
+/* DMM_PAT_VIEW */
+#define OMAP44XX_V7_SHIFT 28
+#define OMAP44XX_V7_MASK (0x3 << 28)
+#define OMAP44XX_V6_SHIFT 24
+#define OMAP44XX_V6_MASK (0x3 << 24)
+#define OMAP44XX_V5_SHIFT 20
+#define OMAP44XX_V5_MASK (0x3 << 20)
+#define OMAP44XX_V4_SHIFT 16
+#define OMAP44XX_V4_MASK (0x3 << 16)
+#define OMAP44XX_V3_SHIFT 12
+#define OMAP44XX_V3_MASK (0x3 << 12)
+#define OMAP44XX_V2_SHIFT 8
+#define OMAP44XX_V2_MASK (0x3 << 8)
+#define OMAP44XX_V1_SHIFT 4
+#define OMAP44XX_V1_MASK (0x3 << 4)
+#define OMAP44XX_V0_SHIFT 0
+#define OMAP44XX_V0_MASK (0x3 << 0)
+
+/* DMM_PAT_VIEW_MAP */
+#define OMAP44XX_ACCESS_PAGE_SHIFT 31
+#define OMAP44XX_ACCESS_PAGE_MASK (1 << 31)
+#define OMAP44XX_CONT_PAGE_SHIFT 24
+#define OMAP44XX_CONT_PAGE_MASK (0xf << 24)
+#define OMAP44XX_ACCESS_32_SHIFT 23
+#define OMAP44XX_ACCESS_32_MASK (1 << 23)
+#define OMAP44XX_CONT_32_SHIFT 16
+#define OMAP44XX_CONT_32_MASK (0xf << 16)
+#define OMAP44XX_ACCESS_16_SHIFT 15
+#define OMAP44XX_ACCESS_16_MASK (1 << 15)
+#define OMAP44XX_CONT_16_SHIFT 8
+#define OMAP44XX_CONT_16_MASK (0xf << 8)
+#define OMAP44XX_ACCESS_8_SHIFT 7
+#define OMAP44XX_ACCESS_8_MASK (1 << 7)
+#define OMAP44XX_CONT_8_SHIFT 0
+#define OMAP44XX_CONT_8_MASK (0xf << 0)
+
+/* DMM_PAT_VIEW_MAP_BASE */
+#define OMAP44XX_BASE_ADDR_SHIFT 31
+#define OMAP44XX_BASE_ADDR_MASK (1 << 31)
+
+/* DMM_PAT_IRQ_EOI */
+#define OMAP44XX_EOI_SHIFT 0
+#define OMAP44XX_EOI_MASK (1 << 0)
+
+/* DMM_PAT_IRQSTATUS_RAW */
+#define OMAP44XX_ERR_LUT_MISS3_SHIFT 31
+#define OMAP44XX_ERR_LUT_MISS3_MASK (1 << 31)
+#define OMAP44XX_ERR_UPD_DATA3_SHIFT 30
+#define OMAP44XX_ERR_UPD_DATA3_MASK (1 << 30)
+#define OMAP44XX_ERR_UPD_CTRL3_SHIFT 29
+#define OMAP44XX_ERR_UPD_CTRL3_MASK (1 << 29)
+#define OMAP44XX_ERR_UPD_AREA3_SHIFT 28
+#define OMAP44XX_ERR_UPD_AREA3_MASK (1 << 28)
+#define OMAP44XX_ERR_INV_DATA3_SHIFT 27
+#define OMAP44XX_ERR_INV_DATA3_MASK (1 << 27)
+#define OMAP44XX_ERR_INV_DSC3_SHIFT 26
+#define OMAP44XX_ERR_INV_DSC3_MASK (1 << 26)
+#define OMAP44XX_FILL_LST3_SHIFT 25
+#define OMAP44XX_FILL_LST3_MASK (1 << 25)
+#define OMAP44XX_FILL_DSC3_SHIFT 24
+#define OMAP44XX_FILL_DSC3_MASK (1 << 24)
+#define OMAP44XX_ERR_LUT_MISS2_SHIFT 23
+#define OMAP44XX_ERR_LUT_MISS2_MASK (1 << 23)
+#define OMAP44XX_ERR_UPD_DATA2_SHIFT 22
+#define OMAP44XX_ERR_UPD_DATA2_MASK (1 << 22)
+#define OMAP44XX_ERR_UPD_CTRL2_SHIFT 21
+#define OMAP44XX_ERR_UPD_CTRL2_MASK (1 << 21)
+#define OMAP44XX_ERR_UPD_AREA2_SHIFT 20
+#define OMAP44XX_ERR_UPD_AREA2_MASK (1 << 20)
+#define OMAP44XX_ERR_INV_DATA2_SHIFT 19
+#define OMAP44XX_ERR_INV_DATA2_MASK (1 << 19)
+#define OMAP44XX_ERR_INV_DSC2_SHIFT 18
+#define OMAP44XX_ERR_INV_DSC2_MASK (1 << 18)
+#define OMAP44XX_FILL_LST2_SHIFT 17
+#define OMAP44XX_FILL_LST2_MASK (1 << 17)
+#define OMAP44XX_FILL_DSC2_SHIFT 16
+#define OMAP44XX_FILL_DSC2_MASK (1 << 16)
+#define OMAP44XX_ERR_LUT_MISS1_SHIFT 15
+#define OMAP44XX_ERR_LUT_MISS1_MASK (1 << 15)
+#define OMAP44XX_ERR_UPD_DATA1_SHIFT 14
+#define OMAP44XX_ERR_UPD_DATA1_MASK (1 << 14)
+#define OMAP44XX_ERR_UPD_CTRL1_SHIFT 13
+#define OMAP44XX_ERR_UPD_CTRL1_MASK (1 << 13)
+#define OMAP44XX_ERR_UPD_AREA1_SHIFT 12
+#define OMAP44XX_ERR_UPD_AREA1_MASK (1 << 12)
+#define OMAP44XX_ERR_INV_DATA1_SHIFT 11
+#define OMAP44XX_ERR_INV_DATA1_MASK (1 << 11)
+#define OMAP44XX_ERR_INV_DSC1_SHIFT 10
+#define OMAP44XX_ERR_INV_DSC1_MASK (1 << 10)
+#define OMAP44XX_FILL_LST1_SHIFT 9
+#define OMAP44XX_FILL_LST1_MASK (1 << 9)
+#define OMAP44XX_FILL_DSC1_SHIFT 8
+#define OMAP44XX_FILL_DSC1_MASK (1 << 8)
+#define OMAP44XX_ERR_LUT_MISS0_SHIFT 7
+#define OMAP44XX_ERR_LUT_MISS0_MASK (1 << 7)
+#define OMAP44XX_ERR_UPD_DATA0_SHIFT 6
+#define OMAP44XX_ERR_UPD_DATA0_MASK (1 << 6)
+#define OMAP44XX_ERR_UPD_CTRL0_SHIFT 5
+#define OMAP44XX_ERR_UPD_CTRL0_MASK (1 << 5)
+#define OMAP44XX_ERR_UPD_AREA0_SHIFT 4
+#define OMAP44XX_ERR_UPD_AREA0_MASK (1 << 4)
+#define OMAP44XX_ERR_INV_DATA0_SHIFT 3
+#define OMAP44XX_ERR_INV_DATA0_MASK (1 << 3)
+#define OMAP44XX_ERR_INV_DSC0_SHIFT 2
+#define OMAP44XX_ERR_INV_DSC0_MASK (1 << 2)
+#define OMAP44XX_FILL_LST0_SHIFT 1
+#define OMAP44XX_FILL_LST0_MASK (1 << 1)
+#define OMAP44XX_FILL_DSC0_SHIFT 0
+#define OMAP44XX_FILL_DSC0_MASK (1 << 0)
+
+/* DMM_PAT_IRQSTATUS */
+
+/* DMM_PAT_IRQENABLE_SET */
+
+/* DMM_PAT_IRQENABLE_CLR */
+
+/* DMM_PAT_STATUS */
+#define OMAP44XX_CNT_SHIFT 16
+#define OMAP44XX_CNT_MASK (0x1ff << 16)
+#define OMAP44XX_ERROR_SHIFT 10
+#define OMAP44XX_ERROR_MASK (0x3f << 10)
+#define OMAP44XX_BYPASSED_SHIFT 7
+#define OMAP44XX_BYPASSED_MASK (1 << 7)
+#define OMAP44XX_LINKED_SHIFT 4
+#define OMAP44XX_LINKED_MASK (1 << 4)
+#define OMAP44XX_DONE_SHIFT 3
+#define OMAP44XX_DONE_MASK (1 << 3)
+#define OMAP44XX_RUN_SHIFT 2
+#define OMAP44XX_RUN_MASK (1 << 2)
+#define OMAP44XX_VALID_SHIFT 1
+#define OMAP44XX_VALID_MASK (1 << 1)
+#define OMAP44XX_READY_SHIFT 0
+#define OMAP44XX_READY_MASK (1 << 0)
+
+/* DMM_PAT_DESCR */
+#define OMAP44XX_ADDR_SHIFT 4
+#define OMAP44XX_ADDR_MASK (0xfffffff << 4)
+
+/* DMM_PAT_AREA */
+#define OMAP44XX_Y1_SHIFT 24
+#define OMAP44XX_Y1_MASK (0x7f << 24)
+#define OMAP44XX_X1_SHIFT 16
+#define OMAP44XX_X1_MASK (0xff << 16)
+#define OMAP44XX_Y0_SHIFT 8
+#define OMAP44XX_Y0_MASK (0x7f << 8)
+#define OMAP44XX_X0_SHIFT 0
+#define OMAP44XX_X0_MASK (0xff << 0)
+
+/* DMM_PAT_CTRL */
+#define OMAP44XX_INITIATOR_SHIFT 28
+#define OMAP44XX_INITIATOR_MASK (0xf << 28)
+#define OMAP44XX_SYNC_SHIFT 16
+#define OMAP44XX_SYNC_MASK (1 << 16)
+#define OMAP44XX_DIRECTION_SHIFT 4
+#define OMAP44XX_DIRECTION_MASK (0x7 << 4)
+#define OMAP44XX_START_SHIFT 0
+#define OMAP44XX_START_MASK (1 << 0)
+
+/* DMM_PAT_DATA */
+
+/* DMM_PEG_HWINFO */
+#define OMAP44XX_PRIO_CNT_SHIFT 0
+#define OMAP44XX_PRIO_CNT_MASK (0x7f << 0)
+
+/* DMM_PEG_PRIO */
+#define OMAP44XX_P7_SHIFT 28
+#define OMAP44XX_P7_MASK (0x7 << 28)
+#define OMAP44XX_P6_SHIFT 24
+#define OMAP44XX_P6_MASK (0x7 << 24)
+#define OMAP44XX_P5_SHIFT 20
+#define OMAP44XX_P5_MASK (0x7 << 20)
+#define OMAP44XX_P4_SHIFT 16
+#define OMAP44XX_P4_MASK (0x7 << 16)
+#define OMAP44XX_P3_SHIFT 12
+#define OMAP44XX_P3_MASK (0x7 << 12)
+#define OMAP44XX_P2_SHIFT 8
+#define OMAP44XX_P2_MASK (0x7 << 8)
+#define OMAP44XX_P1_SHIFT 4
+#define OMAP44XX_P1_MASK (0x7 << 4)
+#define OMAP44XX_P0_SHIFT 0
+#define OMAP44XX_P0_MASK (0x7 << 0)
+
+/* DMM_PEG_PRIO_PAT */
+#define OMAP44XX_W_PAT_SHIFT 3
+#define OMAP44XX_W_PAT_MASK (1 << 3)
+#define OMAP44XX_P_PAT_SHIFT 0
+#define OMAP44XX_P_PAT_MASK (0x7 << 0)
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/dmm.h b/arch/arm/mach-omap2/include/mach/dmm.h
new file mode 100644
index 0000000..3567b6f9
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/dmm.h
@@ -0,0 +1,164 @@
+/*
+ * dmm.h
+ *
+ * DMM driver support functions for TI DMM-TILER hardware block.
+ *
+ * Author: David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DMM_H
+#define DMM_H
+
+#define DMM_BASE 0x4E000000
+#define DMM_SIZE 0x800
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_AREA__0 0x504
+#define DMM_PAT_CTRL__0 0x508
+#define DMM_PAT_DATA__0 0x50C
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+/**
+ * PAT refill programming mode.
+ */
+enum pat_mode {
+ MANUAL,
+ AUTO
+};
+
+/**
+ * Area definition for DMM physical address translator.
+ */
+struct pat_area {
+ s32 x0:8;
+ s32 y0:8;
+ s32 x1:8;
+ s32 y1:8;
+};
+
+/**
+ * DMM physical address translator control.
+ */
+struct pat_ctrl {
+ s32 start:4;
+ s32 dir:4;
+ s32 lut_id:8;
+ s32 sync:12;
+ s32 ini:4;
+};
+
+/**
+ * PAT descriptor.
+ */
+struct pat {
+ struct pat *next;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ u32 data;
+};
+
+/**
+ * DMM device data
+ */
+struct dmm {
+ void __iomem *base;
+};
+
+/**
+ * Create and initialize the physical address translator.
+ * @param id PAT id
+ * @return pointer to device data
+ */
+struct dmm *dmm_pat_init(u32 id);
+
+/**
+ * Program the physical address translator.
+ * @param dmm Device data
+ * @param desc PAT descriptor
+ * @param mode programming mode
+ * @return an error status.
+ */
+s32 dmm_pat_refill(struct dmm *dmm, struct pat *desc, enum pat_mode mode);
+
+/**
+ * Clean up the physical address translator.
+ * @param dmm Device data
+ * @return an error status.
+ */
+void dmm_pat_release(struct dmm *dmm);
+
+/**
+ * DMM Platform Device Data structure
+ *
+ */
+struct omap_dmm_platform_data {
+ const char *oh_name;
+ void __iomem *base;
+ int irq;
+};
+
+/**
+ * Init function for use in board init file
+ *
+ */
+void omap_dmm_init(void);
+
+#endif
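
As a rough sketch of how a driver might drive the PAT API declared in dmm.h above (illustrative only, not part of the patch; the LUT id, area coordinates, error conventions and the helper name example_pat_refill are assumptions):

#include <linux/errno.h>
#include <linux/types.h>
#include <mach/dmm.h>

/*
 * Program one PAT entry, assuming dmm_pat_init() returns NULL on failure
 * and dmm_pat_refill() returns 0 on success (the header does not spell
 * this out).
 */
static s32 example_pat_refill(u32 page_pa)
{
	struct dmm *dmm;
	s32 ret;
	struct pat desc = {
		.area = { .x0 = 0, .y0 = 0, .x1 = 0, .y1 = 0 },
		.ctrl = { .start = 1, .lut_id = 0 },
		.data = page_pa,	/* physical address of the backing page */
	};

	dmm = dmm_pat_init(0);		/* PAT instance 0 */
	if (!dmm)
		return -ENODEV;

	ret = dmm_pat_refill(dmm, &desc, MANUAL);
	dmm_pat_release(dmm);
	return ret;
}
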
diff --git a/arch/arm/mach-omap2/include/mach/emif-44xx.h b/arch/arm/mach-omap2/include/mach/emif-44xx.h
new file mode 100644
index 0000000..58a80f2
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/emif-44xx.h
@@ -0,0 +1,526 @@
+/*
+ * OMAP44xx EMIF registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ * Santosh Shilimkar (santosh.shilimkar@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_EMIF_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_EMIF_44XX_H
+
+
+/* Base address */
+#define OMAP44XX_EMIF1 0x4c000000
+#define OMAP44XX_EMIF2 0x4d000000
+
+/* Registers offset */
+#define OMAP44XX_EMIF_MOD_ID_REV 0x0000
+#define OMAP44XX_EMIF_STATUS 0x0004
+#define OMAP44XX_EMIF_SDRAM_CONFIG 0x0008
+#define OMAP44XX_EMIF_SDRAM_CONFIG_2 0x000c
+#define OMAP44XX_EMIF_SDRAM_REF_CTRL 0x0010
+#define OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW 0x0014
+#define OMAP44XX_EMIF_SDRAM_TIM_1 0x0018
+#define OMAP44XX_EMIF_SDRAM_TIM_1_SHDW 0x001c
+#define OMAP44XX_EMIF_SDRAM_TIM_2 0x0020
+#define OMAP44XX_EMIF_SDRAM_TIM_2_SHDW 0x0024
+#define OMAP44XX_EMIF_SDRAM_TIM_3 0x0028
+#define OMAP44XX_EMIF_SDRAM_TIM_3_SHDW 0x002c
+#define OMAP44XX_EMIF_LPDDR2_NVM_TIM 0x0030
+#define OMAP44XX_EMIF_LPDDR2_NVM_TIM_SHDW 0x0034
+#define OMAP44XX_EMIF_PWR_MGMT_CTRL 0x0038
+#define OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW 0x003c
+#define OMAP44XX_EMIF_LPDDR2_MODE_REG_DATA 0x0040
+#define OMAP44XX_EMIF_LPDDR2_MODE_REG_CFG 0x0050
+#define OMAP44XX_EMIF_OCP_CONFIG 0x0054
+#define OMAP44XX_EMIF_OCP_CFG_VAL_1 0x0058
+#define OMAP44XX_EMIF_OCP_CFG_VAL_2 0x005c
+#define OMAP44XX_EMIF_IODFT_TLGC 0x0060
+#define OMAP44XX_EMIF_IODFT_CTRL_MISR_RSLT 0x0064
+#define OMAP44XX_EMIF_IODFT_ADDR_MISR_RSLT 0x0068
+#define OMAP44XX_EMIF_IODFT_DATA_MISR_RSLT_1 0x006c
+#define OMAP44XX_EMIF_IODFT_DATA_MISR_RSLT_2 0x0070
+#define OMAP44XX_EMIF_IODFT_DATA_MISR_RSLT_3 0x0074
+#define OMAP44XX_EMIF_PERF_CNT_1 0x0080
+#define OMAP44XX_EMIF_PERF_CNT_2 0x0084
+#define OMAP44XX_EMIF_PERF_CNT_CFG 0x0088
+#define OMAP44XX_EMIF_PERF_CNT_SEL 0x008c
+#define OMAP44XX_EMIF_PERF_CNT_TIM 0x0090
+#define OMAP44XX_EMIF_READ_IDLE_CTRL 0x0098
+#define OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW 0x009c
+#define OMAP44XX_EMIF_IRQ_EOI 0x00a0
+#define OMAP44XX_EMIF_IRQSTATUS_RAW_SYS 0x00a4
+#define OMAP44XX_EMIF_IRQSTATUS_RAW_LL 0x00a8
+#define OMAP44XX_EMIF_IRQSTATUS_SYS 0x00ac
+#define OMAP44XX_EMIF_IRQSTATUS_LL 0x00b0
+#define OMAP44XX_EMIF_IRQENABLE_SET_SYS 0x00b4
+#define OMAP44XX_EMIF_IRQENABLE_SET_LL 0x00b8
+#define OMAP44XX_EMIF_IRQENABLE_CLR_SYS 0x00bc
+#define OMAP44XX_EMIF_IRQENABLE_CLR_LL 0x00c0
+#define OMAP44XX_EMIF_ZQ_CONFIG 0x00c8
+#define OMAP44XX_EMIF_TEMP_ALERT_CONFIG 0x00cc
+#define OMAP44XX_EMIF_OCP_ERR_LOG 0x00d0
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1 0x00e4
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW 0x00e8
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_2 0x00ec
+
+/* Registers shifts and masks */
+
+/* EMIF_MOD_ID_REV */
+#define OMAP44XX_REG_SCHEME_SHIFT 30
+#define OMAP44XX_REG_SCHEME_MASK (0x3 << 30)
+#define OMAP44XX_REG_MODULE_ID_SHIFT 16
+#define OMAP44XX_REG_MODULE_ID_MASK (0xfff << 16)
+#define OMAP44XX_REG_RTL_VERSION_SHIFT 11
+#define OMAP44XX_REG_RTL_VERSION_MASK (0x1f << 11)
+#define OMAP44XX_REG_MAJOR_REVISION_SHIFT 8
+#define OMAP44XX_REG_MAJOR_REVISION_MASK (0x7 << 8)
+#define OMAP44XX_REG_MINOR_REVISION_SHIFT 0
+#define OMAP44XX_REG_MINOR_REVISION_MASK (0x3f << 0)
+
+/* STATUS */
+#define OMAP44XX_REG_BE_SHIFT 31
+#define OMAP44XX_REG_BE_MASK (1 << 31)
+#define OMAP44XX_REG_DUAL_CLK_MODE_SHIFT 30
+#define OMAP44XX_REG_DUAL_CLK_MODE_MASK (1 << 30)
+#define OMAP44XX_REG_FAST_INIT_SHIFT 29
+#define OMAP44XX_REG_FAST_INIT_MASK (1 << 29)
+#define OMAP44XX_REG_PHY_DLL_READY_SHIFT 2
+#define OMAP44XX_REG_PHY_DLL_READY_MASK (1 << 2)
+
+/* SDRAM_CONFIG */
+#define OMAP44XX_REG_SDRAM_TYPE_SHIFT 29
+#define OMAP44XX_REG_SDRAM_TYPE_MASK (0x7 << 29)
+#define OMAP44XX_REG_IBANK_POS_SHIFT 27
+#define OMAP44XX_REG_IBANK_POS_MASK (0x3 << 27)
+#define OMAP44XX_REG_DDR_TERM_SHIFT 24
+#define OMAP44XX_REG_DDR_TERM_MASK (0x7 << 24)
+#define OMAP44XX_REG_DDR2_DDQS_SHIFT 23
+#define OMAP44XX_REG_DDR2_DDQS_MASK (1 << 23)
+#define OMAP44XX_REG_DYN_ODT_SHIFT 21
+#define OMAP44XX_REG_DYN_ODT_MASK (0x3 << 21)
+#define OMAP44XX_REG_DDR_DISABLE_DLL_SHIFT 20
+#define OMAP44XX_REG_DDR_DISABLE_DLL_MASK (1 << 20)
+#define OMAP44XX_REG_SDRAM_DRIVE_SHIFT 18
+#define OMAP44XX_REG_SDRAM_DRIVE_MASK (0x3 << 18)
+#define OMAP44XX_REG_CWL_SHIFT 16
+#define OMAP44XX_REG_CWL_MASK (0x3 << 16)
+#define OMAP44XX_REG_NARROW_MODE_SHIFT 14
+#define OMAP44XX_REG_NARROW_MODE_MASK (0x3 << 14)
+#define OMAP44XX_REG_CL_SHIFT 10
+#define OMAP44XX_REG_CL_MASK (0xf << 10)
+#define OMAP44XX_REG_ROWSIZE_SHIFT 7
+#define OMAP44XX_REG_ROWSIZE_MASK (0x7 << 7)
+#define OMAP44XX_REG_IBANK_SHIFT 4
+#define OMAP44XX_REG_IBANK_MASK (0x7 << 4)
+#define OMAP44XX_REG_EBANK_SHIFT 3
+#define OMAP44XX_REG_EBANK_MASK (1 << 3)
+#define OMAP44XX_REG_PAGESIZE_SHIFT 0
+#define OMAP44XX_REG_PAGESIZE_MASK (0x7 << 0)
+
+/* SDRAM_CONFIG_2 */
+#define OMAP44XX_REG_CS1NVMEN_SHIFT 30
+#define OMAP44XX_REG_CS1NVMEN_MASK (1 << 30)
+#define OMAP44XX_REG_EBANK_POS_SHIFT 27
+#define OMAP44XX_REG_EBANK_POS_MASK (1 << 27)
+#define OMAP44XX_REG_RDBNUM_SHIFT 4
+#define OMAP44XX_REG_RDBNUM_MASK (0x3 << 4)
+#define OMAP44XX_REG_RDBSIZE_SHIFT 0
+#define OMAP44XX_REG_RDBSIZE_MASK (0x7 << 0)
+
+/* SDRAM_REF_CTRL */
+#define OMAP44XX_REG_INITREF_DIS_SHIFT 31
+#define OMAP44XX_REG_INITREF_DIS_MASK (1 << 31)
+#define OMAP44XX_REG_SRT_SHIFT 29
+#define OMAP44XX_REG_SRT_MASK (1 << 29)
+#define OMAP44XX_REG_ASR_SHIFT 28
+#define OMAP44XX_REG_ASR_MASK (1 << 28)
+#define OMAP44XX_REG_PASR_SHIFT 24
+#define OMAP44XX_REG_PASR_MASK (0x7 << 24)
+#define OMAP44XX_REG_REFRESH_RATE_SHIFT 0
+#define OMAP44XX_REG_REFRESH_RATE_MASK (0xffff << 0)
+
+/* SDRAM_REF_CTRL_SHDW */
+#define OMAP44XX_REG_REFRESH_RATE_SHDW_SHIFT 0
+#define OMAP44XX_REG_REFRESH_RATE_SHDW_MASK (0xffff << 0)
+
+/* SDRAM_TIM_1 */
+#define OMAP44XX_REG_T_RP_SHIFT 25
+#define OMAP44XX_REG_T_RP_MASK (0xf << 25)
+#define OMAP44XX_REG_T_RCD_SHIFT 21
+#define OMAP44XX_REG_T_RCD_MASK (0xf << 21)
+#define OMAP44XX_REG_T_WR_SHIFT 17
+#define OMAP44XX_REG_T_WR_MASK (0xf << 17)
+#define OMAP44XX_REG_T_RAS_SHIFT 12
+#define OMAP44XX_REG_T_RAS_MASK (0x1f << 12)
+#define OMAP44XX_REG_T_RC_SHIFT 6
+#define OMAP44XX_REG_T_RC_MASK (0x3f << 6)
+#define OMAP44XX_REG_T_RRD_SHIFT 3
+#define OMAP44XX_REG_T_RRD_MASK (0x7 << 3)
+#define OMAP44XX_REG_T_WTR_SHIFT 0
+#define OMAP44XX_REG_T_WTR_MASK (0x7 << 0)
+
+/* SDRAM_TIM_1_SHDW */
+#define OMAP44XX_REG_T_RP_SHDW_SHIFT 25
+#define OMAP44XX_REG_T_RP_SHDW_MASK (0xf << 25)
+#define OMAP44XX_REG_T_RCD_SHDW_SHIFT 21
+#define OMAP44XX_REG_T_RCD_SHDW_MASK (0xf << 21)
+#define OMAP44XX_REG_T_WR_SHDW_SHIFT 17
+#define OMAP44XX_REG_T_WR_SHDW_MASK (0xf << 17)
+#define OMAP44XX_REG_T_RAS_SHDW_SHIFT 12
+#define OMAP44XX_REG_T_RAS_SHDW_MASK (0x1f << 12)
+#define OMAP44XX_REG_T_RC_SHDW_SHIFT 6
+#define OMAP44XX_REG_T_RC_SHDW_MASK (0x3f << 6)
+#define OMAP44XX_REG_T_RRD_SHDW_SHIFT 3
+#define OMAP44XX_REG_T_RRD_SHDW_MASK (0x7 << 3)
+#define OMAP44XX_REG_T_WTR_SHDW_SHIFT 0
+#define OMAP44XX_REG_T_WTR_SHDW_MASK (0x7 << 0)
+
+/* SDRAM_TIM_2 */
+#define OMAP44XX_REG_T_XP_SHIFT 28
+#define OMAP44XX_REG_T_XP_MASK (0x7 << 28)
+#define OMAP44XX_REG_T_ODT_SHIFT 25
+#define OMAP44XX_REG_T_ODT_MASK (0x7 << 25)
+#define OMAP44XX_REG_T_XSNR_SHIFT 16
+#define OMAP44XX_REG_T_XSNR_MASK (0x1ff << 16)
+#define OMAP44XX_REG_T_XSRD_SHIFT 6
+#define OMAP44XX_REG_T_XSRD_MASK (0x3ff << 6)
+#define OMAP44XX_REG_T_RTP_SHIFT 3
+#define OMAP44XX_REG_T_RTP_MASK (0x7 << 3)
+#define OMAP44XX_REG_T_CKE_SHIFT 0
+#define OMAP44XX_REG_T_CKE_MASK (0x7 << 0)
+
+/* SDRAM_TIM_2_SHDW */
+#define OMAP44XX_REG_T_XP_SHDW_SHIFT 28
+#define OMAP44XX_REG_T_XP_SHDW_MASK (0x7 << 28)
+#define OMAP44XX_REG_T_ODT_SHDW_SHIFT 25
+#define OMAP44XX_REG_T_ODT_SHDW_MASK (0x7 << 25)
+#define OMAP44XX_REG_T_XSNR_SHDW_SHIFT 16
+#define OMAP44XX_REG_T_XSNR_SHDW_MASK (0x1ff << 16)
+#define OMAP44XX_REG_T_XSRD_SHDW_SHIFT 6
+#define OMAP44XX_REG_T_XSRD_SHDW_MASK (0x3ff << 6)
+#define OMAP44XX_REG_T_RTP_SHDW_SHIFT 3
+#define OMAP44XX_REG_T_RTP_SHDW_MASK (0x7 << 3)
+#define OMAP44XX_REG_T_CKE_SHDW_SHIFT 0
+#define OMAP44XX_REG_T_CKE_SHDW_MASK (0x7 << 0)
+
+/* SDRAM_TIM_3 */
+#define OMAP44XX_REG_T_CKESR_SHIFT 21
+#define OMAP44XX_REG_T_CKESR_MASK (0x7 << 21)
+#define OMAP44XX_REG_ZQ_ZQCS_SHIFT 15
+#define OMAP44XX_REG_ZQ_ZQCS_MASK (0x3f << 15)
+#define OMAP44XX_REG_T_TDQSCKMAX_SHIFT 13
+#define OMAP44XX_REG_T_TDQSCKMAX_MASK (0x3 << 13)
+#define OMAP44XX_REG_T_RFC_SHIFT 4
+#define OMAP44XX_REG_T_RFC_MASK (0x1ff << 4)
+#define OMAP44XX_REG_T_RAS_MAX_SHIFT 0
+#define OMAP44XX_REG_T_RAS_MAX_MASK (0xf << 0)
+
+/* SDRAM_TIM_3_SHDW */
+#define OMAP44XX_REG_T_CKESR_SHDW_SHIFT 21
+#define OMAP44XX_REG_T_CKESR_SHDW_MASK (0x7 << 21)
+#define OMAP44XX_REG_ZQ_ZQCS_SHDW_SHIFT 15
+#define OMAP44XX_REG_ZQ_ZQCS_SHDW_MASK (0x3f << 15)
+#define OMAP44XX_REG_T_TDQSCKMAX_SHDW_SHIFT 13
+#define OMAP44XX_REG_T_TDQSCKMAX_SHDW_MASK (0x3 << 13)
+#define OMAP44XX_REG_T_RFC_SHDW_SHIFT 4
+#define OMAP44XX_REG_T_RFC_SHDW_MASK (0x1ff << 4)
+#define OMAP44XX_REG_T_RAS_MAX_SHDW_SHIFT 0
+#define OMAP44XX_REG_T_RAS_MAX_SHDW_MASK (0xf << 0)
+
+/* LPDDR2_NVM_TIM */
+#define OMAP44XX_REG_NVM_T_XP_SHIFT 28
+#define OMAP44XX_REG_NVM_T_XP_MASK (0x7 << 28)
+#define OMAP44XX_REG_NVM_T_WTR_SHIFT 24
+#define OMAP44XX_REG_NVM_T_WTR_MASK (0x7 << 24)
+#define OMAP44XX_REG_NVM_T_RP_SHIFT 20
+#define OMAP44XX_REG_NVM_T_RP_MASK (0xf << 20)
+#define OMAP44XX_REG_NVM_T_WRA_SHIFT 16
+#define OMAP44XX_REG_NVM_T_WRA_MASK (0xf << 16)
+#define OMAP44XX_REG_NVM_T_RRD_SHIFT 8
+#define OMAP44XX_REG_NVM_T_RRD_MASK (0xff << 8)
+#define OMAP44XX_REG_NVM_T_RCDMIN_SHIFT 0
+#define OMAP44XX_REG_NVM_T_RCDMIN_MASK (0xff << 0)
+
+/* LPDDR2_NVM_TIM_SHDW */
+#define OMAP44XX_REG_NVM_T_XP_SHDW_SHIFT 28
+#define OMAP44XX_REG_NVM_T_XP_SHDW_MASK (0x7 << 28)
+#define OMAP44XX_REG_NVM_T_WTR_SHDW_SHIFT 24
+#define OMAP44XX_REG_NVM_T_WTR_SHDW_MASK (0x7 << 24)
+#define OMAP44XX_REG_NVM_T_RP_SHDW_SHIFT 20
+#define OMAP44XX_REG_NVM_T_RP_SHDW_MASK (0xf << 20)
+#define OMAP44XX_REG_NVM_T_WRA_SHDW_SHIFT 16
+#define OMAP44XX_REG_NVM_T_WRA_SHDW_MASK (0xf << 16)
+#define OMAP44XX_REG_NVM_T_RRD_SHDW_SHIFT 8
+#define OMAP44XX_REG_NVM_T_RRD_SHDW_MASK (0xff << 8)
+#define OMAP44XX_REG_NVM_T_RCDMIN_SHDW_SHIFT 0
+#define OMAP44XX_REG_NVM_T_RCDMIN_SHDW_MASK (0xff << 0)
+
+/* PWR_MGMT_CTRL */
+#define OMAP44XX_REG_PD_TIM_SHIFT 12
+#define OMAP44XX_REG_PD_TIM_MASK (0xf << 12)
+#define OMAP44XX_REG_DPD_EN_SHIFT 11
+#define OMAP44XX_REG_DPD_EN_MASK (1 << 11)
+#define OMAP44XX_REG_LP_MODE_SHIFT 8
+#define OMAP44XX_REG_LP_MODE_MASK (0x7 << 8)
+#define OMAP44XX_REG_SR_TIM_SHIFT 4
+#define OMAP44XX_REG_SR_TIM_MASK (0xf << 4)
+#define OMAP44XX_REG_CS_TIM_SHIFT 0
+#define OMAP44XX_REG_CS_TIM_MASK (0xf << 0)
+
+/* PWR_MGMT_CTRL_SHDW */
+#define OMAP44XX_REG_PD_TIM_SHDW_SHIFT 12
+#define OMAP44XX_REG_PD_TIM_SHDW_MASK (0xf << 12)
+#define OMAP44XX_REG_SR_TIM_SHDW_SHIFT 4
+#define OMAP44XX_REG_SR_TIM_SHDW_MASK (0xf << 4)
+#define OMAP44XX_REG_CS_TIM_SHDW_SHIFT 0
+#define OMAP44XX_REG_CS_TIM_SHDW_MASK (0xf << 0)
+
+/* LPDDR2_MODE_REG_DATA */
+#define OMAP44XX_REG_VALUE_0_SHIFT 0
+#define OMAP44XX_REG_VALUE_0_MASK (0x7f << 0)
+
+/* LPDDR2_MODE_REG_CFG */
+#define OMAP44XX_REG_CS_SHIFT 31
+#define OMAP44XX_REG_CS_MASK (1 << 31)
+#define OMAP44XX_REG_REFRESH_EN_SHIFT 30
+#define OMAP44XX_REG_REFRESH_EN_MASK (1 << 30)
+#define OMAP44XX_REG_ADDRESS_SHIFT 0
+#define OMAP44XX_REG_ADDRESS_MASK (0xff << 0)
+
+/* OCP_CONFIG */
+#define OMAP44XX_REG_SYS_THRESH_MAX_SHIFT 24
+#define OMAP44XX_REG_SYS_THRESH_MAX_MASK (0xf << 24)
+#define OMAP44XX_REG_LL_THRESH_MAX_SHIFT 16
+#define OMAP44XX_REG_LL_THRESH_MAX_MASK (0xf << 16)
+#define OMAP44XX_REG_PR_OLD_COUNT_SHIFT 0
+#define OMAP44XX_REG_PR_OLD_COUNT_MASK (0xff << 0)
+
+/* OCP_CFG_VAL_1 */
+#define OMAP44XX_REG_SYS_BUS_WIDTH_SHIFT 30
+#define OMAP44XX_REG_SYS_BUS_WIDTH_MASK (0x3 << 30)
+#define OMAP44XX_REG_LL_BUS_WIDTH_SHIFT 28
+#define OMAP44XX_REG_LL_BUS_WIDTH_MASK (0x3 << 28)
+#define OMAP44XX_REG_WR_FIFO_DEPTH_SHIFT 8
+#define OMAP44XX_REG_WR_FIFO_DEPTH_MASK (0xff << 8)
+#define OMAP44XX_REG_CMD_FIFO_DEPTH_SHIFT 0
+#define OMAP44XX_REG_CMD_FIFO_DEPTH_MASK (0xff << 0)
+
+/* OCP_CFG_VAL_2 */
+#define OMAP44XX_REG_RREG_FIFO_DEPTH_SHIFT 16
+#define OMAP44XX_REG_RREG_FIFO_DEPTH_MASK (0xff << 16)
+#define OMAP44XX_REG_RSD_FIFO_DEPTH_SHIFT 8
+#define OMAP44XX_REG_RSD_FIFO_DEPTH_MASK (0xff << 8)
+#define OMAP44XX_REG_RCMD_FIFO_DEPTH_SHIFT 0
+#define OMAP44XX_REG_RCMD_FIFO_DEPTH_MASK (0xff << 0)
+
+/* IODFT_TLGC */
+#define OMAP44XX_REG_TLEC_SHIFT 16
+#define OMAP44XX_REG_TLEC_MASK (0xffff << 16)
+#define OMAP44XX_REG_MT_SHIFT 14
+#define OMAP44XX_REG_MT_MASK (1 << 14)
+#define OMAP44XX_REG_ACT_CAP_EN_SHIFT 13
+#define OMAP44XX_REG_ACT_CAP_EN_MASK (1 << 13)
+#define OMAP44XX_REG_OPG_LD_SHIFT 12
+#define OMAP44XX_REG_OPG_LD_MASK (1 << 12)
+#define OMAP44XX_REG_RESET_PHY_SHIFT 10
+#define OMAP44XX_REG_RESET_PHY_MASK (1 << 10)
+#define OMAP44XX_REG_MMS_SHIFT 8
+#define OMAP44XX_REG_MMS_MASK (1 << 8)
+#define OMAP44XX_REG_MC_SHIFT 4
+#define OMAP44XX_REG_MC_MASK (0x3 << 4)
+#define OMAP44XX_REG_PC_SHIFT 1
+#define OMAP44XX_REG_PC_MASK (0x7 << 1)
+#define OMAP44XX_REG_TM_SHIFT 0
+#define OMAP44XX_REG_TM_MASK (1 << 0)
+
+/* IODFT_CTRL_MISR_RSLT */
+#define OMAP44XX_REG_DQM_TLMR_SHIFT 16
+#define OMAP44XX_REG_DQM_TLMR_MASK (0x3ff << 16)
+#define OMAP44XX_REG_CTL_TLMR_SHIFT 0
+#define OMAP44XX_REG_CTL_TLMR_MASK (0x7ff << 0)
+
+/* IODFT_ADDR_MISR_RSLT */
+#define OMAP44XX_REG_ADDR_TLMR_SHIFT 0
+#define OMAP44XX_REG_ADDR_TLMR_MASK (0x1fffff << 0)
+
+/* IODFT_DATA_MISR_RSLT_1 */
+#define OMAP44XX_REG_DATA_TLMR_31_0_SHIFT 0
+#define OMAP44XX_REG_DATA_TLMR_31_0_MASK (0xffffffff << 0)
+
+/* IODFT_DATA_MISR_RSLT_2 */
+#define OMAP44XX_REG_DATA_TLMR_63_32_SHIFT 0
+#define OMAP44XX_REG_DATA_TLMR_63_32_MASK (0xffffffff << 0)
+
+/* IODFT_DATA_MISR_RSLT_3 */
+#define OMAP44XX_REG_DATA_TLMR_66_64_SHIFT 0
+#define OMAP44XX_REG_DATA_TLMR_66_64_MASK (0x7 << 0)
+
+/* PERF_CNT_1 */
+#define OMAP44XX_REG_COUNTER1_SHIFT 0
+#define OMAP44XX_REG_COUNTER1_MASK (0xffffffff << 0)
+
+/* PERF_CNT_2 */
+#define OMAP44XX_REG_COUNTER2_SHIFT 0
+#define OMAP44XX_REG_COUNTER2_MASK (0xffffffff << 0)
+
+/* PERF_CNT_CFG */
+#define OMAP44XX_REG_CNTR2_MCONNID_EN_SHIFT 31
+#define OMAP44XX_REG_CNTR2_MCONNID_EN_MASK (1 << 31)
+#define OMAP44XX_REG_CNTR2_REGION_EN_SHIFT 30
+#define OMAP44XX_REG_CNTR2_REGION_EN_MASK (1 << 30)
+#define OMAP44XX_REG_CNTR2_CFG_SHIFT 16
+#define OMAP44XX_REG_CNTR2_CFG_MASK (0xf << 16)
+#define OMAP44XX_REG_CNTR1_MCONNID_EN_SHIFT 15
+#define OMAP44XX_REG_CNTR1_MCONNID_EN_MASK (1 << 15)
+#define OMAP44XX_REG_CNTR1_REGION_EN_SHIFT 14
+#define OMAP44XX_REG_CNTR1_REGION_EN_MASK (1 << 14)
+#define OMAP44XX_REG_CNTR1_CFG_SHIFT 0
+#define OMAP44XX_REG_CNTR1_CFG_MASK (0xf << 0)
+
+/* PERF_CNT_SEL */
+#define OMAP44XX_REG_MCONNID2_SHIFT 24
+#define OMAP44XX_REG_MCONNID2_MASK (0xff << 24)
+#define OMAP44XX_REG_REGION_SEL2_SHIFT 16
+#define OMAP44XX_REG_REGION_SEL2_MASK (0x3 << 16)
+#define OMAP44XX_REG_MCONNID1_SHIFT 8
+#define OMAP44XX_REG_MCONNID1_MASK (0xff << 8)
+#define OMAP44XX_REG_REGION_SEL1_SHIFT 0
+#define OMAP44XX_REG_REGION_SEL1_MASK (0x3 << 0)
+
+/* PERF_CNT_TIM */
+#define OMAP44XX_REG_TOTAL_TIME_SHIFT 0
+#define OMAP44XX_REG_TOTAL_TIME_MASK (0xffffffff << 0)
+
+/* READ_IDLE_CTRL */
+#define OMAP44XX_REG_READ_IDLE_LEN_SHIFT 16
+#define OMAP44XX_REG_READ_IDLE_LEN_MASK (0xf << 16)
+#define OMAP44XX_REG_READ_IDLE_INTERVAL_SHIFT 0
+#define OMAP44XX_REG_READ_IDLE_INTERVAL_MASK (0x1ff << 0)
+
+/* READ_IDLE_CTRL_SHDW */
+#define OMAP44XX_REG_READ_IDLE_LEN_SHDW_SHIFT 16
+#define OMAP44XX_REG_READ_IDLE_LEN_SHDW_MASK (0xf << 16)
+#define OMAP44XX_REG_READ_IDLE_INTERVAL_SHDW_SHIFT 0
+#define OMAP44XX_REG_READ_IDLE_INTERVAL_SHDW_MASK (0x1ff << 0)
+
+/* IRQ_EOI */
+#define OMAP44XX_REG_EOI_SHIFT 0
+#define OMAP44XX_REG_EOI_MASK (1 << 0)
+
+/* IRQSTATUS_RAW_SYS */
+#define OMAP44XX_REG_DNV_SYS_SHIFT 2
+#define OMAP44XX_REG_DNV_SYS_MASK (1 << 2)
+#define OMAP44XX_REG_TA_SYS_SHIFT 1
+#define OMAP44XX_REG_TA_SYS_MASK (1 << 1)
+#define OMAP44XX_REG_ERR_SYS_SHIFT 0
+#define OMAP44XX_REG_ERR_SYS_MASK (1 << 0)
+
+/* IRQSTATUS_RAW_LL */
+#define OMAP44XX_REG_DNV_LL_SHIFT 2
+#define OMAP44XX_REG_DNV_LL_MASK (1 << 2)
+#define OMAP44XX_REG_TA_LL_SHIFT 1
+#define OMAP44XX_REG_TA_LL_MASK (1 << 1)
+#define OMAP44XX_REG_ERR_LL_SHIFT 0
+#define OMAP44XX_REG_ERR_LL_MASK (1 << 0)
+
+/* IRQSTATUS_SYS */
+
+/* IRQSTATUS_LL */
+
+/* IRQENABLE_SET_SYS */
+#define OMAP44XX_REG_EN_DNV_SYS_SHIFT 2
+#define OMAP44XX_REG_EN_DNV_SYS_MASK (1 << 2)
+#define OMAP44XX_REG_EN_TA_SYS_SHIFT 1
+#define OMAP44XX_REG_EN_TA_SYS_MASK (1 << 1)
+#define OMAP44XX_REG_EN_ERR_SYS_SHIFT 0
+#define OMAP44XX_REG_EN_ERR_SYS_MASK (1 << 0)
+
+/* IRQENABLE_SET_LL */
+#define OMAP44XX_REG_EN_DNV_LL_SHIFT 2
+#define OMAP44XX_REG_EN_DNV_LL_MASK (1 << 2)
+#define OMAP44XX_REG_EN_TA_LL_SHIFT 1
+#define OMAP44XX_REG_EN_TA_LL_MASK (1 << 1)
+#define OMAP44XX_REG_EN_ERR_LL_SHIFT 0
+#define OMAP44XX_REG_EN_ERR_LL_MASK (1 << 0)
+
+/* IRQENABLE_CLR_SYS */
+
+/* IRQENABLE_CLR_LL */
+
+/* ZQ_CONFIG */
+#define OMAP44XX_REG_ZQ_CS1EN_SHIFT 31
+#define OMAP44XX_REG_ZQ_CS1EN_MASK (1 << 31)
+#define OMAP44XX_REG_ZQ_CS0EN_SHIFT 30
+#define OMAP44XX_REG_ZQ_CS0EN_MASK (1 << 30)
+#define OMAP44XX_REG_ZQ_DUALCALEN_SHIFT 29
+#define OMAP44XX_REG_ZQ_DUALCALEN_MASK (1 << 29)
+#define OMAP44XX_REG_ZQ_SFEXITEN_SHIFT 28
+#define OMAP44XX_REG_ZQ_SFEXITEN_MASK (1 << 28)
+#define OMAP44XX_REG_ZQ_ZQINIT_MULT_SHIFT 18
+#define OMAP44XX_REG_ZQ_ZQINIT_MULT_MASK (0x3 << 18)
+#define OMAP44XX_REG_ZQ_ZQCL_MULT_SHIFT 16
+#define OMAP44XX_REG_ZQ_ZQCL_MULT_MASK (0x3 << 16)
+#define OMAP44XX_REG_ZQ_REFINTERVAL_SHIFT 0
+#define OMAP44XX_REG_ZQ_REFINTERVAL_MASK (0xffff << 0)
+
+/* TEMP_ALERT_CONFIG */
+#define OMAP44XX_REG_TA_CS1EN_SHIFT 31
+#define OMAP44XX_REG_TA_CS1EN_MASK (1 << 31)
+#define OMAP44XX_REG_TA_CS0EN_SHIFT 30
+#define OMAP44XX_REG_TA_CS0EN_MASK (1 << 30)
+#define OMAP44XX_REG_TA_SFEXITEN_SHIFT 28
+#define OMAP44XX_REG_TA_SFEXITEN_MASK (1 << 28)
+#define OMAP44XX_REG_TA_DEVWDT_SHIFT 26
+#define OMAP44XX_REG_TA_DEVWDT_MASK (0x3 << 26)
+#define OMAP44XX_REG_TA_DEVCNT_SHIFT 24
+#define OMAP44XX_REG_TA_DEVCNT_MASK (0x3 << 24)
+#define OMAP44XX_REG_TA_REFINTERVAL_SHIFT 0
+#define OMAP44XX_REG_TA_REFINTERVAL_MASK (0x3fffff << 0)
+
+/* OCP_ERR_LOG */
+#define OMAP44XX_REG_MADDRSPACE_SHIFT 14
+#define OMAP44XX_REG_MADDRSPACE_MASK (0x3 << 14)
+#define OMAP44XX_REG_MBURSTSEQ_SHIFT 11
+#define OMAP44XX_REG_MBURSTSEQ_MASK (0x7 << 11)
+#define OMAP44XX_REG_MCMD_SHIFT 8
+#define OMAP44XX_REG_MCMD_MASK (0x7 << 8)
+#define OMAP44XX_REG_MCONNID_SHIFT 0
+#define OMAP44XX_REG_MCONNID_MASK (0xff << 0)
+
+/* DDR_PHY_CTRL_1 */
+#define OMAP44XX_REG_DDR_PHY_CTRL_1_SHIFT 4
+#define OMAP44XX_REG_DDR_PHY_CTRL_1_MASK (0xfffffff << 4)
+#define OMAP44XX_REG_READ_LATENCY_SHIFT 0
+#define OMAP44XX_REG_READ_LATENCY_MASK (0xf << 0)
+#define OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_SHIFT 4
+#define OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_MASK (0xFF << 4)
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT 12
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_MASK (0xFFFFF << 12)
+
+/* DDR_PHY_CTRL_1_SHDW */
+#define OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_SHIFT 4
+#define OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_MASK (0xfffffff << 4)
+#define OMAP44XX_REG_READ_LATENCY_SHDW_SHIFT 0
+#define OMAP44XX_REG_READ_LATENCY_SHDW_MASK (0xf << 0)
+#define OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_SHDW_SHIFT 4
+#define OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_SHDW_MASK (0xFF << 4)
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHDW_SHIFT 12
+#define OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHDW_MASK (0xFFFFF << 12)
+
+/* DDR_PHY_CTRL_2 */
+#define OMAP44XX_REG_DDR_PHY_CTRL_2_SHIFT 0
+#define OMAP44XX_REG_DDR_PHY_CTRL_2_MASK (0xffffffff << 0)
+#endif
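
The _SHIFT/_MASK pairs above lend themselves to the usual read-field and read-modify-write patterns. A minimal sketch, assuming the EMIF register space has already been ioremapped elsewhere (the helper names are illustrative, not from the patch):

#include <linux/io.h>
#include <mach/emif-44xx.h>

/* Read the SDRAM type field out of EMIF_SDRAM_CONFIG */
static u32 emif_read_sdram_type(void __iomem *emif_base)
{
	u32 val = __raw_readl(emif_base + OMAP44XX_EMIF_SDRAM_CONFIG);

	return (val & OMAP44XX_REG_SDRAM_TYPE_MASK) >>
			OMAP44XX_REG_SDRAM_TYPE_SHIFT;
}

/* Update only the refresh-rate field of SDRAM_REF_CTRL */
static void emif_set_refresh_rate(void __iomem *emif_base, u32 rate)
{
	u32 val = __raw_readl(emif_base + OMAP44XX_EMIF_SDRAM_REF_CTRL);

	val &= ~OMAP44XX_REG_REFRESH_RATE_MASK;
	val |= (rate << OMAP44XX_REG_REFRESH_RATE_SHIFT) &
			OMAP44XX_REG_REFRESH_RATE_MASK;
	__raw_writel(val, emif_base + OMAP44XX_EMIF_SDRAM_REF_CTRL);
}
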
diff --git a/arch/arm/mach-omap2/include/mach/emif.h b/arch/arm/mach-omap2/include/mach/emif.h
new file mode 100644
index 0000000..8a3ea34
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/emif.h
@@ -0,0 +1,268 @@
+/*
+ * OMAP44xx EMIF header
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _EMIF_H
+#define _EMIF_H
+
+#include <mach/emif-44xx.h>
+#include <mach/lpddr2-jedec.h>
+
+#define EMIF_NUM_INSTANCES 2
+#define EMIF1 0
+#define EMIF2 1
+
+/* The maximum frequency at which the LPDDR2 interface can operate, in Hz */
+#define MAX_LPDDR2_FREQ 400000000 /* 400 MHz */
+
+/* 19.2 MHz to be used for finding initialization values */
+#define EMIF_FREQ_19_2_MHZ 19200000 /* 19.2 MHz */
+/*
+ * The period of the DDR clock is represented as a numerator and a
+ * denominator for better accuracy in integer-based calculations. However,
+ * if the numerator and denominator are very large, the calculations may
+ * overflow. So, as a trade-off, keep the denominator (and consequently the
+ * numerator) within a limit, sacrificing a little accuracy. If the
+ * denominator and numerator are already small (such as at 400 MHz), no
+ * adjustment is needed.
+ */
+#define EMIF_PERIOD_DEN_LIMIT 1000
+/*
+ * Maximum number of different frequencies supported by the EMIF driver.
+ * This determines the number of entries in the pointer array for the
+ * register cache.
+ */
+#define EMIF_MAX_NUM_FREQUENCIES 6
+/*
+ * Indices into the Addressing Table array.
+ * One entry each for all the different types of devices with different
+ * addressing schemes
+ */
+#define ADDR_TABLE_INDEX64M 0
+#define ADDR_TABLE_INDEX128M 1
+#define ADDR_TABLE_INDEX256M 2
+#define ADDR_TABLE_INDEX512M 3
+#define ADDR_TABLE_INDEX1GS4 4
+#define ADDR_TABLE_INDEX2GS4 5
+#define ADDR_TABLE_INDEX4G 6
+#define ADDR_TABLE_INDEX8G 7
+#define ADDR_TABLE_INDEX1GS2 8
+#define ADDR_TABLE_INDEX2GS2 9
+#define ADDR_TABLE_INDEXMAX 10
+
+/* Number of Row bits */
+#define ROW_9 0
+#define ROW_10 1
+#define ROW_11 2
+#define ROW_12 3
+#define ROW_13 4
+#define ROW_14 5
+#define ROW_15 6
+#define ROW_16 7
+
+/* Number of Column bits */
+#define COL_8 0
+#define COL_9 1
+#define COL_10 2
+#define COL_11 3
+#define COL_7 4 /* Not supported by OMAP; included for completeness */
+
+/* Number of Banks*/
+#define BANKS1 0
+#define BANKS2 1
+#define BANKS4 2
+#define BANKS8 3
+
+/* Refresh rate in microseconds x 10 */
+#define T_REFI_15_6 156
+#define T_REFI_7_8 78
+#define T_REFI_3_9 39
+
+#define EBANK_CS1_DIS 0
+#define EBANK_CS1_EN 1
+
+/* Read Latency at the base frequency - 19.2 MHz on bootup */
+#define RL_19_2_MHZ 3
+/* Interleaving policies at the EMIF level - between banks and Chip Selects */
+#define EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING 0
+#define EMIF_INTERLEAVING_POLICY_NO_BANK_INTERLEAVING 3
+
+/*
+ * Interleaving policy to be used.
+ * Currently set to maximum interleaving for better performance.
+ */
+#define EMIF_INTERLEAVING_POLICY EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING
+
+/*
+ * State of the core voltage:
+ * This is important for some parameters such as read idle control and
+ * ZQ calibration timings. Timings are much stricter while the voltage
+ * is ramping than when it is stable. We need to calculate two sets of
+ * values for these parameters and use them accordingly.
+ */
+#define LPDDR2_VOLTAGE_STABLE 0
+#define LPDDR2_VOLTAGE_RAMPING 1
+
+/* Length of the forced read idle period in terms of cycles */
+#define EMIF_REG_READ_IDLE_LEN_VAL 5
+
+/* Interval between forced 'read idles' */
+/* To be used when voltage is changed for DPS/DVFS - 1us */
+#define READ_IDLE_INTERVAL_DVFS (1*1000)
+/*
+ * To be used when the voltage is not scaled except by SmartReflex:
+ * 50 us (or the maximum value) will do.
+ */
+#define READ_IDLE_INTERVAL_NORMAL (50*1000)
+
+
+/*
+ * Unless the voltage is changing due to DVFS, one ZQCS command every 50 ms
+ * should be enough. This is also sufficient when the voltage is changing
+ * due to SmartReflex.
+ */
+#define EMIF_ZQCS_INTERVAL_NORMAL_IN_US (50*1000)
+/*
+ * If the voltage is changing due to DVFS, ZQCS should be performed more
+ * often (every 50 us).
+ */
+#define EMIF_ZQCS_INTERVAL_DVFS_IN_US 50
+
+/* The interval between ZQCL commands as a multiple of ZQCS interval */
+#define REG_ZQ_ZQCL_MULT 4
+/* The interval between ZQINIT commands as a multiple of ZQCL interval */
+#define REG_ZQ_ZQINIT_MULT 3
+/* Enable ZQ Calibration on exiting Self-refresh */
+#define REG_ZQ_SFEXITEN_ENABLE 1
+/*
+ * ZQ calibration simultaneously on both chip-selects:
+ * needs one calibration resistor per CS.
+ * None of the boards that we know of have this capability,
+ * so it is disabled by default.
+ */
+#define REG_ZQ_DUALCALEN_DISABLE 0
+/*
+ * Enable ZQ calibration by default on CS0. If we are asked to program
+ * the EMIF, there will certainly be something connected to CS0.
+ */
+#define REG_ZQ_CS0EN_ENABLE 1
+
+/* EMIF_PWR_MGMT_CTRL register */
+/* Low power modes */
+#define LP_MODE_DISABLE 0
+#define LP_MODE_CLOCK_STOP 1
+#define LP_MODE_SELF_REFRESH 2
+#define LP_MODE_PWR_DN 3
+
+/* REG_DPD_EN */
+#define DPD_DISABLE 0
+#define DPD_ENABLE 1
+
+/*
+ * Value of bits 12:31 of the DDR_PHY_CTRL_1 register:
+ * All these fields have magic values dependent on frequency and
+ * determined by the PHY and DLL integration with the EMIF. We set the
+ * magic values suggested by the hardware team.
+ */
+#define EMIF_DDR_PHY_CTRL_1_BASE_VAL 0x049FF
+#define EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ 0x41
+#define EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ 0x80
+#define EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS 0xFF
+
+/*
+ * MR1 value:
+ * Burst length: 8
+ * Burst type: sequential
+ * Wrap: enabled
+ * nWR: 3 (default). The EMIF does not do pre-charge,
+ *      so nWR is a don't-care.
+ */
+#define MR1_VAL 0x23
+
+/* MR10: ZQ calibration codes */
+#define MR10_ZQ_ZQCS 0x56
+#define MR10_ZQ_ZQCL 0xAB
+#define MR10_ZQ_ZQINIT 0xFF
+#define MR10_ZQ_ZQRESET 0xC3
+
+/* TEMP_ALERT_CONFIG */
+#define TEMP_ALERT_POLL_INTERVAL_MS 360 /* for temp gradient - 5 C/s */
+#define TEMP_ALERT_CONFIG_DEVCT_1 0
+#define TEMP_ALERT_CONFIG_DEVWDT_32 2
+
+/* MR16 value: refresh full array(no partial array self refresh) */
+#define MR16_VAL 0
+
+#if defined(DEBUG)
+#define emif_assert(c) BUG_ON(!(c))
+#else
+#define emif_assert(c) ({ if (0) BUG_ON(!(c)); 0; })
+#endif
+
+/* Details of the devices connected to each chip-select of an EMIF instance */
+struct emif_device_details {
+ const struct lpddr2_device_info *cs0_device;
+ const struct lpddr2_device_info *cs1_device;
+};
+
+/*
+ * LPDDR2 interface clock frequency:
+ * Period (represented as a numerator and denominator for better accuracy
+ * in calculations) should be <= the real value. The period is used for
+ * calculating all timings except the refresh rate.
+ * freq_mhz_floor - frequency in MHz truncated to the lower integer; used
+ * for calculating the refresh rate.
+ * freq_mhz_ceil - frequency in MHz rounded up; used for identifying the
+ * right speed bin and the corresponding timings table for the LPDDR2 device.
+ */
+struct freq_info {
+ u16 period_num;
+ u16 period_den;
+ u16 freq_mhz_floor;
+ u16 freq_mhz_ceil;
+};
+
+/*
+ * Structure containing shadow of important registers in EMIF
+ * The calculation function fills in this structure to be later used for
+ * initialization and DVFS
+ */
+struct emif_regs {
+ u32 freq;
+ u8 RL_final;
+ u32 sdram_config_init;
+ u32 sdram_config_final;
+ u32 ref_ctrl;
+ u32 ref_ctrl_derated;
+ u32 sdram_tim1;
+ u32 sdram_tim1_derated;
+ u32 sdram_tim2;
+ u32 sdram_tim3;
+ u32 read_idle_ctrl_normal;
+ u32 read_idle_ctrl_volt_ramp;
+ u32 zq_config_normal;
+ u32 zq_config_volt_ramp;
+ u32 temp_alert_config;
+ u32 temp_alert_config_derated;
+ u32 emif_ddr_phy_ctlr_1_init;
+ u32 emif_ddr_phy_ctlr_1_final;
+};
+
+int omap_emif_setup_registers(u32 freq,
+ u32 volt_state);
+void omap_emif_frequency_pre_notify(void);
+void omap_emif_frequency_post_notify(void);
+int omap_emif_setup_device_details(
+ const struct emif_device_details *emif1_devices,
+ const struct emif_device_details *emif2_devices);
+
+void emif_clear_irq(int emif_id);
+#endif
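
A sketch of how the period numerator/denominator representation and EMIF_PERIOD_DEN_LIMIT described in emif.h might be applied when filling a struct freq_info. The nanosecond unit, the halving strategy and the helper name are assumptions for illustration; the real driver may do this differently:

#include <linux/gcd.h>
#include <linux/types.h>
#include <mach/emif.h>

/*
 * Represent the DDR clock period in nanoseconds as period_num/period_den,
 * keeping the denominator below EMIF_PERIOD_DEN_LIMIT. At 19.2 MHz this
 * gives 625/12 ns (~52.08 ns). Assumes LPDDR2-range frequencies so the
 * reduced values fit in the u16 fields of struct freq_info.
 */
static void example_fill_freq_info(struct freq_info *fi, u32 freq_hz)
{
	u32 num = 1000000000;		/* period [ns] = num / den */
	u32 den = freq_hz;
	u32 g = gcd(num, den);

	num /= g;
	den /= g;

	/* Trade a little accuracy for overflow-safe integer math */
	while (den > EMIF_PERIOD_DEN_LIMIT) {
		num = (num + 1) / 2;
		den = (den + 1) / 2;
	}

	fi->period_num = num;
	fi->period_den = den;
	fi->freq_mhz_floor = freq_hz / 1000000;
	fi->freq_mhz_ceil = (freq_hz + 999999) / 1000000;
}
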
diff --git a/arch/arm/mach-omap2/include/mach/id.h b/arch/arm/mach-omap2/include/mach/id.h
index 02ed3aa..096c02b 100644
--- a/arch/arm/mach-omap2/include/mach/id.h
+++ b/arch/arm/mach-omap2/include/mach/id.h
@@ -18,5 +18,6 @@
};
void omap_get_die_id(struct omap_die_id *odi);
+void omap_get_production_id(struct omap_die_id *odi);
#endif
diff --git a/arch/arm/mach-omap2/include/mach/lpddr2-elpida.h b/arch/arm/mach-omap2/include/mach/lpddr2-elpida.h
new file mode 100644
index 0000000..b1accb8
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/lpddr2-elpida.h
@@ -0,0 +1,23 @@
+/*
+ * ELPIDA LPDDR2 timings.
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Aneesh V <aneesh@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LPDDR2_ELPIDA_H
+#define _LPDDR2_ELPIDA_H
+
+extern const struct lpddr2_timings lpddr2_elpida_timings_200_mhz;
+extern const struct lpddr2_timings lpddr2_elpida_timings_333_mhz;
+extern const struct lpddr2_timings lpddr2_elpida_timings_400_mhz;
+extern const struct lpddr2_min_tck lpddr2_elpida_min_tck;
+extern struct lpddr2_device_info lpddr2_elpida_2G_S4_dev;
+
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/lpddr2-jedec.h b/arch/arm/mach-omap2/include/mach/lpddr2-jedec.h
new file mode 100644
index 0000000..4545c29
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/lpddr2-jedec.h
@@ -0,0 +1,149 @@
+/*
+ * LPDDR2 header based on JESD209-2
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LPDDR2_JDEC_H
+#define _LPDDR2_JDEC_H
+
+#include <linux/types.h>
+
+/*
+ * Maximum number of entries we keep in our array of timing tables.
+ * We need not keep all the speed bins supported by the device;
+ * we keep timing tables only for the speed bins that we are
+ * interested in.
+ */
+#define MAX_NUM_SPEEDBINS 4
+
+/* LPDDR2 Densities */
+#define LPDDR2_DENSITY_64Mb 0
+#define LPDDR2_DENSITY_128Mb 1
+#define LPDDR2_DENSITY_256Mb 2
+#define LPDDR2_DENSITY_512Mb 3
+#define LPDDR2_DENSITY_1Gb 4
+#define LPDDR2_DENSITY_2Gb 5
+#define LPDDR2_DENSITY_4Gb 6
+#define LPDDR2_DENSITY_8Gb 7
+#define LPDDR2_DENSITY_16Gb 8
+#define LPDDR2_DENSITY_32Gb 9
+
+/* LPDDR2 type */
+#define LPDDR2_TYPE_S4 0
+#define LPDDR2_TYPE_S2 1
+#define LPDDR2_TYPE_NVM 2
+
+/* LPDDR2 IO width */
+#define LPDDR2_IO_WIDTH_32 0
+#define LPDDR2_IO_WIDTH_16 1
+#define LPDDR2_IO_WIDTH_8 2
+
+/* Mode register numbers */
+#define LPDDR2_MR0 0
+#define LPDDR2_MR1 1
+#define LPDDR2_MR2 2
+#define LPDDR2_MR3 3
+#define LPDDR2_MR4 4
+#define LPDDR2_MR5 5
+#define LPDDR2_MR6 6
+#define LPDDR2_MR7 7
+#define LPDDR2_MR8 8
+#define LPDDR2_MR9 9
+#define LPDDR2_MR10 10
+#define LPDDR2_MR11 11
+#define LPDDR2_MR16 16
+#define LPDDR2_MR17 17
+#define LPDDR2_MR18 18
+
+/* MR4 register fields */
+#define MR4_SDRAM_REF_RATE_SHIFT 0
+#define MR4_SDRAM_REF_RATE_MASK 7
+#define MR4_TUF_SHIFT 7
+#define MR4_TUF_MASK (1 << 7)
+
+/* MR4 SDRAM Refresh Rate field values */
+#define SDRAM_TEMP_NOMINAL 0x3
+#define SDRAM_TEMP_RESERVED_4 0x4
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH 0x5
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS 0x6
+#define SDRAM_TEMP_VERY_HIGH_SHUTDOWN 0x7
+
+struct lpddr2_addressing {
+ u8 num_banks;
+ u8 t_REFI_us_x10;
+ u8 row_sz[2]; /* One entry each for x32 and x16 */
+ u8 col_sz[2]; /* One entry each for x32 and x16 */
+};
+
+/* Structure for timings from the DDR datasheet */
+struct lpddr2_timings {
+ u32 max_freq;
+ u8 RL;
+ u8 tRPab;
+ u8 tRCD;
+ u8 tWR;
+ u8 tRASmin;
+ u8 tRRD;
+ u8 tWTRx2;
+ u8 tXSR;
+ u8 tXPx2;
+ u8 tRFCab;
+ u8 tRTPx2;
+ u8 tCKE;
+ u8 tCKESR;
+ u8 tZQCS;
+ u32 tZQCL;
+ u32 tZQINIT;
+ u8 tDQSCKMAXx2;
+ u8 tRASmax;
+ u8 tFAW;
+};
+
+/*
+ * Min tCK values for some of the parameters:
+ * If the calculated number of clock cycles for a parameter is
+ * less than the corresponding min tCK value, we must use the min
+ * tCK value instead. This may happen at lower frequencies.
+ */
+struct lpddr2_min_tck {
+ u32 tRL;
+ u32 tRP_AB;
+ u32 tRCD;
+ u32 tWR;
+ u32 tRAS_MIN;
+ u32 tRRD;
+ u32 tWTR;
+ u32 tXP;
+ u32 tRTP;
+ u8 tCKE;
+ u32 tCKESR;
+ u32 tFAW;
+};
+
+struct lpddr2_device_info {
+ const struct lpddr2_timings *device_timings[MAX_NUM_SPEEDBINS];
+ const struct lpddr2_min_tck *min_tck;
+ u8 type;
+ u8 density;
+ u8 io_width;
+
+ /* Idle time in cycles to wait before putting the memory in self refresh */
+ s32 emif_ddr_selfrefresh_cycles;
+};
+
+/* The following are exported for devices which use JEDEC specifications */
+extern const struct lpddr2_addressing lpddr2_jedec_addressing_table[];
+extern const struct lpddr2_timings lpddr2_jedec_timings_400_mhz;
+extern const struct lpddr2_timings lpddr2_jedec_timings_333_mhz;
+extern const struct lpddr2_timings lpddr2_jedec_timings_200_mhz;
+extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+
+#endif
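
For reference, a board might describe its memory part by pointing at the JEDEC tables exported above. The part chosen below (2Gb LPDDR2-S4, x32) and the self-refresh value are hypothetical placeholders; real boards typically supply vendor timings such as those declared in lpddr2-elpida.h:

#include <mach/lpddr2-jedec.h>

/* Hypothetical 2Gb LPDDR2-S4 x32 device reusing the JEDEC timing tables */
static const struct lpddr2_device_info example_lpddr2_2G_S4 = {
	.device_timings = {
		&lpddr2_jedec_timings_200_mhz,
		&lpddr2_jedec_timings_333_mhz,
		&lpddr2_jedec_timings_400_mhz,
	},
	.min_tck	= &lpddr2_jedec_min_tck,
	.type		= LPDDR2_TYPE_S4,
	.density	= LPDDR2_DENSITY_2Gb,
	.io_width	= LPDDR2_IO_WIDTH_32,
	/* placeholder: real boards pick a board-specific idle-cycle count */
	.emif_ddr_selfrefresh_cycles = -1,
};
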
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
new file mode 100644
index 0000000..66f31c3a
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -0,0 +1,41 @@
+/*
+ * OMAP WakeupGen header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_WAKEUPGEN_H
+#define OMAP_ARCH_WAKEUPGEN_H
+
+#define OMAP_WKG_CONTROL_0 0x00
+#define OMAP_WKG_ENB_A_0 0x10
+#define OMAP_WKG_ENB_B_0 0x14
+#define OMAP_WKG_ENB_C_0 0x18
+#define OMAP_WKG_ENB_D_0 0x1c
+#define OMAP_WKG_ENB_SECURE_A_0 0x20
+#define OMAP_WKG_ENB_SECURE_B_0 0x24
+#define OMAP_WKG_ENB_SECURE_C_0 0x28
+#define OMAP_WKG_ENB_SECURE_D_0 0x2c
+#define OMAP_WKG_ENB_A_1 0x410
+#define OMAP_WKG_ENB_B_1 0x414
+#define OMAP_WKG_ENB_C_1 0x418
+#define OMAP_WKG_ENB_D_1 0x41c
+#define OMAP_WKG_ENB_SECURE_A_1 0x420
+#define OMAP_WKG_ENB_SECURE_B_1 0x424
+#define OMAP_WKG_ENB_SECURE_C_1 0x428
+#define OMAP_WKG_ENB_SECURE_D_1 0x42c
+#define OMAP_AUX_CORE_BOOT_0 0x800
+#define OMAP_AUX_CORE_BOOT_1 0x804
+#define OMAP_PTMSYNCREQ_MASK 0xc00
+#define OMAP_PTMSYNCREQ_EN 0xc04
+#define OMAP_TIMESTAMPCYCLELO 0xc08
+#define OMAP_TIMESTAMPCYCLEHI 0xc0c
+
+extern int __init omap_wakeupgen_init(void);
+extern void omap_wakeupgen_irqmask_all(unsigned int cpu, unsigned int set);
+extern void omap_wakeupgen_save(void);
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
index e4bd87619..f37cf4e 100644
--- a/arch/arm/mach-omap2/include/mach/omap4-common.h
+++ b/arch/arm/mach-omap2/include/mach/omap4-common.h
@@ -13,25 +13,84 @@
#ifndef OMAP_ARCH_OMAP4_COMMON_H
#define OMAP_ARCH_OMAP4_COMMON_H
+#include <asm/proc-fns.h>
/*
- * wfi used in low power code. Directly opcode is used instead
- * of instruction to avoid mulit-omap build break
+ * Secure low power context save/restore API index
*/
-#ifdef CONFIG_THUMB2_KERNEL
-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
-#else
-#define do_wfi() \
- __asm__ __volatile__ (".word 0xe320f003" : : : "memory")
-#endif
+#define HAL_SAVESECURERAM_INDEX 0x1a
+#define HAL_SAVEHW_INDEX 0x1b
+#define HAL_SAVEALL_INDEX 0x1c
+#define HAL_SAVEGIC_INDEX 0x1d
+
+/*
+ * Secure HAL, PPA services available
+ */
+#define PPA_SERVICE_0 0x21
+#define PPA_SERVICE_PL310_POR 0x23
+#define PPA_SERVICE_DEFAULT_POR_NS_SMP 0x25
+/*
+ * Secure HAL API flags
+ */
+#define FLAG_START_CRITICAL 0x4
+#define FLAG_IRQFIQ_MASK 0x3
+#define FLAG_IRQ_ENABLE 0x2
+#define FLAG_FIQ_ENABLE 0x1
+#define NO_FLAG 0x0
+
+/*
+ * SAR restore phase USB HOST static port
+ * configuration
+ */
+#define OMAP4_USBHOST_CLKSEL_UTMI_P2_INT_P1_INT 0x0
+#define OMAP4_USBHOST_CLKSEL_UTMI_P2_INT_P1_EXT 0x1
+#define OMAP4_USBHOST_CLKSEL_UTMI_P2_EXT_P1_INT 0x2
+#define OMAP4_USBHOST_CLKSEL_UTMI_P2_EXT_P1_EXT 0x3
+
+#ifndef __ASSEMBLER__
#ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
+extern void __iomem *omap4_get_l2cache_base(void);
#endif
-extern void __iomem *gic_dist_base_addr;
+#ifdef CONFIG_SMP
+extern void __iomem *omap4_get_scu_base(void);
+#else
+static inline void __iomem *omap4_get_scu_base(void)
+{
+ return NULL;
+}
+#endif
+extern void __iomem *omap4_get_gic_dist_base(void);
+extern void __iomem *omap4_get_gic_cpu_base(void);
+extern void __iomem *omap4_get_sar_ram_base(void);
+extern void *omap_get_dram_barrier_base(void);
+extern dma_addr_t omap4_secure_ram_phys;
extern void __init gic_init_irq(void);
+extern void gic_cpu_enable(void);
+extern void gic_cpu_disable(void);
+extern void gic_dist_enable(void);
+extern void gic_dist_disable(void);
+extern u32 gic_cpu_read(u32 reg);
extern void omap_smc1(u32 fn, u32 arg);
+extern void omap_bus_sync(void);
+extern void omap_do_wfi(void);
+
+extern bool gic_dist_disabled(void);
+extern void gic_timer_retrigger(void);
+
+/*
+ * Read MPIDR: Multiprocessor affinity register
+ */
+static inline unsigned int hard_smp_processor_id(void)
+{
+ unsigned int cpunum;
+
+ asm volatile (
+ "mrc p15, 0, %0, c0, c0, 5\n"
+ : "=r" (cpunum));
+ return cpunum &= 0x0f;
+}
#ifdef CONFIG_SMP
/* Needed for secondary core boot */
@@ -39,5 +98,65 @@
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
extern void omap_auxcoreboot_addr(u32 cpu_addr);
extern u32 omap_read_auxcoreboot0(void);
+
+#ifdef CONFIG_PM
+extern int omap4_mpuss_init(void);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state);
+extern void omap4_cpu_resume(void);
+extern u32 omap_smc2(u32 id, u32 flag, u32 pargs);
+extern u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+#else
+static inline int omap4_enter_lowpower(unsigned int cpu,
+ unsigned int power_state)
+{
+ cpu_do_idle();
+ return 0;
+}
+
+static inline int omap4_mpuss_init(void)
+{
+ return 0;
+}
+
+static inline void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state)
+{
+}
+
+static inline void omap4_cpu_resume(void)
+{
+}
+
+static inline u32 omap_smc2(u32 id, u32 flag, u32 pargs)
+{
+ return 0;
+}
+static inline u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+#endif /* CONFIG_SMP */
+
+extern int omap4_prcm_freq_update(void);
+
+#ifdef CONFIG_PM
+extern int omap4_sar_save(void);
+extern void omap4_sar_overwrite(void);
+extern void omap4_sar_usbhost_init(u32 fck_source);
+#else
+static inline int omap4_sar_save(void)
+{
+	return 0;
+}
+static inline void omap4_sar_overwrite(void)
+{
+}
+static inline void omap4_sar_usbhost_init(u32 fck_source)
+{
+}
#endif
-#endif
+
+#endif /* __ASSEMBLER__ */
+#endif /* OMAP_ARCH_OMAP4_COMMON_H */
diff --git a/arch/arm/mach-omap2/include/mach/omap_fiq_debugger.h b/arch/arm/mach-omap2/include/mach/omap_fiq_debugger.h
new file mode 100644
index 0000000..4378a77
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap_fiq_debugger.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_OMAP_FIQ_DEBUGGER_H
+#define __MACH_OMAP_FIQ_DEBUGGER_H
+
+#ifdef CONFIG_OMAP_FIQ_DEBUGGER
+int __init omap_serial_debug_init(int id, bool is_fiq, bool is_high_prio_irq,
+ struct omap_device_pad *pads, int num_pads);
+u32 omap_debug_uart_resume_idle(void);
+
+#else
+static inline int __init omap_serial_debug_init(int id, bool is_fiq, bool is_high_prio_irq,
+ struct omap_device_pad *pads, int num_pads)
+{
+ return 0;
+}
+
+static inline u32 omap_debug_uart_resume_idle(void)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-omap2/include/mach/tiler.h b/arch/arm/mach-omap2/include/mach/tiler.h
new file mode 100644
index 0000000..0fdb6eb
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/tiler.h
@@ -0,0 +1,513 @@
+/*
+ * tiler.h
+ *
+ * TILER driver support functions for TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TILER_H
+#define TILER_H
+
+#include <linux/mm.h>
+
+/*
+ * ----------------------------- API Definitions -----------------------------
+ */
+
+/* return true if physical address is in the tiler container */
+bool is_tiler_addr(u32 phys);
+
+enum tiler_fmt {
+ TILFMT_MIN = -2,
+ TILFMT_INVALID = -2,
+ TILFMT_NONE = -1,
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT = 1,
+ TILFMT_32BIT = 2,
+ TILFMT_PAGE = 3,
+ TILFMT_MAX = 3,
+ TILFMT_8AND16 = 4, /* used to mark NV12 reserve block */
+};
+
+/* tiler block info */
+struct tiler_block_t {
+ u32 phys; /* system space (L3) tiler addr */
+ u32 width; /* width */
+ u32 height; /* height */
+ u32 key; /* secret key */
+ u32 id; /* unique block ID */
+};
+
+/* tiler (image/video frame) view */
+struct tiler_view_t {
+ u32 tsptr; /* tiler space addr */
+ u32 width; /* width */
+ u32 height; /* height */
+ u32 bpp; /* bytes per pixel */
+ s32 h_inc; /* horizontal increment */
+ s32 v_inc; /* vertical increment */
+};
+
+/* get the tiler format for a physical address or TILFMT_INVALID */
+enum tiler_fmt tiler_fmt(u32 phys);
+
+/* get the modified (1 for page mode) bytes-per-pixel for a tiler block */
+u32 tiler_bpp(const struct tiler_block_t *b);
+
+/* get tiler block physical stride */
+u32 tiler_pstride(const struct tiler_block_t *b);
+
+/* get tiler block virtual stride */
+static inline u32 tiler_vstride(const struct tiler_block_t *b)
+{
+ return PAGE_ALIGN((b->phys & ~PAGE_MASK) + tiler_bpp(b) * b->width);
+}
+
+/* returns the virtual size of the block (for mmap) */
+static inline u32 tiler_size(const struct tiler_block_t *b)
+{
+ return b->height * tiler_vstride(b);
+}
+
+/* Event types */
+#define TILER_DEVICE_CLOSE 0
+
+/**
+ * Registers a notifier block with TILER driver.
+ *
+ * @param nb notifier_block
+ *
+ * @return error status
+ */
+s32 tiler_reg_notifier(struct notifier_block *nb);
+
+/**
+ * Un-registers a notifier block with TILER driver.
+ *
+ * @param nb notifier_block
+ *
+ * @return error status
+ */
+s32 tiler_unreg_notifier(struct notifier_block *nb);
+
+/**
+ * Get the physical address for a given user va.
+ *
+ * @param usr user virtual address
+ *
+ * @return valid pa or 0 for error
+ */
+u32 tiler_virt2phys(u32 usr);
+
+/**
+ * Reserves a 1D or 2D TILER block area and memory for the
+ * current process with group ID 0.
+ *
+ * @param blk pointer to tiler block data. This must be set up ('phys' member
+ * must be 0) with the tiler block information. 'height' must be 1
+ * for 1D block.
+ * @param fmt TILER block format
+ *
+ * @return error status
+ */
+s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt);
+
+/**
+ * Reserves a 1D or 2D TILER block area and memory for a set process and group
+ * ID.
+ *
+ * @param blk pointer to tiler block data. This must be set up ('phys' member
+ * must be 0) with the tiler block information. 'height' must be 1
+ * for 1D block.
+ * @param fmt TILER block format
+ * @param gid group ID
+ * @param pid process ID
+ *
+ * @return error status
+ */
+s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 gid, pid_t pid);
+
+/**
+ * Mmaps a portion of a tiler block to a virtual address. Use this method in
+ * your driver's mmap function to potentially combine multiple tiler blocks as
+ * one virtual buffer.
+ *
+ * @param blk pointer to tiler block data
+ * @param offs offset from where to map (must be page aligned)
+ * @param size size of area to map (must be page aligned)
+ * @param vma VMM memory area to map to
+ * @param voffs offset (from vm_start) in the VMM memory area to start
+ * mapping at
+ *
+ * @return error status
+ */
+s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ struct vm_area_struct *vma, u32 voffs);
+
+/**
+ * Ioremaps a portion of a tiler block. Use this method in your driver instead
+ * of ioremap to potentially combine multiple tiler blocks as one virtual
+ * buffer.
+ *
+ * @param blk pointer to tiler block data
+ * @param offs offset from where to map (must be page aligned)
+ * @param size size of area to map (must be page aligned)
+ * @param addr virtual address
+ * @param mtype ioremap memory type (e.g. MT_DEVICE)
+ *
+ * @return error status
+ */
+s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size, u32 addr,
+ u32 mtype);
+
+/**
+ * Maps an existing buffer to a 1D or 2D TILER area for the
+ * current process with group ID 0.
+ *
+ * Currently, only 1D area mapping is supported.
+ *
+ * NOTE: alignment is always PAGE_SIZE and offset is 0 as full pages are mapped
+ * into tiler container.
+ *
+ * @param blk pointer to tiler block data. This must be set up
+ * ('phys' member must be 0) with the tiler block
+ * information. 'height' must be 1 for 1D block.
+ * @param fmt TILER format
+ * @param usr_addr user space address of existing buffer.
+ *
+ * @return error status
+ */
+s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr);
+
+/**
+ * Maps an existing buffer to a 1D or 2D TILER area for a set process and group
+ * ID.
+ *
+ * Currently, only 1D area mapping is supported.
+ *
+ * NOTE: alignment is always PAGE_SIZE and offset is 0 as full pages are mapped
+ * into tiler container.
+ *
+ * @param blk pointer to tiler block data. This must be set up
+ * ('phys' member must be 0) with the tiler block
+ * information. 'height' must be 1 for 1D block.
+ * @param fmt TILER format
+ * @param gid group ID
+ * @param pid process ID
+ * @param usr_addr user space address of existing buffer.
+ *
+ * @return error status
+ */
+s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 gid, pid_t pid, u32 usr_addr);
+
+/**
+ * Frees TILER memory. Since there may be multiple references for the same area
+ * if duplicated by tiler_dup, the area is only actually freed if all references
+ * have been freed.
+ *
+ * @param blk pointer to a tiler block data as filled by tiler_alloc,
+ * tiler_map or tiler_dup. 'phys' and 'id' members will be set to
+ * 0 on success.
+ */
+void tiler_free(struct tiler_block_t *blk);
+
+/**
+ * Reserves tiler area for n identical blocks for the current process. Use this
+ * method to get optimal placement of multiple identical tiler blocks; however,
+ * it may not reserve area if tiler_alloc is equally efficient.
+ *
+ * @param n number of identical blocks
+ * @param fmt TILER format
+ * @param width block width
+ * @param height block height (must be 1 for 1D)
+ */
+void tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height);
+
+/**
+ * Reserves tiler area for n identical blocks. Use this method to get optimal
+ * placement of multiple identical tiler blocks; however, it may not reserve
+ * area if tiler_alloc is equally efficient.
+ *
+ * @param n number of identical blocks
+ * @param fmt TILER bit mode
+ * @param width block width
+ * @param height block height (must be 1 for 1D)
+ * @param gid group ID
+ * @param pid process ID
+ */
+void tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 gid, pid_t pid);
+
+/**
+ * Reserves tiler area for n identical NV12 blocks for the current process. Use
+ * this method to get optimal placement of multiple identical NV12 tiler blocks;
+ * however, it may not reserve area if tiler_alloc is equally efficient.
+ *
+ * @param n number of identical blocks
+ * @param width block width (Y)
+ * @param height block height (Y)
+ */
+void tiler_reserve_nv12(u32 n, u32 width, u32 height);
+
+/**
+ * Reserves tiler area for n identical NV12 blocks. Use this method to get
+ * optimal placement of multiple identical NV12 tiler blocks; however, it may
+ * not reserve area if tiler_alloc is equally efficient.
+ *
+ * @param n number of identical blocks
+ * @param width block width (Y)
+ * @param height block height (Y)
+ * @param gid group ID
+ * @param pid process ID
+ */
+void tiler_reservex_nv12(u32 n, u32 width, u32 height, u32 gid, pid_t pid);
+
+/**
+ * Create a view based on a tiler address and width and height
+ *
+ * This method should only be used as a last resort, e.g. if tilview object
+ * cannot be passed because of incoherence with other view 2D objects that must
+ * be supported.
+ *
+ * @param view Pointer to a view where the information will be stored
+ * @param phys MUST be a tiler address
+ * @param width view width
+ * @param height view height
+ */
+void tilview_create(struct tiler_view_t *view, u32 phys, u32 width, u32 height);
+
+/**
+ * Obtains the view information for a tiler block
+ *
+ * @param view Pointer to a view where the information will be stored
+ * @param blk Pointer to an existing allocated tiler block
+ */
+void tilview_get(struct tiler_view_t *view, struct tiler_block_t *blk);
+
+/**
+ * Crops a tiler view to a rectangular portion. Crop area must be fully within
+ * the original tiler view: 0 <= left <= left + width <= view->width, also:
+ * 0 <= top <= top + height <= view->height.
+ *
+ * @param view Pointer to tiler view to be cropped
+ * @param left x of top-left corner
+ * @param top y of top-left corner
+ * @param width crop width
+ * @param height crop height
+ *
+ * @return error status. The view will be reduced to the crop region if the
+ * crop region is correct. Otherwise, no modifications are made.
+ */
+s32 tilview_crop(struct tiler_view_t *view, u32 left, u32 top, u32 width,
+ u32 height);
+
+/**
+ * Rotates a tiler view clockwise by a specified degree.
+ *
+ * @param view Pointer to tiler view to be rotated
+ * @param rotation Degree of rotation (clockwise). Must be a multiple of
+ * 90.
+ * @return error status. View is not modified on error; otherwise, it is
+ * updated in place.
+ */
+s32 tilview_rotate(struct tiler_view_t *view, s32 rotation);
+
+/**
+ * Mirrors a tiler view horizontally and/or vertically.
+ *
+ * @param view Pointer to tiler view to be mirrored
+ * @param flip_x Mirror horizontally (left-to-right)
+ * @param flip_y Mirror vertically (top-to-bottom)
+ *
+ * @return error status. View is not modified on error; otherwise, it is
+ * updated in place.
+ */
+s32 tilview_flip(struct tiler_view_t *view, bool flip_x, bool flip_y);
+
+/*
+ * -------------------- TILER hooks for ION/HWC migration --------------------
+ */
+
+/* type of tiler memory */
+enum tiler_memtype {
+ TILER_MEM_ALLOCED, /* tiler allocated the memory */
+ TILER_MEM_GOT_PAGES, /* tiler used get_user_pages */
+ TILER_MEM_USING, /* tiler is using the pages */
+};
+
+/* physical pages to pin - mem must be kmalloced */
+struct tiler_pa_info {
+ u32 num_pg; /* number of pages in page-list */
+ u32 *mem; /* list of phys page addresses */
+ enum tiler_memtype memtype; /* how we got physical pages */
+};
+
+typedef struct mem_info *tiler_blk_handle;
+
+/**
+ * Allocate a 1D area of container space in the Tiler
+ *
+ * @param pa ptr to tiler_pa_info structure
+ *
+ * @return handle Handle to tiler block information. NULL on error.
+ *
+ * NOTE: this will take ownership of pa->mem (and will free it)
+ *
+ */
+tiler_blk_handle tiler_map_1d_block(struct tiler_pa_info *pa);
+
+/**
+ * Allocate an area of container space in the Tiler
+ *
+ * @param fmt Tiler bpp mode
+ * @param width Width in pixels
+ * @param height Height in pixels
+ * @param ssptr Value of tiler physical address of allocation
+ * @param virt_array Array of physical addresses for the start of each
+ *                   virtual page
+ *
+ * @return handle Handle to tiler block information. NULL on error.
+ *
+ * NOTE: For 1D allocations, specify the full size in the width field, and
+ * specify a height of 1.
+ */
+tiler_blk_handle tiler_alloc_block_area(enum tiler_fmt fmt, u32 width,
+ u32 height, u32 *ssptr,
+ u32 *virt_array);
+
+/**
+ * Free a reserved area in the Tiler
+ *
+ * @param handle Handle to tiler block information
+ *
+ */
+void tiler_free_block_area(tiler_blk_handle block);
+
+/**
+ * Pins a set of physical pages into the Tiler using the area defined in a
+ * handle
+ *
+ * @param handle Handle to tiler block information
+ * @param addr_array Array of addresses
+ * @param nents Number of addresses in array
+ *
+ * @return error status.
+ */
+s32 tiler_pin_block(tiler_blk_handle handle, u32 *addr_array, u32 nents);
+
+/**
+ * Unpins a set of physical pages from the Tiler
+ *
+ * @param handle Handle to tiler block information
+ *
+ */
+void tiler_unpin_block(tiler_blk_handle handle);
+
+/**
+ * Gives memory requirements for a given container allocation
+ *
+ * @param fmt Tiler bpp mode
+ * @param width Width in pixels
+ * @param height Height in pixels
+ * @param alloc_pages Number of pages required to back tiler container
+ * @param virt_pages Number of pages required to back the virtual address space
+ *
+ * @return 0 for success. Non zero for error
+ */
+s32 tiler_memsize(enum tiler_fmt fmt, u32 width, u32 height, u32 *alloc_pages,
+ u32 *virt_pages);
+
+/**
+ * Returns virtual stride of a tiler block
+ *
+ * @param handle Handle to tiler block allocation
+ *
+ * @return Size of virtual stride
+ */
+u32 tiler_block_vstride(tiler_blk_handle handle);
+
+struct tiler_pa_info *user_block_to_pa(u32 usr_addr, u32 num_pg);
+void tiler_pa_free(struct tiler_pa_info *pa);
+
+/*
+ * ---------------------------- IOCTL Definitions ----------------------------
+ */
+
+/* ioctls */
+#define TILIOC_GBLK _IOWR('z', 100, struct tiler_block_info)
+#define TILIOC_FBLK _IOW('z', 101, struct tiler_block_info)
+#define TILIOC_GSSP _IOWR('z', 102, u32)
+#define TILIOC_MBLK _IOWR('z', 103, struct tiler_block_info)
+#define TILIOC_UMBLK _IOW('z', 104, struct tiler_block_info)
+#define TILIOC_QBUF _IOWR('z', 105, struct tiler_buf_info)
+#define TILIOC_RBUF _IOWR('z', 106, struct tiler_buf_info)
+#define TILIOC_URBUF _IOWR('z', 107, struct tiler_buf_info)
+#define TILIOC_QBLK _IOWR('z', 108, struct tiler_block_info)
+#define TILIOC_PRBLK _IOW('z', 109, struct tiler_block_info)
+#define TILIOC_URBLK _IOW('z', 110, u32)
+
+struct area {
+ u16 width;
+ u16 height;
+};
+
+/* userspace tiler block info */
+struct tiler_block_info {
+ enum tiler_fmt fmt;
+ union {
+ struct area area;
+ u32 len;
+ } dim;
+ u32 stride; /* stride is not maintained for 1D blocks */
+ void *ptr; /* userspace address for mapping existing buffer */
+ u32 id;
+ u32 key;
+ u32 group_id;
+ u32 ssptr; /* physical address, may not be exposed by default */
+};
+
+#define TILER_MAX_NUM_BLOCKS 16
+
+/* userspace tiler buffer info */
+struct tiler_buf_info {
+ u32 num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ u32 offset;
+ u32 length; /* also used as number of buffers for reservation */
+};
+
+#endif
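A minimal usage sketch for the block-area and pin hooks declared above. The TILFMT_8BIT enumerator and the caller-supplied list of physical page addresses are assumptions (not part of this header excerpt), and error handling is trimmed to the essentials.

#include <linux/errno.h>
#include <linux/slab.h>

/* Reserve a 640x480 8-bit container area and pin caller pages into it. */
static int example_map_pages(u32 *page_addrs, u32 nents)
{
	tiler_blk_handle blk;
	u32 ssptr, alloc_pages, virt_pages;
	u32 *virt_array;
	s32 ret;

	/* Ask TILER how many pages back this container area */
	if (tiler_memsize(TILFMT_8BIT, 640, 480, &alloc_pages, &virt_pages))
		return -EINVAL;

	virt_array = kcalloc(virt_pages, sizeof(*virt_array), GFP_KERNEL);
	if (!virt_array)
		return -ENOMEM;

	/* Reserve container space only; nothing is pinned yet */
	blk = tiler_alloc_block_area(TILFMT_8BIT, 640, 480, &ssptr, virt_array);
	if (!blk) {
		kfree(virt_array);
		return -ENOMEM;
	}

	/* Back the reserved area with the caller's physical pages */
	ret = tiler_pin_block(blk, page_addrs, nents);
	if (ret) {
		tiler_free_block_area(blk);
		kfree(virt_array);
		return ret;
	}

	/* ... use ssptr / virt_array, then tear down ... */
	tiler_unpin_block(blk);
	tiler_free_block_area(blk);
	kfree(virt_array);
	return 0;
}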
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 441e79d..b5c8e80 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -38,6 +38,7 @@
#include "io.h"
#include <plat/omap-pm.h>
+#include "voltage.h"
#include "powerdomain.h"
#include "clockdomain.h"
@@ -355,18 +356,22 @@
u8 postsetup_state;
if (cpu_is_omap242x()) {
+ omap2xxx_voltagedomains_init();
omap2xxx_powerdomains_init();
omap2xxx_clockdomains_init();
omap2420_hwmod_init();
} else if (cpu_is_omap243x()) {
+ omap2xxx_voltagedomains_init();
omap2xxx_powerdomains_init();
omap2xxx_clockdomains_init();
omap2430_hwmod_init();
} else if (cpu_is_omap34xx()) {
+ omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
} else if (cpu_is_omap44xx()) {
+ omap44xx_voltagedomains_init();
omap44xx_powerdomains_init();
omap44xx_clockdomains_init();
omap44xx_hwmod_init();
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index adb083e..f42a4a3 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -19,6 +19,7 @@
#include <linux/stringify.h>
#include <plat/iommu.h>
+#include <plat/omap_device.h>
/*
* omap2 architecture specific register bit definitions
@@ -84,18 +85,25 @@
iommu_write_reg(obj, l, MMU_CNTL);
}
-
static int omap2_iommu_enable(struct iommu *obj)
{
u32 l, pa;
unsigned long timeout;
+ int ret = 0;
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
- return -EINVAL;
+ if (!obj->secure_mode) {
+ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ return -EINVAL;
- pa = virt_to_phys(obj->iopgd);
- if (!IS_ALIGNED(pa, SZ_16K))
- return -EINVAL;
+ pa = virt_to_phys(obj->iopgd);
+ if (!IS_ALIGNED(pa, SZ_16K))
+ return -EINVAL;
+ } else
+ pa = (u32)obj->secure_ttb;
+
+ ret = omap_device_enable(obj->pdev);
+ if (ret)
+ return ret;
iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
@@ -124,11 +132,16 @@
__iommu_set_twl(obj, true);
+ if (cpu_is_omap44xx())
+ iommu_write_reg(obj, 0x1, MMU_GP_REG);
+
return 0;
}
static void omap2_iommu_disable(struct iommu *obj)
{
+ int ret = 0;
+
u32 l = iommu_read_reg(obj, MMU_CNTL);
l &= ~MMU_CNTL_MASK;
@@ -136,6 +149,8 @@
iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
+ ret = omap_device_shutdown(obj->pdev);
+ if (ret)
+ dev_err(obj->dev, "%s err 0x%x\n", __func__, ret);
}
static void omap2_iommu_set_twl(struct iommu *obj, bool on)
@@ -168,7 +183,6 @@
errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
if (stat & MMU_IRQ_MULTIHITFAULT)
errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
- iommu_write_reg(obj, stat, MMU_IRQSTATUS);
return errs;
}
@@ -225,7 +239,8 @@
attr = e->mixed << 5;
attr |= e->endian;
attr |= e->elsz >> 3;
- attr <<= ((e->pgsz & MMU_CAM_PGSZ_4K) ? 0 : 6);
+ attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+ (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
return attr;
}
diff --git a/arch/arm/mach-omap2/ldo.c b/arch/arm/mach-omap2/ldo.c
new file mode 100644
index 0000000..13ee2a3
--- /dev/null
+++ b/arch/arm/mach-omap2/ldo.c
@@ -0,0 +1,333 @@
+/*
+ * OMAP3/4 LDO users core
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Mike Turquette <mturquette@ti.com>
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <plat/cpu.h>
+#include "voltage.h"
+#include "ldo.h"
+
+/**
+ * _is_abb_enabled() - check if abb is enabled
+ * @voltdm: voltage domain to check for
+ * @abb: abb instance pointer
+ *
+ * Returns true if enabled, else returns false
+ */
+static inline bool _is_abb_enabled(struct voltagedomain *voltdm,
+ struct omap_ldo_abb_instance *abb)
+{
+ return (voltdm->read(abb->setup_reg) & abb->setup_bits->enable_mask) ?
+ true : false;
+}
+
+/**
+ * _abb_set_availability() - sets the availability of the ABB LDO
+ * @voltdm: voltage domain for which we would like to set
+ * @abb: abb instance pointer
+ * @available: true to enable the LDO, false to disable it
+ *
+ * Depending on the request, it enables/disables the LDO if it was not
+ * in that state already.
+ */
+static inline void _abb_set_availability(struct voltagedomain *voltdm,
+ struct omap_ldo_abb_instance *abb,
+ bool available)
+{
+ if (_is_abb_enabled(voltdm, abb) == available)
+ return;
+
+ voltdm->rmw(abb->setup_bits->enable_mask,
+ (available) ? abb->setup_bits->enable_mask : 0,
+ abb->setup_reg);
+}
+
+/**
+ * _abb_wait_tranx() - wait for abb tranxdone event
+ * @voltdm: voltage domain we are operating on
+ * @abb: pointer to the abb instance
+ *
+ * Returns -ETIMEDOUT if the event is not set on time.
+ */
+static int _abb_wait_tranx(struct voltagedomain *voltdm,
+ struct omap_ldo_abb_instance *abb)
+{
+ int timeout;
+ int ret;
+
+ timeout = 0;
+ while (timeout++ < abb->tranx_timeout) {
+ ret = abb->ops->check_txdone(abb->prm_irq_id);
+ if (ret)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout >= abb->tranx_timeout) {
+ pr_warning("%s:%s: ABB TRANXDONE waittimeout(timeout=%d)\n",
+ __func__, voltdm->name, timeout);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * _abb_clear_tranx() - clear abb tranxdone event
+ * @voltdm: voltage domain we are operating on
+ * @abb: pointer to the abb instance
+ *
+ * Returns -ETIMEDOUT if the event is not cleared on time.
+ */
+static int _abb_clear_tranx(struct voltagedomain *voltdm,
+ struct omap_ldo_abb_instance *abb)
+{
+ int timeout;
+ int ret;
+
+ /* clear interrupt status */
+ timeout = 0;
+ while (timeout++ < abb->tranx_timeout) {
+ abb->ops->clear_txdone(abb->prm_irq_id);
+
+ ret = abb->ops->check_txdone(abb->prm_irq_id);
+ if (!ret)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout >= abb->tranx_timeout) {
+ pr_warning("%s:%s: ABB TRANXDONE timeout(timeout=%d)\n",
+ __func__, voltdm->name, timeout);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * _abb_set_abb() - helper to actually set ABB (NOMINAL/FAST)
+ * @voltdm: voltage domain we are operating on
+ * @abb_type: ABB type we want to set
+ */
+static int _abb_set_abb(struct voltagedomain *voltdm, int abb_type)
+{
+ struct omap_ldo_abb_instance *abb = voltdm->abb;
+ int ret;
+
+ ret = _abb_clear_tranx(voltdm, abb);
+ if (ret)
+ return ret;
+
+ /* program next state of ABB ldo */
+ voltdm->rmw(abb->ctrl_bits->opp_sel_mask,
+ abb_type << __ffs(abb->ctrl_bits->opp_sel_mask),
+ abb->ctrl_reg);
+
+ /* initiate ABB ldo change */
+ voltdm->rmw(abb->ctrl_bits->opp_change_mask,
+ abb->ctrl_bits->opp_change_mask, abb->ctrl_reg);
+
+ /* Wait for conversion completion */
+ ret = _abb_wait_tranx(voltdm, abb);
+ WARN_ONCE(ret, "%s: voltdm %s ABB TRANXDONE was not set on time:%d\n",
+ __func__, voltdm->name, ret);
+ /* clear interrupt status */
+ ret |= _abb_clear_tranx(voltdm, abb);
+
+ return ret;
+}
+
+/**
+ * _abb_scale() - wrapper which does the necessary things for pre and post scale
+ * @voltdm: voltage domain to operate on
+ * @target_vdata: target voltage data we are moving to
+ * @is_prescale: are we doing a prescale operation?
+ *
+ * NOTE: We expect caller ensures that a specific voltdm is modified
+ * sequentially. All locking is expected to be implemented by users
+ * of LDO functions
+ */
+static int _abb_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_vdata, bool is_prescale)
+{
+ int ret = 0;
+ int curr_abb, target_abb;
+ struct omap_ldo_abb_instance *abb;
+
+ if (IS_ERR_OR_NULL(target_vdata)) {
+ pr_err("%s:%s: Invalid volt data tv=%p!\n", __func__,
+ voltdm->name, target_vdata);
+ return -EINVAL;
+ }
+
+ abb = voltdm->abb;
+ if (IS_ERR_OR_NULL(abb)) {
+ WARN(1, "%s:%s: no abb structure!\n", __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ curr_abb = abb->__cur_abb_type;
+ target_abb = target_vdata->abb_type;
+
+ pr_debug("%s: %s: Enter: t_v=%ld scale=%d c_abb=%d t_abb=%d ret=%d\n",
+ __func__, voltdm->name, omap_get_nominal_voltage(target_vdata),
+ is_prescale, curr_abb, target_abb, ret);
+
+ /* If we aren't booting and there is no change, we get out */
+ if (target_abb == curr_abb && voltdm->curr_volt)
+ goto out;
+
+ /* Do we have an invalid ABB entry? scream for a fix! */
+ if (curr_abb == OMAP_ABB_NONE || target_abb == OMAP_ABB_NONE) {
+ WARN(1, "%s:%s: INVALID abb entries? curr=%d target=%d\n",
+ __func__, voltdm->name, curr_abb, target_abb);
+ return -EINVAL;
+ }
+
+ /*
+ * We set up ABB as follows:
+ * if we are scaling *to* a voltage which needs ABB, do it in post
+ * if we are scaling *from* a voltage which needs ABB, do it in pre
+ * So, if the conditions are in reverse, we just return happy
+ */
+ if (is_prescale && (target_abb > curr_abb))
+ goto out;
+
+ if (!is_prescale && (target_abb < curr_abb))
+ goto out;
+
+ /* Time to set ABB now */
+ ret = _abb_set_abb(voltdm, target_abb);
+ if (!ret) {
+ abb->__cur_abb_type = target_abb;
+ pr_debug("%s: %s: scaled - t_abb=%d!\n", __func__,
+ voltdm->name, target_abb);
+ } else {
+ pr_warning("%s: %s: failed scale: t_abb=%d (%d)!\n", __func__,
+ voltdm->name, target_abb, ret);
+ }
+
+out:
+ pr_debug("%s: %s:Exit: t_v=%ld scale=%d c_abb=%d t_abb=%d ret=%d\n",
+ __func__, voltdm->name, omap_get_nominal_voltage(target_vdata),
+ is_prescale, curr_abb, target_abb, ret);
+ return ret;
+
+}
+
+/**
+ * omap_ldo_abb_pre_scale() - Enable required ABB strategy before voltage scale
+ * @voltdm: voltage domain to operate on
+ * @target_vdata: target voltage data we are moving to.
+ */
+int omap_ldo_abb_pre_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_vdata)
+{
+ return _abb_scale(voltdm, target_vdata, true);
+}
+
+/**
+ * omap_ldo_abb_post_scale() - Enable required ABB strategy after voltage scale
+ * @voltdm: voltage domain operated on
+ * @target_vdata: target voltage data we moved to
+ */
+int omap_ldo_abb_post_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_vdata)
+{
+ return _abb_scale(voltdm, target_vdata, false);
+}
+
+/**
+ * omap_ldo_abb_init() - initialize the ABB LDO associated with this domain
+ * @voltdm: voltdm for which we need to initialize the ABB LDO
+ *
+ * Programs the configurations that do not change at runtime for this domain.
+ */
+void __init omap_ldo_abb_init(struct voltagedomain *voltdm)
+{
+ u32 sys_clk_rate;
+ u32 cycle_rate;
+ u32 settling_time;
+ u32 wait_count_val;
+ struct omap_ldo_abb_instance *abb;
+
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s: No voltdm?\n", __func__);
+ return;
+ }
+ if (!voltdm->read || !voltdm->write || !voltdm->rmw) {
+ pr_err("%s: No read/write/rmw API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ abb = voltdm->abb;
+ if (IS_ERR_OR_NULL(abb))
+ return;
+ if (IS_ERR_OR_NULL(abb->ctrl_bits) || IS_ERR_OR_NULL(abb->setup_bits)) {
+ pr_err("%s: Corrupted ABB configuration on vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ /*
+ * SR2_WTCNT_VALUE must be programmed with the expected settling time
+ * for ABB ldo transition. This value depends on the cycle rate for
+ * the ABB IP (varies per OMAP family), and the system clock frequency
+ * (varies per board). The formula is:
+ *
+ * SR2_WTCNT_VALUE = SettlingTime / (CycleRate / SystemClkRate))
+ * where SettlingTime is in micro-seconds and SystemClkRate is in MHz.
+ *
+ * To avoid dividing by zero multiply both CycleRate and SettlingTime
+ * by 10 such that the final result is the one we want.
+ */
+
+ /* Convert SYS_CLK rate to MHz & prevent divide by zero */
+ sys_clk_rate = DIV_ROUND_CLOSEST(voltdm->sys_clk.rate, 1000000);
+ cycle_rate = abb->cycle_rate * 10;
+ settling_time = abb->settling_time * 10;
+
+ /* Calculate cycle rate */
+ cycle_rate = DIV_ROUND_CLOSEST(cycle_rate, sys_clk_rate);
+
+ /* Calculate SR2_WTCNT_VALUE */
+ wait_count_val = DIV_ROUND_CLOSEST(settling_time, cycle_rate);
+
+ voltdm->rmw(abb->setup_bits->wait_count_mask,
+ wait_count_val << __ffs(abb->setup_bits->wait_count_mask),
+ abb->setup_reg);
+
+ /* Allow Forward Body-Bias */
+ voltdm->rmw(abb->setup_bits->active_fbb_mask,
+ abb->setup_bits->active_fbb_mask, abb->setup_reg);
+
+ /* Enable ABB */
+ _abb_set_availability(voltdm, abb, true);
+
+ /*
+ * Beware of the bootloader!
+ * Initialize the current ABB type from what we read off the register;
+ * we can't trust the initial state implied by the boot voltage's
+ * volt_data. Not all bootloaders are nice :(
+ */
+ abb->__cur_abb_type = (voltdm->read(abb->ctrl_reg) &
+ abb->ctrl_bits->opp_sel_mask) >>
+ __ffs(abb->ctrl_bits->opp_sel_mask);
+
+ return;
+}
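As a sanity check on the SR2_WTCNT_VALUE arithmetic above, here is a hedged worked example. It assumes a 38.4 MHz board sys_clk and the OMAP4 MPU ABB parameters used later in this series (settling_time = 50 us, cycle_rate = 16); the clock rate is an assumption, not board data taken from this patch.

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

/* Mirrors the wait-count math in omap_ldo_abb_init() with fixed inputs. */
static u32 example_abb_wait_count(void)
{
	u32 sys_clk_rate, cycle_rate, settling_time;

	sys_clk_rate = DIV_ROUND_CLOSEST(38400000, 1000000);	/* 38 */
	cycle_rate = 16 * 10;					/* 160 */
	settling_time = 50 * 10;				/* 500 */

	/* 160 / 38 rounds to 4; without the x10 scaling it would round to 0 */
	cycle_rate = DIV_ROUND_CLOSEST(cycle_rate, sys_clk_rate);

	/* 500 / 4 = 125 -> SR2_WTCNT_VALUE */
	return DIV_ROUND_CLOSEST(settling_time, cycle_rate);
}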
diff --git a/arch/arm/mach-omap2/ldo.h b/arch/arm/mach-omap2/ldo.h
new file mode 100644
index 0000000..44e66f4
--- /dev/null
+++ b/arch/arm/mach-omap2/ldo.h
@@ -0,0 +1,113 @@
+/*
+ * OMAP3/4 LDO structure and macro definitions
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Mike Turquette <mturquette@ti.com>
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_LDO_H
+#define __ARCH_ARM_MACH_OMAP2_LDO_H
+
+
+/**
+ * struct omap_ldo_abb_ops - ABB LDO status operation pointers
+ * @check_txdone: check if the transaction is done
+ * @clear_txdone: clear the transaction done event
+ */
+struct omap_ldo_abb_ops {
+ u32(*check_txdone) (u8 irq_id);
+ void (*clear_txdone) (u8 irq_id);
+};
+
+/*
+ * NOTE: OMAP3630 calls this register "ctrl", while on
+ * OMAP4430/OMAP4460 it is the "setup" register
+ */
+#define OMAP_LDO_ABB_SETUP_SR2_WTCNT_VALUE_MASK (0xFF << 8)
+#define OMAP_LDO_ABB_SETUP_ACTIVE_FBB_SEL_MASK BIT(2)
+#define OMAP_LDO_ABB_SETUP_SR2EN_MASK BIT(0)
+
+/**
+ * struct omap_ldo_abb_setup_bits - setup register bit defns
+ * @enable_mask: SR2EN field
+ * @active_fbb_mask: ACTIVE_FBB_SEL field
+ * @wait_count_mask: SR2_WTCNT_VALUE field
+ */
+struct omap_ldo_abb_setup_bits {
+ u32 enable_mask;
+ u32 active_fbb_mask;
+ /* RBB is not recommended to be used and hence not supported */
+ u32 wait_count_mask;
+};
+
+/*
+ * NOTE: OMAP3630 calls this register "setup", while on
+ * OMAP4430/OMAP4460 it is the "ctrl" register
+ */
+#define OMAP_LDO_ABB_CTRL_SR2_IN_TRANSITION_MASK BIT(6)
+#define OMAP_LDO_ABB_CTRL_SR2_STATUS_MASK (0x3 << 3)
+#define OMAP_LDO_ABB_CTRL_OPP_CHANGE_MASK BIT(2)
+#define OMAP_LDO_ABB_CTRL_OPP_SEL_MASK (0x3 << 0)
+
+/**
+ * struct omap_ldo_abb_ctrl_bits - ctrl register bit defns
+ * @in_tansition_mask: SR2_IN_TRANSITION field
+ * @status_mask: SR2_STATUS field
+ * @opp_change_mask: OPP_CHANGE field
+ * @opp_sel_mask: OPP_SEL field
+ */
+struct omap_ldo_abb_ctrl_bits {
+ u32 in_tansition_mask;
+ u32 status_mask;
+ u32 opp_change_mask;
+ u32 opp_sel_mask;
+};
+
+#define OMAP_ABB_TRANXDONE_TIMEOUT_US 50
+
+/**
+ * struct omap_ldo_abb_instance - Describe an LDO instance
+ * @prm_irq_id: PRM irq id for relevant for this block
+ * @ctrl_reg: control reg offset
+ * @setup_reg: setup reg offset
+ * @ctrl_bits: pointer to control register bitfield
+ * @setup_bits: pointer to setup register bitfield
+ * @settling_time: OMAP internal settling time(in uS)
+ * @cycle_rate: Cycle rate for the IP block
+ * @tranx_timeout: timeout count in uSec
+ * @ops: operations for ldo_abb
+ * @__cur_abb_type: private field used by the driver, do not use.
+ */
+struct omap_ldo_abb_instance {
+ u8 prm_irq_id;
+
+ u32 ctrl_reg;
+ u32 setup_reg;
+ struct omap_ldo_abb_ctrl_bits *ctrl_bits;
+ struct omap_ldo_abb_setup_bits *setup_bits;
+
+ unsigned long settling_time;
+ unsigned long cycle_rate;
+ unsigned int tranx_timeout;
+
+ struct omap_ldo_abb_ops *ops;
+ int __cur_abb_type;
+};
+
+extern struct omap_ldo_abb_instance omap3630_ldo_abb_mpu_instance;
+
+extern struct omap_ldo_abb_instance omap4_ldo_abb_mpu_instance;
+extern struct omap_ldo_abb_instance omap4_ldo_abb_iva_instance;
+
+extern int omap_ldo_abb_pre_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_vdata);
+extern int omap_ldo_abb_post_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_vdata);
+extern void __init omap_ldo_abb_init(struct voltagedomain *voltdm);
+
+#endif /* __ARCH_ARM_MACH_OMAP2_LDO_H */
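A sketch of how a voltage-scaling path is expected to bracket a VDD transition with the two hooks declared above, following the pre/post strategy described in ldo.c. vp_force_scale() is a hypothetical stand-in for whatever actually programs the new voltage, and the caller is assumed to provide the locking noted in _abb_scale().

static int example_voltdm_scale(struct voltagedomain *voltdm,
				struct omap_volt_data *target)
{
	int ret;

	/* Leaving a voltage that needs ABB: handled before the transition */
	ret = omap_ldo_abb_pre_scale(voltdm, target);
	if (ret)
		return ret;

	ret = vp_force_scale(voltdm, target);	/* hypothetical voltage write */
	if (ret)
		return ret;

	/* Entering a voltage that needs ABB: handled after the transition */
	return omap_ldo_abb_post_scale(voltdm, target);
}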
diff --git a/arch/arm/mach-omap2/ldo3xxx_data.c b/arch/arm/mach-omap2/ldo3xxx_data.c
new file mode 100644
index 0000000..870a912
--- /dev/null
+++ b/arch/arm/mach-omap2/ldo3xxx_data.c
@@ -0,0 +1,49 @@
+/*
+ * OMAP3xxx LDO data
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Mike Turquette <mturquette@ti.com>
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "voltage.h"
+#include "ldo.h"
+#include "prm2xxx_3xxx.h"
+#include "prm-regbits-34xx.h"
+
+static struct omap_ldo_abb_ops omap3630_ldo_abb_ops = {
+ .check_txdone = omap36xx_prm_abb_check_txdone,
+ .clear_txdone = omap36xx_prm_abb_clear_txdone,
+};
+
+/* WARNING: OMAP3630 as per TRM rev J, has the register names inverted */
+
+static struct omap_ldo_abb_setup_bits omap3630_ldo_abb_setup_bits = {
+ .enable_mask = OMAP_LDO_ABB_SETUP_SR2EN_MASK,
+ .active_fbb_mask = OMAP_LDO_ABB_SETUP_ACTIVE_FBB_SEL_MASK,
+ .wait_count_mask = OMAP_LDO_ABB_SETUP_SR2_WTCNT_VALUE_MASK,
+};
+
+static struct omap_ldo_abb_ctrl_bits omap3630_ldo_abb_ctrl_bits = {
+ .in_tansition_mask = OMAP_LDO_ABB_CTRL_SR2_IN_TRANSITION_MASK,
+ .status_mask = OMAP_LDO_ABB_CTRL_SR2_STATUS_MASK,
+ .opp_change_mask = OMAP_LDO_ABB_CTRL_OPP_CHANGE_MASK,
+ .opp_sel_mask = OMAP_LDO_ABB_CTRL_OPP_SEL_MASK,
+};
+
+struct omap_ldo_abb_instance omap3630_ldo_abb_mpu_instance = {
+ .prm_irq_id = OMAP3_PRM_IRQ_VDD_MPU_ID,
+ .ctrl_reg = OMAP3_PRM_LDO_ABB_CTRL_OFFSET,
+ .setup_reg = OMAP3_PRM_LDO_ABB_SETUP_OFFSET,
+ .ctrl_bits = &omap3630_ldo_abb_ctrl_bits,
+ .setup_bits = &omap3630_ldo_abb_setup_bits,
+ .ops = &omap3630_ldo_abb_ops,
+
+ .settling_time = 30,
+ .cycle_rate = 8,
+ .tranx_timeout = OMAP_ABB_TRANXDONE_TIMEOUT_US,
+};
diff --git a/arch/arm/mach-omap2/ldo4xxx_data.c b/arch/arm/mach-omap2/ldo4xxx_data.c
new file mode 100644
index 0000000..161e957
--- /dev/null
+++ b/arch/arm/mach-omap2/ldo4xxx_data.c
@@ -0,0 +1,60 @@
+/*
+ * OMAP4xxx LDO data
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Mike Turquette <mturquette@ti.com>
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "voltage.h"
+#include "ldo.h"
+#include "prm44xx.h"
+#include "prm-regbits-44xx.h"
+
+static struct omap_ldo_abb_ops omap4_ldo_abb_ops = {
+ .check_txdone = omap4_prm_abb_check_txdone,
+ .clear_txdone = omap4_prm_abb_clear_txdone,
+};
+
+static struct omap_ldo_abb_setup_bits omap4_ldo_abb_setup_bits = {
+ .enable_mask = OMAP_LDO_ABB_SETUP_SR2EN_MASK,
+ .active_fbb_mask = OMAP_LDO_ABB_SETUP_ACTIVE_FBB_SEL_MASK,
+ .wait_count_mask = OMAP_LDO_ABB_SETUP_SR2_WTCNT_VALUE_MASK,
+};
+
+static struct omap_ldo_abb_ctrl_bits omap4_ldo_abb_ctrl_bits = {
+ .in_tansition_mask = OMAP_LDO_ABB_CTRL_SR2_IN_TRANSITION_MASK,
+ .status_mask = OMAP_LDO_ABB_CTRL_SR2_STATUS_MASK,
+ .opp_change_mask = OMAP_LDO_ABB_CTRL_OPP_CHANGE_MASK,
+ .opp_sel_mask = OMAP_LDO_ABB_CTRL_OPP_SEL_MASK,
+};
+
+struct omap_ldo_abb_instance omap4_ldo_abb_mpu_instance = {
+ .prm_irq_id = OMAP4_PRM_IRQ_VDD_MPU_ID,
+ .ctrl_reg = OMAP4_PRM_LDO_ABB_MPU_CTRL_OFFSET,
+ .setup_reg = OMAP4_PRM_LDO_ABB_MPU_SETUP_OFFSET,
+ .ctrl_bits = &omap4_ldo_abb_ctrl_bits,
+ .setup_bits = &omap4_ldo_abb_setup_bits,
+ .ops = &omap4_ldo_abb_ops,
+
+ .settling_time = 50,
+ .cycle_rate = 16,
+ .tranx_timeout = OMAP_ABB_TRANXDONE_TIMEOUT_US,
+};
+
+struct omap_ldo_abb_instance omap4_ldo_abb_iva_instance = {
+ .prm_irq_id = OMAP4_PRM_IRQ_VDD_IVA_ID,
+ .ctrl_reg = OMAP4_PRM_LDO_ABB_IVA_CTRL_OFFSET,
+ .setup_reg = OMAP4_PRM_LDO_ABB_IVA_SETUP_OFFSET,
+ .ctrl_bits = &omap4_ldo_abb_ctrl_bits,
+ .setup_bits = &omap4_ldo_abb_setup_bits,
+ .ops = &omap4_ldo_abb_ops,
+
+ .settling_time = 50,
+ .cycle_rate = 16,
+ .tranx_timeout = OMAP_ABB_TRANXDONE_TIMEOUT_US,
+};
diff --git a/arch/arm/mach-omap2/lpddr2_elpida_data.c b/arch/arm/mach-omap2/lpddr2_elpida_data.c
new file mode 100644
index 0000000..aee63f1
--- /dev/null
+++ b/arch/arm/mach-omap2/lpddr2_elpida_data.c
@@ -0,0 +1,111 @@
+/*
+ * LPDDR2 data as per JESD209-2
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <mach/emif.h>
+#include <mach/lpddr2-elpida.h>
+
+const struct lpddr2_timings lpddr2_elpida_timings_400_mhz = {
+ .max_freq = 400000000,
+ .RL = 6,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 15,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+const struct lpddr2_timings lpddr2_elpida_timings_333_mhz = {
+ .max_freq = 333000000,
+ .RL = 5,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 15,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+const struct lpddr2_timings lpddr2_elpida_timings_200_mhz = {
+ .max_freq = 200000000,
+ .RL = 3,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 20,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+const struct lpddr2_min_tck lpddr2_elpida_min_tck = {
+ .tRL = 3,
+ .tRP_AB = 3,
+ .tRCD = 3,
+ .tWR = 3,
+ .tRAS_MIN = 3,
+ .tRRD = 2,
+ .tWTR = 2,
+ .tXP = 2,
+ .tRTP = 2,
+ .tCKE = 3,
+ .tCKESR = 3,
+ .tFAW = 8
+};
+
+struct lpddr2_device_info lpddr2_elpida_2G_S4_dev = {
+ .device_timings = {
+ &lpddr2_elpida_timings_200_mhz,
+ &lpddr2_elpida_timings_333_mhz,
+ &lpddr2_elpida_timings_400_mhz
+ },
+ .min_tck = &lpddr2_elpida_min_tck,
+ .type = LPDDR2_TYPE_S4,
+ .density = LPDDR2_DENSITY_2Gb,
+ .io_width = LPDDR2_IO_WIDTH_32
+};
diff --git a/arch/arm/mach-omap2/lpddr2_jedec_data.c b/arch/arm/mach-omap2/lpddr2_jedec_data.c
new file mode 100644
index 0000000..e8b447c
--- /dev/null
+++ b/arch/arm/mach-omap2/lpddr2_jedec_data.c
@@ -0,0 +1,132 @@
+/*
+ * LPDDR2 data as per JESD209-2
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <mach/lpddr2-jedec.h>
+#include <mach/emif.h>
+
+/*
+ * Organization and refresh requirements for LPDDR2 devices of different
+ * types and densities. Derived from JESD209-2 section 2.4
+ */
+const struct lpddr2_addressing lpddr2_jedec_addressing_table[] = {
+ /* Banks tREFIx10 rowx32,rowx16 colx32,colx16 density */
+ {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} }, /*64M*/
+ {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} }, /*128M*/
+ {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} }, /*256M*/
+ {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} }, /*512M*/
+ {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} }, /*1GS4*/
+ {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} }, /*2GS4*/
+ {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} }, /*4G*/
+ {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} }, /*8G*/
+ {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} }, /*1GS2*/
+ {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} }, /*2GS2*/
+};
+
+/*
+ * Base AC Timing values specified by JESD209-2 for 400MHz operation
+ * All devices will honour these timings at this frequency.
+ * Some devices may have better timings. Using these timings is safe when the
+ * timings are not available from the device data sheet.
+ */
+const struct lpddr2_timings lpddr2_jedec_timings_400_mhz = {
+ .max_freq = 400000000,
+ .RL = 6,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 15,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+/* Base AC Timing values specified by JESD209-2 for 333 MHz operation */
+const struct lpddr2_timings lpddr2_jedec_timings_333_mhz = {
+ .max_freq = 333000000,
+ .RL = 5,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 15,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+/* Base AC Timing values specified by JESD209-2 for 200 MHz operation */
+const struct lpddr2_timings lpddr2_jedec_timings_200_mhz = {
+ .max_freq = 200000000,
+ .RL = 3,
+ .tRPab = 21,
+ .tRCD = 18,
+ .tWR = 15,
+ .tRASmin = 42,
+ .tRRD = 10,
+ .tWTRx2 = 20,
+ .tXSR = 140,
+ .tXPx2 = 15,
+ .tRFCab = 130,
+ .tRTPx2 = 15,
+ .tCKE = 3,
+ .tCKESR = 15,
+ .tZQCS = 90,
+ .tZQCL = 360,
+ .tZQINIT = 1000,
+ .tDQSCKMAXx2 = 11,
+ .tRASmax = 70,
+ .tFAW = 50
+};
+
+/*
+ * Min tCK values specified by JESD209-2
+ * Min tCK specifies the minimum duration of some AC timing parameters in terms
+ * of the number of cycles. If the calculated number of cycles based on the
+ * absolute time value is less than the min tCK value, min tCK value should
+ * be used instead. This typically happens at low frequencies.
+ */
+const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
+ .tRL = 3,
+ .tRP_AB = 3,
+ .tRCD = 3,
+ .tWR = 3,
+ .tRAS_MIN = 3,
+ .tRRD = 2,
+ .tWTR = 2,
+ .tXP = 2,
+ .tRTP = 2,
+ .tCKE = 3,
+ .tCKESR = 3,
+ .tFAW = 8
+};
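A small sketch of the min-tCK rule described above: convert an absolute AC timing (in ns) to DDR clock cycles at the current frequency and clamp it to the JEDEC minimum. The helper name and the MHz-based arithmetic are illustrative only, not part of the EMIF driver.

#include <linux/kernel.h>	/* DIV_ROUND_UP(), max() */

static u32 example_timing_in_cycles(u32 timing_ns, u32 min_tck_cycles,
				    u32 ddr_freq_mhz)
{
	/* cycles = ceil(timing_ns * freq_mhz / 1000) */
	u32 cycles = DIV_ROUND_UP(timing_ns * ddr_freq_mhz, 1000);

	/* e.g. tRRD = 10 ns at 200 MHz gives 2 cycles, already the minimum */
	return max(cycles, min_tck_cycles);
}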
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 86d564a..74750bf 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -14,33 +14,39 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <plat/mailbox.h>
#include <mach/irqs.h>
#define MAILBOX_REVISION 0x000
-#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
-#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
-#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
-#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
-#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+#define MAILBOX_SYSCONFIG 0x10
+#define MAILBOX_MESSAGE(m) (0x040 + 0x4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 0x4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 0x4 * (m))
+#define MAILBOX_IRQSTATUS(u) (0x100 + 0x8 * (u))
+#define MAILBOX_IRQENABLE(u) (0x104 + 0x8 * (u))
-#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 10 * (u))
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+#define MAILBOX_SOFTRESET 1
-#define MBOX_REG_SIZE 0x120
-
-#define OMAP4_MBOX_REG_SIZE 0x130
-
-#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
-#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
+#define MBOX_NUM_USER 2
+#define OMAP4_MBOX_NUM_USER 3
+#define MBOX_NR_REGS 2
+#define OMAP4_MBOX_NR_REGS 3
static void __iomem *mbox_base;
+static u32 *mbox_ctx;
+static int nr_mbox_users;
+static bool context_saved;
+
struct omap_mbox2_fifo {
unsigned long msg;
unsigned long fifo_stat;
@@ -54,7 +60,6 @@
unsigned long irqstatus;
u32 newmsg_bit;
u32 notfull_bit;
- u32 ctx[OMAP4_MBOX_NR_REGS];
unsigned long irqdisable;
};
@@ -71,14 +76,66 @@
__raw_writel(val, mbox_base + ofs);
}
+static void omap2_mbox_save_ctx(struct omap_mbox *mbox)
+{
+ int i;
+
+ if (context_saved)
+ return;
+
+ /* Save irqs per user */
+ for (i = 0; i < nr_mbox_users; i++) {
+ if (cpu_is_omap44xx())
+ mbox_ctx[i] = mbox_read_reg(OMAP4_MAILBOX_IRQENABLE(i));
+ else
+ mbox_ctx[i] = mbox_read_reg(MAILBOX_IRQENABLE(i));
+
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox_ctx[i]);
+ }
+
+ context_saved = true;
+}
+
+static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
+{
+ int i;
+
+ if (!context_saved)
+ return;
+
+ /* Restore irqs per user */
+ for (i = 0; i < nr_mbox_users; i++) {
+ if (cpu_is_omap44xx())
+ mbox_write_reg(mbox_ctx[i], OMAP4_MAILBOX_IRQENABLE(i));
+ else
+ mbox_write_reg(mbox_ctx[i], MAILBOX_IRQENABLE(i));
+
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox_ctx[i]);
+ }
+
+ context_saved = false;
+}
+
/* Mailbox H/W preparations */
static int omap2_mbox_startup(struct omap_mbox *mbox)
{
u32 l;
+ u32 max_iter = 100;
pm_runtime_enable(mbox->dev->parent);
pm_runtime_get_sync(mbox->dev->parent);
+ mbox_write_reg(MAILBOX_SOFTRESET, MAILBOX_SYSCONFIG);
+ while (mbox_read_reg(MAILBOX_SYSCONFIG) & MAILBOX_SOFTRESET) {
+ if (WARN_ON(!max_iter--))
+ break;
+ udelay(1);
+ }
+
+ omap2_mbox_restore_ctx(mbox);
+
l = mbox_read_reg(MAILBOX_REVISION);
pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
@@ -89,6 +146,7 @@
static void omap2_mbox_shutdown(struct omap_mbox *mbox)
{
+ omap2_mbox_save_ctx(mbox);
pm_runtime_put_sync(mbox->dev->parent);
pm_runtime_disable(mbox->dev->parent);
}
@@ -169,40 +227,6 @@
return (int)(enable & status & bit);
}
-static void omap2_mbox_save_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
- if (cpu_is_omap44xx())
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- p->ctx[i] = mbox_read_reg(i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
-static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
- if (cpu_is_omap44xx())
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- mbox_write_reg(p->ctx[i], i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
static struct omap_mbox_ops omap2_mbox_ops = {
.type = OMAP_MBOX_TYPE2,
.startup = omap2_mbox_startup,
@@ -373,17 +397,31 @@
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
mbox_base = ioremap(mem->start, resource_size(mem));
if (!mbox_base)
return -ENOMEM;
- ret = omap_mbox_register(&pdev->dev, list);
- if (ret) {
- iounmap(mbox_base);
- return ret;
+ nr_mbox_users = cpu_is_omap44xx() ? OMAP4_MBOX_NUM_USER : MBOX_NUM_USER;
+ mbox_ctx = kzalloc(sizeof(u32) * nr_mbox_users, GFP_KERNEL);
+ if (!mbox_ctx) {
+ ret = -ENOMEM;
+ goto unmap_base;
}
+ ret = omap_mbox_register(&pdev->dev, list);
+ if (ret)
+ goto free_ctx;
+
return 0;
+
+free_ctx:
+ kfree(mbox_ctx);
+unmap_base:
+ iounmap(mbox_base);
+ return ret;
}
static int __devexit omap2_mbox_remove(struct platform_device *pdev)
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 4a6ef6a..6afbc0d 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -71,9 +71,9 @@
mcbsp = id_to_mcbsp_ptr(id);
if (fck_src_id == MCBSP_CLKS_PAD_SRC)
- fck_src_name = "pad_fck";
+ fck_src_name = mcbsp->pdata->clks_pad_src;
else if (fck_src_id == MCBSP_CLKS_PRCM_SRC)
- fck_src_name = "prcm_fck";
+ fck_src_name = mcbsp->pdata->clks_prcm_src;
else
return -EINVAL;
@@ -129,12 +129,21 @@
pdata->mcbsp_config_type = oh->class->rev;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
+ strcpy(pdata->clks_pad_src, "pad_fck");
+ strcpy(pdata->clks_prcm_src, "prcm_fck");
+
if (id == 2)
/* The FIFO has 1024 + 256 locations */
pdata->buffer_size = 0x500;
else
/* The FIFO has 128 locations */
pdata->buffer_size = 0x80;
+ } else if (oh->class->rev == MCBSP_CONFIG_TYPE4) {
+ strcpy(pdata->clks_pad_src, "pad_clks_ck");
+ sprintf(pdata->clks_prcm_src, "mcbsp%d_sync_mux_ck", id);
+
+ /* The FIFO has 128 locations for all instances */
+ pdata->buffer_size = 0x80;
}
oh_device[0] = oh;
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index c7fb22a..c7d0b21 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -118,10 +118,8 @@
}
}
- if (found == 0) {
- pr_err("%s: Could not set gpio%i\n", __func__, gpio);
+ if (found == 0)
return -ENODEV;
- }
if (found > 1) {
pr_info("%s: Multiple gpio paths (%d) for gpio%i\n", __func__,
@@ -153,6 +151,8 @@
return ret;
}
+ pr_err("%s: Could not set gpio%i\n", __func__, gpio);
+
return -ENODEV;
}
@@ -210,8 +210,6 @@
return -EINVAL;
}
- pr_err("%s: Could not find signal %s\n", __func__, muxname);
-
return -ENODEV;
}
@@ -234,6 +232,8 @@
return mux_mode;
}
+ pr_err("%s: Could not find signal %s\n", __func__, muxname);
+
return -ENODEV;
}
@@ -351,6 +351,36 @@
return NULL;
}
+/**
+ * omap_hwmod_mux_get_wake_status - omap hwmod check pad wakeup
+ * @hmux: Pads for a hwmod
+ *
+ * Gets the wakeup status of the given hwmod's pads from omap-hwmod.
+ * Returns true if a wakeup event is set for any pad, else false
+ * if no wakeup event has occurred or no pads are available.
+ */
+int omap_hwmod_mux_get_wake_status(struct omap_hwmod_mux_info *hmux)
+{
+ int i;
+ unsigned int val;
+ u8 ret = false;
+
+ for (i = 0; i < hmux->nr_pads; i++) {
+ struct omap_device_pad *pad = &hmux->pads[i];
+
+ if (pad->flags & OMAP_DEVICE_PAD_WAKEUP) {
+ val = omap_mux_read(pad->partition,
+ pad->mux->reg_offset);
+ if (val & OMAP_WAKEUP_EVENT) {
+ ret = true;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* Assumes the calling function takes care of locking */
void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
{
@@ -906,7 +936,7 @@
}
/* Needed for dynamic muxing of GPIO pins for off-idle */
-u16 omap_mux_get_gpio(int gpio)
+struct omap_mux *omap_mux_get_gpio(int gpio)
{
struct omap_mux_partition *partition;
struct omap_mux *m = NULL;
@@ -914,13 +944,10 @@
list_for_each_entry(partition, &mux_partitions, node) {
m = omap_mux_get_by_gpio(partition, gpio);
if (m)
- return omap_mux_read(partition, m->reg_offset);
+ return m;
}
- if (!m || m->reg_offset == OMAP_MUX_TERMINATOR)
- pr_err("%s: Could not get gpio%i\n", __func__, gpio);
-
- return OMAP_MUX_TERMINATOR;
+ return NULL;
}
/* Needed for dynamic muxing of GPIO pins for off-idle */
@@ -941,6 +968,55 @@
pr_err("%s: Could not set gpio%i\n", __func__, gpio);
}
+bool omap_mux_get_wakeupevent(struct omap_mux *m)
+{
+ u16 val;
+ if (IS_ERR_OR_NULL(m) || !cpu_is_omap44xx())
+ return false;
+
+ val = omap_mux_read(m->partition, m->reg_offset);
+ return val & OMAP_WAKEUP_EVENT;
+}
+
+/* Has no locking, don't use on a pad that is remuxed (by hwmod or otherwise) */
+bool omap_mux_get_wakeupenable(struct omap_mux *m)
+{
+ u16 val;
+ if (IS_ERR_OR_NULL(m))
+ return false;
+
+ val = omap_mux_read(m->partition, m->reg_offset);
+ return val & OMAP_PIN_OFF_WAKEUPENABLE;
+}
+
+/* Has no locking, don't use on a pad that is remuxed (by hwmod or otherwise) */
+int omap_mux_set_wakeupenable(struct omap_mux *m)
+{
+ u16 val;
+ if (IS_ERR_OR_NULL(m))
+ return -EINVAL;
+
+ val = omap_mux_read(m->partition, m->reg_offset);
+ val |= OMAP_PIN_OFF_WAKEUPENABLE;
+ omap_mux_write(m->partition, val, m->reg_offset);
+
+ return 0;
+}
+
+/* Has no locking, don't use on a pad that is remuxed (by hwmod or otherwise) */
+int omap_mux_clear_wakeupenable(struct omap_mux *m)
+{
+ u16 val;
+ if (IS_ERR_OR_NULL(m))
+ return -EINVAL;
+
+ val = omap_mux_read(m->partition, m->reg_offset);
+ val &= ~OMAP_PIN_OFF_WAKEUPENABLE;
+ omap_mux_write(m->partition, val, m->reg_offset);
+
+ return 0;
+}
+
static struct omap_mux * __init omap_mux_list_add(
struct omap_mux_partition *partition,
struct omap_mux *src)
@@ -954,6 +1030,7 @@
m = &entry->mux;
entry->mux = *src;
+ m->partition = partition;
#ifdef CONFIG_OMAP_MUX
if (omap_mux_copy_names(src, m)) {
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 2132308..e631b5e 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -131,6 +131,7 @@
struct omap_mux {
u16 reg_offset;
u16 gpio;
+ struct omap_mux_partition *partition;
#ifdef CONFIG_OMAP_MUX
char *muxnames[OMAP_MUX_NR_MODES];
#ifdef CONFIG_DEBUG_FS
@@ -225,8 +226,21 @@
*/
void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
+/**
+ * omap_hwmod_mux_get_wake_status - omap hwmod check pad wakeup
+ * @hmux: Pads for a hwmod
+ *
+ * Called only from omap_hwmod.c, do not use.
+ */
+int omap_hwmod_mux_get_wake_status(struct omap_hwmod_mux_info *hmux);
#else
+static inline int
+omap_hwmod_mux_get_wake_status(struct omap_hwmod_mux_info *hmux)
+{
+ return 0;
+}
+
static inline int omap_mux_init_gpio(int gpio, int val)
{
return 0;
@@ -251,11 +265,31 @@
#endif
/**
- * omap_mux_get_gpio() - get mux register value based on GPIO number
+ * omap_mux_get_gpio() - get mux struct based on GPIO number
* @gpio: GPIO number
*
*/
-u16 omap_mux_get_gpio(int gpio);
+struct omap_mux *omap_mux_get_gpio(int gpio);
+
+/** omap_mux_set_wakeupenable() - set the wakeupenable bit on a mux struct
+ * @m: mux struct
+ */
+int omap_mux_set_wakeupenable(struct omap_mux *m);
+
+/** omap_mux_clear_wakeupenable() - clear the wakeupenable bit on a mux struct
+ * @m: mux struct
+ */
+int omap_mux_clear_wakeupenable(struct omap_mux *m);
+
+/** omap_mux_get_wakeupenable() - get the wakeupenable bit from a mux struct
+ * @m: mux struct
+ */
+bool omap_mux_get_wakeupenable(struct omap_mux *m);
+
+/** omap_mux_get_wakeupevent() - get the wakeupevent bit from a mux struct
+ * @m: mux struct
+ */
+bool omap_mux_get_wakeupevent(struct omap_mux *m);
/**
* omap_mux_set_gpio() - set mux register value based on GPIO number
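A sketch of the off-idle GPIO wakeup flow using the struct-based helpers added above. The GPIO number and the split into enable/check helpers are purely illustrative, and the no-locking caveat from mux.c applies to the pad used here.

#include <linux/errno.h>
#include "mux.h"	/* assumed in-tree mach-omap2 context */

static struct omap_mux *example_wake_mux;

static int example_enable_gpio_wakeup(int gpio)
{
	example_wake_mux = omap_mux_get_gpio(gpio);
	if (!example_wake_mux)
		return -ENODEV;

	/* Let this pad generate wakeups while the domain is in off-idle */
	return omap_mux_set_wakeupenable(example_wake_mux);
}

static bool example_gpio_caused_wakeup(void)
{
	/* Wakeup-event status is only reported on OMAP4 */
	return omap_mux_get_wakeupevent(example_wake_mux);
}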
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 4976b93..f69cd5c 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -19,7 +19,13 @@
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+
#include <mach/omap4-common.h>
+#include <mach/omap-wakeupgen.h>
+
+#include "powerdomain.h"
+#include "clockdomain.h"
int platform_cpu_kill(unsigned int cpu)
{
@@ -32,6 +38,12 @@
*/
void platform_cpu_die(unsigned int cpu)
{
+ unsigned int this_cpu;
+ static struct clockdomain *cpu1_clkdm;
+
+ if (!cpu1_clkdm)
+ cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
+
flush_cache_all();
dsb();
@@ -39,18 +51,26 @@
* we're ready for shutdown now, so do it
*/
if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
- printk(KERN_CRIT "Secure clear status failed\n");
+ pr_err("Secure clear status failed\n");
for (;;) {
/*
- * Execute WFI
+ * Enter into low power state
+ * clear all interrupt wakeup sources
*/
- do_wfi();
-
- if (omap_read_auxcoreboot0() == cpu) {
+ omap_wakeupgen_irqmask_all(cpu, 1);
+ gic_cpu_disable();
+ omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);
+ this_cpu = hard_smp_processor_id();
+ if (omap_read_auxcoreboot0() == this_cpu) {
/*
* OK, proper wakeup, we're done
*/
+ omap_wakeupgen_irqmask_all(this_cpu, 0);
+ gic_cpu_enable();
+
+ /* Restore clockdomain to hardware supervised */
+ clkdm_allow_idle(cpu1_clkdm);
break;
}
pr_debug("CPU%u: spurious wakeup call\n", cpu);
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 3fc5dc7..6965b4d 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -11,9 +11,12 @@
*/
#include <linux/platform_device.h>
+#include <linux/err.h>
#include <plat/iommu.h>
-#include <plat/irqs.h>
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+
struct iommu_device {
resource_size_t base;
@@ -21,145 +24,118 @@
struct iommu_platform_data pdata;
struct resource res[2];
};
-static struct iommu_device *devices;
+static struct iommu_platform_data *devices_data;
static int num_iommu_devices;
#ifdef CONFIG_ARCH_OMAP3
-static struct iommu_device omap3_devices[] = {
+static struct iommu_platform_data omap3_devices_data[] = {
{
- .base = 0x480bd400,
- .irq = 24,
- .pdata = {
- .name = "isp",
- .nr_tlb_entries = 8,
- .clk_name = "cam_ick",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
+ .name = "isp",
+ .oh_name = "isp",
+ .nr_tlb_entries = 8,
+ .da_start = 0x0,
+ .da_end = 0xFFFFF000,
},
#if defined(CONFIG_OMAP_IOMMU_IVA2)
{
- .base = 0x5d000000,
- .irq = 28,
- .pdata = {
- .name = "iva2",
- .nr_tlb_entries = 32,
- .clk_name = "iva2_ck",
- .da_start = 0x11000000,
- .da_end = 0xFFFFF000,
- },
+ .name = "iva2",
+ .oh_name = "dsp",
+ .nr_tlb_entries = 32,
+ .da_start = 0x11000000,
+ .da_end = 0xFFFFF000,
},
#endif
};
-#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices)
-static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES];
+#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices_data)
#else
-#define omap3_devices NULL
+#define omap3_devices_data NULL
#define NR_OMAP3_IOMMU_DEVICES 0
-#define omap3_iommu_pdev NULL
#endif
#ifdef CONFIG_ARCH_OMAP4
-static struct iommu_device omap4_devices[] = {
+static struct iommu_platform_data omap4_devices_data[] = {
{
- .base = OMAP4_MMU1_BASE,
- .irq = OMAP44XX_IRQ_DUCATI_MMU,
- .pdata = {
- .name = "ducati",
- .nr_tlb_entries = 32,
- .clk_name = "ducati_ick",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
+ .name = "ducati",
+ .oh_name = "ipu",
+ .nr_tlb_entries = 32,
+ .da_start = 0x0,
+ .da_end = 0xFFFFF000,
},
-#if defined(CONFIG_MPU_TESLA_IOMMU)
{
- .base = OMAP4_MMU2_BASE,
- .irq = INT_44XX_DSP_MMU,
- .pdata = {
- .name = "tesla",
- .nr_tlb_entries = 32,
- .clk_name = "tesla_ick",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
+ .name = "tesla",
+ .oh_name = "dsp",
+ .nr_tlb_entries = 32,
+ .da_start = 0x0,
+ .da_end = 0xFFFFF000,
},
-#endif
};
-#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
-static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
+#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices_data)
#else
-#define omap4_devices NULL
+#define omap4_devices_data NULL
#define NR_OMAP4_IOMMU_DEVICES 0
-#define omap4_iommu_pdev NULL
#endif
-static struct platform_device **omap_iommu_pdev;
+static struct omap_device_pm_latency omap_iommu_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+int iommu_get_plat_data_size(void)
+{
+ return num_iommu_devices;
+}
+EXPORT_SYMBOL(iommu_get_plat_data_size);
+
+struct iommu_platform_data *iommu_get_device_data(void)
+{
+ return devices_data;
+}
static int __init omap_iommu_init(void)
{
- int i, err;
- struct resource res[] = {
- { .flags = IORESOURCE_MEM },
- { .flags = IORESOURCE_IRQ },
- };
+ int i, ohl_cnt;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ struct omap_device_pm_latency *ohl;
if (cpu_is_omap34xx()) {
- devices = omap3_devices;
- omap_iommu_pdev = omap3_iommu_pdev;
+ devices_data = omap3_devices_data;
num_iommu_devices = NR_OMAP3_IOMMU_DEVICES;
} else if (cpu_is_omap44xx()) {
- devices = omap4_devices;
- omap_iommu_pdev = omap4_iommu_pdev;
+ devices_data = omap4_devices_data;
num_iommu_devices = NR_OMAP4_IOMMU_DEVICES;
} else
return -ENODEV;
+ ohl = omap_iommu_latency;
+ ohl_cnt = ARRAY_SIZE(omap_iommu_latency);
+
for (i = 0; i < num_iommu_devices; i++) {
- struct platform_device *pdev;
- const struct iommu_device *d = &devices[i];
+ struct iommu_platform_data *data = &devices_data[i];
- pdev = platform_device_alloc("omap-iommu", i);
- if (!pdev) {
- err = -ENOMEM;
- goto err_out;
+ oh = omap_hwmod_lookup(data->oh_name);
+ if (!oh) {
+ pr_err("%s: could not look up %s\n", __func__,
+ data->oh_name);
+ continue;
+ }
+
+ data->io_base = oh->_mpu_rt_va;
+ data->irq = oh->mpu_irqs[0].irq;
-
- res[0].start = d->base;
- res[0].end = d->base + MMU_REG_SIZE - 1;
- res[1].start = res[1].end = d->irq;
-
- err = platform_device_add_resources(pdev, res,
- ARRAY_SIZE(res));
- if (err)
- goto err_out;
- err = platform_device_add_data(pdev, &d->pdata,
- sizeof(d->pdata));
- if (err)
- goto err_out;
- err = platform_device_add(pdev);
- if (err)
- goto err_out;
- omap_iommu_pdev[i] = pdev;
+ od = omap_device_build("omap-iommu", i, oh,
+ data, sizeof(*data),
+ ohl, ohl_cnt, false);
+ WARN(IS_ERR(od), "Could not build omap_device"
+ "for %s %s\n", "omap-iommu", data->oh_name);
}
return 0;
-
-err_out:
- while (i--)
- platform_device_put(omap_iommu_pdev[i]);
- return err;
}
module_init(omap_iommu_init);
-static void __exit omap_iommu_exit(void)
-{
- int i;
-
- for (i = 0; i < num_iommu_devices; i++)
- platform_device_unregister(omap_iommu_pdev[i]);
-}
-module_exit(omap_iommu_exit);
-
MODULE_AUTHOR("Hiroshi DOYU");
+MODULE_AUTHOR("Hari Kanigeri");
MODULE_DESCRIPTION("omap iommu: omap device registration");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ecfe93c..40e425a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -17,7 +17,9 @@
*/
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/smp.h>
+#include <linux/hrtimer.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
@@ -26,13 +28,27 @@
#include <mach/hardware.h>
#include <mach/omap4-common.h>
+#include "clockdomain.h"
+
/* SCU base address */
static void __iomem *scu_base;
static DEFINE_SPINLOCK(boot_lock);
+
+void __iomem *omap4_get_scu_base(void)
+{
+ return scu_base;
+}
+
void __cpuinit platform_secondary_init(unsigned int cpu)
{
+ /* Enable NS access to SMP bit for this CPU on HS devices */
+ if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+ omap4_secure_dispatcher(PPA_SERVICE_DEFAULT_POR_NS_SMP,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+
/*
* If any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -49,6 +65,9 @@
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ static struct clockdomain *cpu1_clkdm;
+ static bool booted;
+
/*
* Set synchronisation state between this boot processor
* and the secondary one
@@ -64,7 +83,57 @@
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
flush_cache_all();
smp_wmb();
- gic_raise_softirq(cpumask_of(cpu), 1);
+
+ if (!cpu1_clkdm)
+ cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
+
+ /*
+ * SGIs (Software Generated Interrupts) are not wakeup capable
+ * from low power states. This is a known limitation on OMAP4 and
+ * needs to be worked around by using software-forced clockdomain
+ * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
+ * software force wakeup. After the wakeup, CPU1 restores its
+ * clockdomain hardware supervised mode.
+ * More details can be found in OMAP4430 TRM - Version J
+ * Section :
+ * 4.3.4.2 Power States of CPU0 and CPU1
+ */
+ if (booted) {
+ /*
+ * GIC distributor control register has changed between
+ * CortexA9 r1pX and r2pX. The Control Register secure
+ * banked version is now composed of 2 bits:
+ * bit 0 == Secure Enable
+ * bit 1 == Non-Secure Enable
+ * The Non-Secure banked register has not changed
+ * Because the ROM Code is based on the r1pX GIC, the CPU1
+ * GIC restoration will cause a problem to CPU0 Non-Secure SW.
+ * The workaround must be:
+ * 1) Before doing the CPU1 wakeup, CPU0 must disable
+ * the GIC distributor
+ * 2) CPU1 must re-enable the GIC distributor on
+ * its wakeup path.
+ */
+ if (!cpu_is_omap443x()) {
+ local_irq_disable();
+ gic_dist_disable();
+ }
+
+ clkdm_wakeup(cpu1_clkdm);
+
+ if (!cpu_is_omap443x()) {
+ while (gic_dist_disabled()) {
+ udelay(1);
+ cpu_relax();
+ }
+ gic_timer_retrigger();
+ local_irq_enable();
+ }
+
+ } else {
+ dsb_sev();
+ booted = true;
+ }
/*
* Now the secondary core is starting up let it run its
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
new file mode 100644
index 0000000..7567ee6
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -0,0 +1,311 @@
+/*
+ * OMAP WakeupGen Source file
+ *
+ * The WakeupGen unit is responsible for generating wakeup events from
+ * incoming interrupts and their enable bits. The WakeupGen is implemented in
+ * the MPU always-on power domain. It consists of two sub-units, one per
+ * CPU, and manages only SPI interrupts. The hardware requirement is that
+ * the GIC and WakeupGen be kept in sync for proper operation.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/omap-wakeupgen.h>
+#include <mach/omap4-common.h>
+
+#include "omap4-sar-layout.h"
+
+#define NR_BANKS 4
+#define MAX_IRQS 128
+#define WKG_MASK_ALL 0x00000000
+#define WKG_UNMASK_ALL 0xffffffff
+#define CPU_ENA_OFFSET 0x400
+#define CPU0_ID 0x0
+#define CPU1_ID 0x1
+
+/* WakeupGen base address */
+static void __iomem *wakeupgen_base;
+static void __iomem *sar_base;
+static DEFINE_PER_CPU(u32 [NR_BANKS], irqmasks);
+static DEFINE_SPINLOCK(wakeupgen_lock);
+
+/*
+ * Static helper functions
+ */
+
+static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
+{
+ return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
+ (cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
+{
+ __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
+ (cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void sar_writel(u32 val, u32 offset, u8 idx)
+{
+ __raw_writel(val, sar_base + offset + (idx * 4));
+}
+
+static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
+{
+ u8 i;
+
+ for (i = 0; i < NR_BANKS; i++)
+ wakeupgen_writel(reg, i, cpu);
+}
+
+static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
+{
+ unsigned int spi_irq;
+
+ /*
+ * PPIs and SGIs are not supported
+ */
+ if (irq < OMAP44XX_IRQ_GIC_START)
+ return -EINVAL;
+
+ /*
+ * Subtract the GIC offset
+ */
+ spi_irq = irq - OMAP44XX_IRQ_GIC_START;
+ if (spi_irq >= MAX_IRQS) {
+ pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
+ return -EINVAL;
+ }
+
+ /*
+ * Each wakeup gen register controls 32
+ * interrupts. i.e 1 bit per SPI IRQ
+ */
+ *reg_index = spi_irq >> 5;
+ *bit_posn = spi_irq %= 32;
+
+ return 0;
+}
+
+static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
+{
+ u32 val, bit_number;
+ u8 i;
+
+ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+ return;
+
+ val = wakeupgen_readl(i, cpu);
+ val &= ~BIT(bit_number);
+ wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
+{
+ u32 val, bit_number;
+ u8 i;
+
+ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+ return;
+
+ val = wakeupgen_readl(i, cpu);
+ val |= BIT(bit_number);
+ wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_save_masks(unsigned int cpu)
+{
+ u8 i;
+
+ for (i = 0; i < NR_BANKS; i++)
+ per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
+}
+
+static void _wakeupgen_restore_masks(unsigned int cpu)
+{
+ u8 i;
+
+ for (i = 0; i < NR_BANKS; i++)
+ wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
+}
+
+/*
+ * Architecture-specific mask extension
+ */
+static void wakeupgen_mask(struct irq_data *d)
+{
+ spin_lock(&wakeupgen_lock);
+ _wakeupgen_clear(d->irq, d->node);
+ spin_unlock(&wakeupgen_lock);
+}
+
+/*
+ * Architecture-specific unmask extension
+ */
+static void wakeupgen_unmask(struct irq_data *d)
+{
+ spin_lock(&wakeupgen_lock);
+ _wakeupgen_set(d->irq, d->node);
+ spin_unlock(&wakeupgen_lock);
+}
+
+/**
+ * omap_wakeupgen_irqmask_all() - Mask or unmask interrupts
+ * @cpu - CPU ID
+ * @set - The IRQ register mask.
+ * 1 = Mask all interrupts on the 'cpu'
+ * 0 = Unmask all interrupts on the 'cpu'
+ *
+ * Ensures that the initial mask is maintained. This is faster than
+ * iterating through the GIC registers to arrive at the correct masks.
+ */
+void omap_wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
+{
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return;
+
+ spin_lock(&wakeupgen_lock);
+ if (set) {
+ _wakeupgen_save_masks(cpu);
+ _wakeupgen_set_all(cpu, WKG_MASK_ALL);
+ } else {
+ _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
+ _wakeupgen_restore_masks(cpu);
+ }
+ spin_unlock(&wakeupgen_lock);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Masking wakeup irqs is handled by the IRQCHIP_MASK_ON_SUSPEND flag,
+ * so no action is necessary in set_wake, but implement an empty handler
+ * here to prevent enable_irq_wake() returning an error.
+ */
+static int wakeupgen_set_wake(struct irq_data *d, unsigned int on)
+{
+ return 0;
+}
+#else
+#define wakeupgen_set_wake NULL
+#endif
+
+/*
+ * Initialise the WakeupGen module
+ */
+int __init omap_wakeupgen_init(void)
+{
+ u8 i;
+
+ /* Not supported on OMAP4 ES1.0 silicon */
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
+ return -EPERM;
+ }
+
+ /* Static mapping, never released */
+ wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
+ if (WARN_ON(!wakeupgen_base))
+ return -ENODEV;
+
+ /* Clear all IRQ bitmasks at wakeupGen level */
+ for (i = 0; i < NR_BANKS; i++) {
+ wakeupgen_writel(0, i, CPU0_ID);
+ wakeupgen_writel(0, i, CPU1_ID);
+ }
+
+ /*
+ * Override the GIC architecture-specific functions to hook the
+ * OMAP WakeupGen interrupt controller in alongside the GIC
+ */
+ gic_arch_extn.irq_mask = wakeupgen_mask;
+ gic_arch_extn.irq_unmask = wakeupgen_unmask;
+ gic_arch_extn.irq_set_wake = wakeupgen_set_wake;
+ gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ return 0;
+}
+
+/**
+ * omap_wakeupgen_save() - WakeupGen context save function
+ *
+ * Save WakeupGen context in SAR BANK3. Restore is done by ROM code.
+ * WakeupGen IP is integrated along with GIC to manage the
+ * interrupt wakeups from CPU low power states. It's located in
+ * the always-ON power domain. It manages masking/unmasking of
+ * Shared Peripheral Interrupts (SPI). So the interrupt enable/disable
+ * control should be in sync and consistent at WakeupGen and GIC so
+ * that interrupts are not lost. Hence GIC and WakeupGen are saved
+ * and restored together.
+ *
+ * During normal operation, WakeupGen delivers external interrupts
+ * directly to the GIC. When the CPU asserts StandbyWFI, indicating
+ * it wants to enter a low-power state, the Standby Controller checks
+ * with the WakeupGen unit using the idlereq/idleack handshake to make
+ * sure there are no incoming interrupts.
+ */
+
+void omap_wakeupgen_save(void)
+{
+ u8 i;
+ u32 val;
+
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return;
+
+ if (!sar_base)
+ sar_base = omap4_get_sar_ram_base();
+
+ for (i = 0; i < NR_BANKS; i++) {
+ /* Save the CPUx interrupt mask for IRQ 0 to 127 */
+ val = wakeupgen_readl(i, 0);
+ sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
+ val = wakeupgen_readl(i, 1);
+ sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
+
+ /*
+ * Disable the secure interrupts for CPUx. The restore
+ * code blindly restores secure and non-secure interrupt
+ * masks from SAR RAM. Secure interrupts are not supposed
+ * to be enabled from HLOS, so overwrite the SAR location
+ * so that the secure interrupt remains disabled.
+ */
+ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
+ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
+ }
+
+ /* Save AuxBoot* registers */
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+ __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+ __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);
+
+ /* Save SyncReq generation logic */
+ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
+ __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
+ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
+ __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);
+
+ /* Set the Backup Bit Mask status */
+ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+ val |= SAR_BACKUP_STATUS_WAKEUPGEN;
+ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
diff --git a/arch/arm/mach-omap2/omap2plus-cpufreq.c b/arch/arm/mach-omap2/omap2plus-cpufreq.c
new file mode 100644
index 0000000..7c5a6f9
--- /dev/null
+++ b/arch/arm/mach-omap2/omap2plus-cpufreq.c
@@ -0,0 +1,439 @@
+/*
+ * OMAP2PLUS cpufreq driver
+ *
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * Updated to support OMAP3
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+
+#include <mach/hardware.h>
+
+#include "dvfs.h"
+
+#ifdef CONFIG_SMP
+struct lpj_info {
+ unsigned long ref;
+ unsigned int freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
+#endif
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
+static DEFINE_MUTEX(omap_cpufreq_lock);
+
+static unsigned int max_thermal;
+static unsigned int max_freq;
+static unsigned int current_target_freq;
+static bool omap_cpufreq_ready;
+static bool omap_cpufreq_suspended;
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NR_CPUS)
+ return 0;
+
+ rate = clk_get_rate(mpu_clk) / 1000;
+ return rate;
+}
+
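+/*
+ * Scale the MPU to target_freq (clamped to the current thermal cap) and
+ * update loops_per_jiffy accordingly. Callers are expected to hold
+ * omap_cpufreq_lock.
+ */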
+static int omap_cpufreq_scale(unsigned int target_freq, unsigned int cur_freq)
+{
+ unsigned int i;
+ int ret;
+ struct cpufreq_freqs freqs;
+
+ freqs.new = target_freq;
+ freqs.old = omap_getspeed(0);
+
+ /*
+ * If the new frequency is more than the thermal max allowed
+ * frequency, clamp the request to the thermal max before scaling
+ * the MPU.
+ */
+ if (freqs.new > max_thermal)
+ freqs.new = max_thermal;
+
+ if ((freqs.old == freqs.new) && (cur_freq == freqs.new))
+ return 0;
+
+ get_online_cpus();
+
+ /* notifiers */
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
+#endif
+
+ ret = omap_device_scale(mpu_dev, mpu_dev, freqs.new * 1000);
+
+ freqs.new = omap_getspeed(0);
+
+#ifdef CONFIG_SMP
+ /*
+ * Note that loops_per_jiffy is not updated on SMP systems in
+ * cpufreq driver. So, update the per-CPU loops_per_jiffy value
+ * on frequency transition. We need to update all dependent CPUs.
+ */
+ for_each_possible_cpu(i) {
+ struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+ if (!lpj->freq) {
+ lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+ lpj->freq = freqs.old;
+ }
+
+ per_cpu(cpu_data, i).loops_per_jiffy =
+ cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+ }
+
+ /* And don't forget to adjust the global one */
+ if (!global_lpj_ref.freq) {
+ global_lpj_ref.ref = loops_per_jiffy;
+ global_lpj_ref.freq = freqs.old;
+ }
+ loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+ freqs.new);
+#endif
+
+ /* notifiers */
+ for_each_online_cpu(freqs.cpu)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ put_online_cpus();
+
+ return ret;
+}
+
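+/*
+ * Return the highest frequency in the table that is still below the
+ * current thermal cap, i.e. step the cap down by one OPP. If no lower
+ * frequency exists, the current cap is returned unchanged.
+ */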
+static unsigned int omap_thermal_lower_speed(void)
+{
+ unsigned int max = 0;
+ unsigned int curr;
+ int i;
+
+ curr = max_thermal;
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ if (freq_table[i].frequency > max &&
+ freq_table[i].frequency < curr)
+ max = freq_table[i].frequency;
+
+ if (!max)
+ return curr;
+
+ return max;
+}
+
+void omap_thermal_throttle(void)
+{
+ unsigned int cur;
+
+ if (!omap_cpufreq_ready) {
+ pr_warn_once("%s: Thermal throttle prior to CPUFREQ ready\n",
+ __func__);
+ return;
+ }
+
+ mutex_lock(&omap_cpufreq_lock);
+
+ max_thermal = omap_thermal_lower_speed();
+
+ pr_warn("%s: temperature too high, cpu throttle at max %u\n",
+ __func__, max_thermal);
+
+ if (!omap_cpufreq_suspended) {
+ cur = omap_getspeed(0);
+ if (cur > max_thermal)
+ omap_cpufreq_scale(max_thermal, cur);
+ }
+
+ mutex_unlock(&omap_cpufreq_lock);
+}
+
+void omap_thermal_unthrottle(void)
+{
+ unsigned int cur;
+
+ if (!omap_cpufreq_ready)
+ return;
+
+ mutex_lock(&omap_cpufreq_lock);
+
+ if (max_thermal == max_freq) {
+ pr_warn("%s: not throttling\n", __func__);
+ goto out;
+ }
+
+ max_thermal = max_freq;
+
+ pr_warn("%s: temperature reduced, ending cpu throttling\n", __func__);
+
+ if (!omap_cpufreq_suspended) {
+ cur = omap_getspeed(0);
+ omap_cpufreq_scale(current_target_freq, cur);
+ }
+
+out:
+ mutex_unlock(&omap_cpufreq_lock);
+}
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+ if (!freq_table)
+ return -EINVAL;
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
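+/*
+ * cpufreq ->target callback: look up the closest supported frequency
+ * in the OPP-derived table and scale to it, unless transitions are
+ * currently blocked because the driver is suspended.
+ */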
+static int omap_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int i;
+ int ret = 0;
+
+ if (!freq_table) {
+ dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+ policy->cpu);
+ return -EINVAL;
+ }
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &i);
+ if (ret) {
+ dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+ __func__, policy->cpu, target_freq, ret);
+ return ret;
+ }
+
+ mutex_lock(&omap_cpufreq_lock);
+
+ current_target_freq = freq_table[i].frequency;
+
+ if (!omap_cpufreq_suspended)
+ ret = omap_cpufreq_scale(current_target_freq, policy->cur);
+
+ mutex_unlock(&omap_cpufreq_lock);
+
+ return ret;
+}
+
+static inline void freq_table_free(void)
+{
+ if (atomic_dec_and_test(&freq_table_users))
+ opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
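+/*
+ * cpufreq ->init callback: build the frequency table from the OPP layer
+ * (shared between CPUs via freq_table_users) and advertise a shared
+ * policy on SMP since both cores scale together.
+ */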
+static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ int i;
+
+ mpu_clk = clk_get(NULL, mpu_clk_name);
+ if (IS_ERR(mpu_clk))
+ return PTR_ERR(mpu_clk);
+
+ if (policy->cpu >= NR_CPUS) {
+ result = -EINVAL;
+ goto fail_ck;
+ }
+
+ policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = opp_init_cpufreq_table(mpu_dev, &freq_table);
+
+ if (result) {
+ dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ __func__, policy->cpu, result);
+ goto fail_ck;
+ }
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (result)
+ goto fail_table;
+
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = omap_getspeed(policy->cpu);
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ max_freq = max(freq_table[i].frequency, max_freq);
+ max_thermal = max_freq;
+
+ /*
+ * On OMAP SMP configurations, both processors share the voltage
+ * and clock, so both CPUs need to be scaled together and hence
+ * need software co-ordination. Use the cpufreq affected_cpus
+ * interface to handle this scenario. The additional is_smp() check
+ * is to keep the SMP_ON_UP build working.
+ */
+ if (is_smp()) {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ cpumask_setall(policy->cpus);
+ }
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ return 0;
+
+fail_table:
+ freq_table_free();
+fail_ck:
+ clk_put(mpu_clk);
+ return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ freq_table_free();
+ clk_put(mpu_clk);
+ return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = omap_verify_speed,
+ .target = omap_target,
+ .get = omap_getspeed,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .name = "omap2plus",
+ .attr = omap_cpufreq_attr,
+};
+
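+/*
+ * Block cpufreq transitions during the noirq phase of suspend; any
+ * target frequency requested while suspended is applied on resume.
+ */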
+static int omap_cpufreq_suspend_noirq(struct device *dev)
+{
+ mutex_lock(&omap_cpufreq_lock);
+ omap_cpufreq_suspended = true;
+ mutex_unlock(&omap_cpufreq_lock);
+ return 0;
+}
+
+static int omap_cpufreq_resume_noirq(struct device *dev)
+{
+ unsigned int cur;
+
+ mutex_lock(&omap_cpufreq_lock);
+ cur = omap_getspeed(0);
+ if (cur != current_target_freq)
+ omap_cpufreq_scale(current_target_freq, cur);
+
+ omap_cpufreq_suspended = false;
+ mutex_unlock(&omap_cpufreq_lock);
+ return 0;
+}
+
+static struct dev_pm_ops omap_cpufreq_driver_pm_ops = {
+ .suspend_noirq = omap_cpufreq_suspend_noirq,
+ .resume_noirq = omap_cpufreq_resume_noirq,
+};
+
+static struct platform_driver omap_cpufreq_platform_driver = {
+ .driver.name = "omap_cpufreq",
+ .driver.pm = &omap_cpufreq_driver_pm_ops,
+};
+static struct platform_device omap_cpufreq_device = {
+ .name = "omap_cpufreq",
+};
+
+static int __init omap_cpufreq_init(void)
+{
+ int ret;
+
+ if (cpu_is_omap24xx())
+ mpu_clk_name = "virt_prcm_set";
+ else if (cpu_is_omap34xx())
+ mpu_clk_name = "dpll1_ck";
+ else if (cpu_is_omap443x())
+ mpu_clk_name = "dpll_mpu_ck";
+ else if (cpu_is_omap446x())
+ mpu_clk_name = "virt_dpll_mpu_ck";
+
+ if (!mpu_clk_name) {
+ pr_err("%s: unsupported Silicon?\n", __func__);
+ return -EINVAL;
+ }
+
+ mpu_dev = omap2_get_mpuss_device();
+ if (!mpu_dev) {
+ pr_warning("%s: unable to get the mpu device\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = cpufreq_register_driver(&omap_driver);
+ omap_cpufreq_ready = !ret;
+
+ if (!ret) {
+ int t;
+
+ t = platform_device_register(&omap_cpufreq_device);
+ if (t)
+ pr_warn("%s_init: platform_device_register failed\n",
+ __func__);
+ t = platform_driver_register(&omap_cpufreq_platform_driver);
+ if (t)
+ pr_warn("%s_init: platform_driver_register failed\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&omap_driver);
+ platform_driver_unregister(&omap_cpufreq_platform_driver);
+ platform_device_unregister(&omap_cpufreq_device);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP2PLUS SOCs");
+MODULE_LICENSE("GPL");
+late_initcall(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 9ef8c29..7bf033c 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -14,38 +14,132 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_twd.h>
#include <mach/hardware.h>
#include <mach/omap4-common.h>
+#include <mach/omap-wakeupgen.h>
+
+#include "omap4-sar-layout.h"
+#include "clockdomain.h"
#ifdef CONFIG_CACHE_L2X0
-void __iomem *l2cache_base;
+#define L2X0_POR_OFFSET_VALUE 0x7
+static void __iomem *l2cache_base;
#endif
-void __iomem *gic_dist_base_addr;
+static void __iomem *gic_dist_base_addr;
+static void __iomem *gic_cpu_base;
+static struct clockdomain *l4_secure_clkdm;
+static void *dram_barrier_base;
+static void omap_bus_sync_noop(void)
+{ }
+
+struct omap_bus_post_fns omap_bus_post = {
+ .sync = omap_bus_sync_noop,
+};
+EXPORT_SYMBOL(omap_bus_post);
+
+void __iomem *omap4_get_gic_dist_base(void)
+{
+ return gic_dist_base_addr;
+}
+
+void __iomem *omap4_get_gic_cpu_base(void)
+{
+ return gic_cpu_base;
+}
+
+void *omap_get_dram_barrier_base(void)
+{
+ return dram_barrier_base;
+}
void __init gic_init_irq(void)
{
- void __iomem *gic_cpu_base;
/* Static mapping, never released */
gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
- BUG_ON(!gic_dist_base_addr);
+ if (WARN_ON(!gic_dist_base_addr))
+ return;
/* Static mapping, never released */
gic_cpu_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
- BUG_ON(!gic_cpu_base);
+ if (WARN_ON(!gic_cpu_base))
+ return;
+
+ omap_wakeupgen_init();
gic_init(0, 29, gic_dist_base_addr, gic_cpu_base);
}
+/*
+ * FIXME: Remove these GIC APIs once the common GIC library starts
+ * supporting them.
+ */
+void gic_cpu_enable(void)
+{
+ __raw_writel(0xf0, gic_cpu_base + GIC_CPU_PRIMASK);
+ __raw_writel(1, gic_cpu_base + GIC_CPU_CTRL);
+}
+
+void gic_cpu_disable(void)
+{
+ __raw_writel(0, gic_cpu_base + GIC_CPU_CTRL);
+}
+
+
+bool gic_dist_disabled(void)
+{
+ return !(__raw_readl(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1);
+}
+
+void gic_dist_enable(void)
+{
+ if (cpu_is_omap443x() || gic_dist_disabled())
+ __raw_writel(0x1, gic_dist_base_addr + GIC_DIST_CTRL);
+}
+void gic_dist_disable(void)
+{
+ __raw_writel(0, gic_dist_base_addr + GIC_DIST_CTRL);
+}
+
+void gic_timer_retrigger(void)
+{
+ u32 twd_int = __raw_readl(twd_base + TWD_TIMER_INTSTAT);
+ u32 gic_int = __raw_readl(gic_dist_base_addr + GIC_DIST_PENDING_SET);
+ u32 twd_ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+ if (twd_int && !(gic_int & BIT(OMAP44XX_IRQ_LOCALTIMER))) {
+ /*
+ * The local timer interrupt got lost while the distributor was
+ * disabled. Ack the pending interrupt, and retrigger it.
+ */
+ pr_warn("%s: lost localtimer interrupt\n", __func__);
+ __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+ if (!(twd_ctrl & TWD_TIMER_CONTROL_PERIODIC)) {
+ __raw_writel(1, twd_base + TWD_TIMER_COUNTER);
+ twd_ctrl |= TWD_TIMER_CONTROL_ENABLE;
+ __raw_writel(twd_ctrl, twd_base + TWD_TIMER_CONTROL);
+ }
+ }
+}
+
#ifdef CONFIG_CACHE_L2X0
+void __iomem *omap4_get_l2cache_base(void)
+{
+ return l2cache_base;
+}
+
static void omap4_l2x0_disable(void)
{
/* Disable PL310 L2 Cache controller */
@@ -61,6 +155,9 @@
static int __init omap_l2_cache_init(void)
{
u32 aux_ctrl = 0;
+ u32 por_ctrl = 0;
+ u32 lockdown = 0;
+ bool mpu_prefetch_disable_errata = false;
/*
* To avoid code running on other OMAPs in
@@ -69,32 +166,77 @@
if (!cpu_is_omap44xx())
return -ENODEV;
+#ifdef CONFIG_OMAP_ALLOW_OSWR
+ if (omap_rev() == OMAP4460_REV_ES1_0)
+ mpu_prefetch_disable_errata = true;
+#endif
+
/* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
- BUG_ON(!l2cache_base);
+ if (WARN_ON(!l2cache_base))
+ return -ENODEV;
/*
* 16-way associativity, parity disabled
* Way size - 32KB (es1.0)
* Way size - 64KB (es2.0 +)
*/
- aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
- (0x1 << 25) |
- (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
- (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
+ aux_ctrl = readl_relaxed(l2cache_base + L2X0_AUX_CTRL);
if (omap_rev() == OMAP4430_REV_ES1_0) {
aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
- } else {
- aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
- (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
- (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
- (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
- (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
+ goto skip_aux_por_api;
}
- if (omap_rev() != OMAP4430_REV_ES1_0)
- omap_smc1(0x109, aux_ctrl);
+ /*
+ * Drop instruction prefetch hint since it degrades the
+ * the performance.
+ */
+ aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+ (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+ (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
+
+ if (!mpu_prefetch_disable_errata)
+ aux_ctrl |= (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT);
+
+ omap_smc1(0x109, aux_ctrl);
+
+ /* Setup POR Control register */
+ por_ctrl = readl_relaxed(l2cache_base + L2X0_PREFETCH_CTRL);
+
+ /*
+ * Double linefill is available only on OMAP4460 L2X0.
+ * It may cause single cache line memory corruption, leave it disabled
+ * on all devices
+ */
+ por_ctrl &= ~(1 << L2X0_PREFETCH_DOUBLE_LINEFILL_SHIFT);
+ if (!mpu_prefetch_disable_errata) {
+ por_ctrl |= 1 << L2X0_PREFETCH_DATA_PREFETCH_SHIFT;
+ por_ctrl |= L2X0_POR_OFFSET_VALUE;
+ }
+
+ /* Set POR through PPA service only in EMU/HS devices */
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+ omap4_secure_dispatcher(PPA_SERVICE_PL310_POR, 0x7, 1,
+ por_ctrl, 0, 0, 0);
+ else if (omap_rev() >= OMAP4430_REV_ES2_1)
+ omap_smc1(0x113, por_ctrl);
+
+
+ /*
+ * FIXME: Temporary WA for OMAP4460 stability issue.
+ * Lock-down specific L2 cache ways which makes effective
+ * L2 size as 512 KB instead of 1 MB
+ */
+ if (omap_rev() == OMAP4460_REV_ES1_0) {
+ lockdown = 0xa5a5;
+ writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D0);
+ writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D1);
+ writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I0);
+ writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I1);
+ }
+
+skip_aux_por_api:
/* Enable PL310 L2 Cache controller */
omap_smc1(0x102, 0x1);
@@ -111,3 +253,74 @@
}
early_initcall(omap_l2_cache_init);
#endif
+
+static int __init omap_barriers_init(void)
+{
+ dma_addr_t dram_phys;
+
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+
+ dram_barrier_base = dma_alloc_stronglyordered(NULL, SZ_4K,
+ (dma_addr_t *)&dram_phys, GFP_KERNEL);
+ if (!dram_barrier_base) {
+ pr_err("%s: failed to allocate memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ omap_bus_post.sync = omap_bus_sync;
+
+ return 0;
+}
+core_initcall(omap_barriers_init);
+
+/*
+ * omap4_secure_dispatcher: Routine to dispatch low power secure
+ * service routines
+ *
+ * @idx: The HAL API index
+ * @flag: The flag indicating criticality of operation
+ * @nargs: Number of valid arguments out of four.
+ * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
+ *
+ * Return the error value on success/failure
+ */
+u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ u32 ret;
+ u32 param[5];
+
+ param[0] = nargs;
+ param[1] = arg1;
+ param[2] = arg2;
+ param[3] = arg3;
+ param[4] = arg4;
+
+ /* Look-up Only once */
+ if (!l4_secure_clkdm)
+ l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");
+
+ /*
+ * Put l4 secure to software wakeup so that secure
+ * modules are accessible
+ */
+ clkdm_wakeup(l4_secure_clkdm);
+
+ /*
+ * Secure API needs physical address
+ * pointer for the parameters
+ */
+ flush_cache_all();
+ outer_clean_range(__pa(param), __pa(param + 5));
+
+ ret = omap_smc2(idx, flag, __pa(param));
+
+ /*
+ * Restore l4 secure to hardware supervised mode to allow
+ * secure modules to idle
+ */
+ clkdm_allow_idle(l4_secure_clkdm);
+
+ return ret;
+}
diff --git a/arch/arm/mach-omap2/omap4-mpuss-lowpower.c b/arch/arm/mach-omap2/omap4-mpuss-lowpower.c
new file mode 100644
index 0000000..81511e8
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-mpuss-lowpower.c
@@ -0,0 +1,824 @@
+/*
+ * OMAP4 MPUSS low power code
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
+ * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
+ * CPU0 and CPU1 LPRM modules.
+ * CPU0, CPU1 and MPUSS each have their own power domain and
+ * hence multiple low power combinations of MPUSS are possible.
+ *
+ * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
+ * because that mode is ruled out by the hardware constraints of dormant
+ * mode. While waking up from the dormant mode, a reset signal
+ * to the Cortex-A9 processor must be asserted by the external
+ * power controller.
+ *
+ * With architectural inputs and hardware recommendations, only
+ * below modes are supported from power gain vs latency point of view.
+ *
+ * CPU0 CPU1 MPUSS
+ * ----------------------------------------------
+ * ON ON ON
+ * ON(Inactive) OFF ON(Inactive)
+ * OFF OFF CSWR
+ * OFF OFF OSWR
+ * OFF OFF OFF
+ * ----------------------------------------------
+ *
+ * Note: CPU0 is the master core and it is the last CPU to go down
+ * and first to wake up when MPUSS low power states are exercised.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/tlbflush.h>
+#include <asm/smp_scu.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+#include <mach/omap4-common.h>
+#include <mach/omap-wakeupgen.h>
+
+#include "omap4-sar-layout.h"
+#include "pm.h"
+#include "prcm_mpu44xx.h"
+#include "prminst44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prm-regbits-44xx.h"
+#include "cm.h"
+#include "prm.h"
+#include "cm44xx.h"
+#include "prcm-common.h"
+
+#ifdef CONFIG_SMP
+
+#define GIC_MASK_ALL 0x0
+#define GIC_ISR_NON_SECURE 0xffffffff
+#define SPI_ENABLE_SET_OFFSET 0x04
+#define PPI_PRI_OFFSET 0x1c
+#define SPI_PRI_OFFSET 0x20
+#define SPI_TARGET_OFFSET 0x20
+#define SPI_CONFIG_OFFSET 0x20
+
+/* GIC save SAR bank base */
+static struct powerdomain *mpuss_pd;
+/*
+ * Maximum Secure memory storage size.
+ */
+#define OMAP4_SECURE_RAM_STORAGE (88 * SZ_1K)
+/*
+ * Physical address of secure memory storage
+ */
+dma_addr_t omap4_secure_ram_phys;
+static void *secure_ram;
+
+/* Variables storing the maximum SPI (Shared Peripheral Interrupt) IRQ and register counts. */
+static u32 max_spi_irq, max_spi_reg;
+
+struct omap4_cpu_pm_info {
+ struct powerdomain *pwrdm;
+ void __iomem *scu_sar_addr;
+};
+
+static void __iomem *gic_dist_base;
+static void __iomem *gic_cpu_base;
+static void __iomem *sar_base;
+
+static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+
+#define PPI_CONTEXT_SIZE 11
+static DEFINE_PER_CPU(u32[PPI_CONTEXT_SIZE], gic_ppi_context);
+static DEFINE_PER_CPU(u32, gic_ppi_enable_mask);
+
+/* Helper functions */
+static inline void sar_writel(u32 val, u32 offset, u8 idx)
+{
+ __raw_writel(val, sar_base + offset + 4 * idx);
+}
+
+static inline u32 gic_readl(u32 offset, u8 idx)
+{
+ return __raw_readl(gic_dist_base + offset + 4 * idx);
+}
+
+u32 gic_cpu_read(u32 reg)
+{
+ return __raw_readl(gic_cpu_base + reg);
+}
+
+/*
+ * Set the CPUx powerdomain's next power state
+ */
+static inline void set_cpu_next_pwrst(unsigned int cpu_id,
+ unsigned int power_state)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
+}
+
+/*
+ * Read CPU's previous power state
+ */
+static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ return pwrdm_read_prev_pwrst(pm_info->pwrdm);
+}
+
+/*
+ * Clear the CPUx powerdomain's previous power state
+ */
+static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+}
+
+struct reg_tuple {
+ void __iomem *addr;
+ u32 val;
+};
+
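+/*
+ * Address/value pairs for the Tesla (DSP), IVAHD and L3 instrumentation
+ * PRCM registers that are saved/restored by software across deep low
+ * power states (see save_ivahd_tesla_regs() and save_l3instr_regs()).
+ */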
+static struct reg_tuple tesla_reg[] = {
+ {.addr = OMAP4430_CM_TESLA_CLKSTCTRL},
+ {.addr = OMAP4430_CM_TESLA_TESLA_CLKCTRL},
+ {.addr = OMAP4430_PM_TESLA_PWRSTCTRL},
+};
+
+static struct reg_tuple ivahd_reg[] = {
+ {.addr = OMAP4430_CM_IVAHD_CLKSTCTRL},
+ {.addr = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL},
+ {.addr = OMAP4430_CM_IVAHD_SL2_CLKCTRL},
+ {.addr = OMAP4430_PM_IVAHD_PWRSTCTRL}
+};
+
+static struct reg_tuple l3instr_reg[] = {
+ {.addr = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL},
+ {.addr = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL},
+ {.addr = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL},
+};
+
+/*
+ * Store the SCU power status value to scratchpad memory
+ */
+static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+ u32 scu_pwr_st;
+
+ switch (cpu_state) {
+ case PWRDM_POWER_RET:
+ scu_pwr_st = SCU_PM_DORMANT;
+ break;
+ case PWRDM_POWER_OFF:
+ scu_pwr_st = SCU_PM_POWEROFF;
+ break;
+ case PWRDM_POWER_ON:
+ case PWRDM_POWER_INACTIVE:
+ default:
+ scu_pwr_st = SCU_PM_NORMAL;
+ break;
+ }
+
+ __raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
+}
+
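+/*
+ * Save/restore this CPU's banked PPI/SGI priority, configuration and
+ * enable registers so they can be reinstated after a low-power
+ * transition.
+ */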
+static void gic_save_ppi(void)
+{
+ void __iomem *gic_dist_base = omap4_get_gic_dist_base();
+ u32 *context = __get_cpu_var(gic_ppi_context);
+ int i = 0;
+
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x4);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x8);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0xc);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x10);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x14);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x18);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x1c);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG + 0x4);
+ context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET);
+
+ BUG_ON(i != PPI_CONTEXT_SIZE);
+}
+
+static void gic_restore_ppi(void)
+{
+ void __iomem *gic_dist_base = omap4_get_gic_dist_base();
+ u32 *context = __get_cpu_var(gic_ppi_context);
+ int i = 0;
+
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x4);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x8);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0xc);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x10);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x14);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x18);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x1c);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG + 0x4);
+ writel_relaxed(context[i++], gic_dist_base + GIC_DIST_ENABLE_SET);
+
+ BUG_ON(i != PPI_CONTEXT_SIZE);
+}
+
+/*
+ * Mask all the PPIs. This should only be called after they have been saved
+ * through secure trap or through gic_save_ppi(). This is primarily needed to
+ * mask the local timer irq that could be pending since timekeeping gets
+ * suspended after the local irqs are disabled. The pending interrupt would
+ * kick the CPU out of WFI immediately, and prevent it from going to the lower
+ * power states. The correct value will be restored when the CPU is brought
+ * back up by restore.
+ */
+static void gic_mask_ppi(void)
+{
+ void __iomem *gic_dist_base = omap4_get_gic_dist_base();
+
+ __get_cpu_var(gic_ppi_enable_mask) =
+ readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET);
+ writel_relaxed(0xffffffff, gic_dist_base + GIC_DIST_ENABLE_CLEAR);
+}
+
+static void gic_unmask_ppi(void)
+{
+ void __iomem *gic_dist_base = omap4_get_gic_dist_base();
+ writel_relaxed(__get_cpu_var(gic_ppi_enable_mask),
+ gic_dist_base + GIC_DIST_ENABLE_SET);
+}
+
+/*
+ * Save GIC context in SAR RAM. Restore is done by ROM code
+ * GIC is lost only when MPU hits OSWR or OFF. It consists
+ * of a distributor and a per-CPU interface module. The GIC
+ * save restore is optimised to save only necessary registers.
+ */
+static void gic_save_context(void)
+{
+ u8 i;
+ u32 val;
+
+ /*
+ * Interrupt Clear Enable registers are inverse of set enable
+ * and hence do not need to be saved. ROM code programs them
+ * based on Set Enable register values.
+ */
+
+ /* Save CPU 0 Interrupt Set Enable register */
+ val = gic_readl(GIC_DIST_ENABLE_SET, 0);
+ sar_writel(val, ICDISER_CPU0_OFFSET, 0);
+
+ /* Disable interrupts on CPU1 */
+ sar_writel(GIC_MASK_ALL, ICDISER_CPU1_OFFSET, 0);
+
+ /* Save all SPI Set Enable register */
+ for (i = 0; i < max_spi_reg; i++) {
+ val = gic_readl(GIC_DIST_ENABLE_SET + SPI_ENABLE_SET_OFFSET, i);
+ sar_writel(val, ICDISER_SPI_OFFSET, i);
+ }
+
+ /*
+ * Interrupt Priority Registers
+ * Secure sw accesses, last 5 bits of the 8 bits (bit[7:3] are used)
+ * Non-Secure sw accesses, last 4 bits (i.e. bits[7:4] are used)
+ * But the Secure Bits[7:3] are shifted by 1 in Non-Secure access.
+ * Secure (bits[7:3] << 1)== Non Secure bits[7:4]
+ * Hence right shift the value by 1 while saving the priority
+ */
+
+ /* Save SGI priority registers (Software Generated Interrupt) */
+ for (i = 0; i < 4; i++) {
+ val = gic_readl(GIC_DIST_PRI, i);
+
+ /* Save the priority bits of the Interrupts */
+ sar_writel(val >> 0x1, ICDIPR_SFI_CPU0_OFFSET, i);
+
+ /* Disable the interrupts on CPU1 */
+ sar_writel(GIC_MASK_ALL, ICDIPR_SFI_CPU1_OFFSET, i);
+ }
+
+ /* Save PPI priority registers (Private Peripheral Interrupts) */
+ val = gic_readl(GIC_DIST_PRI + PPI_PRI_OFFSET, 0);
+ sar_writel(val >> 0x1, ICDIPR_PPI_CPU0_OFFSET, 0);
+ sar_writel(GIC_MASK_ALL, ICDIPR_PPI_CPU1_OFFSET, 0);
+
+ /* SPI priority registers - 4 interrupts/register */
+ for (i = 0; i < (max_spi_irq / 4); i++) {
+ val = gic_readl((GIC_DIST_PRI + SPI_PRI_OFFSET), i);
+ sar_writel(val >> 0x1, ICDIPR_SPI_OFFSET, i);
+ }
+
+ /* SPI Interrupt Target registers - 4 interrupts/register */
+ for (i = 0; i < (max_spi_irq / 4); i++) {
+ val = gic_readl((GIC_DIST_TARGET + SPI_TARGET_OFFSET), i);
+ sar_writel(val, ICDIPTR_SPI_OFFSET, i);
+ }
+
+ /* SPI Interrupt Configuration registers - 16 interrupts/register */
+ for (i = 0; i < (max_spi_irq / 16); i++) {
+ val = gic_readl((GIC_DIST_CONFIG + SPI_CONFIG_OFFSET), i);
+ sar_writel(val, ICDICFR_OFFSET, i);
+ }
+
+ /* Set the Backup Bit Mask status for GIC */
+ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+ val |= (SAR_BACKUP_STATUS_GIC_CPU0 | SAR_BACKUP_STATUS_GIC_CPU1);
+ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
+/*
+ * API to save GIC and Wakeupgen using secure API
+ * for HS/EMU device
+ */
+static void save_gic_wakeupgen_secure(void)
+{
+ u32 ret;
+ ret = omap4_secure_dispatcher(HAL_SAVEGIC_INDEX,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ if (!ret)
+ pr_debug("GIC and Wakeupgen context save failed\n");
+}
+
+
+/*
+ * API to save Secure RAM, GIC, WakeupGen Registers using secure API
+ * for HS/EMU device
+ */
+static void save_secure_all(void)
+{
+ u32 ret;
+ ret = omap4_secure_dispatcher(HAL_SAVEALL_INDEX,
+ FLAG_START_CRITICAL,
+ 1, omap4_secure_ram_phys, 0, 0, 0);
+ if (ret)
+ pr_debug("Secure all context save failed\n");
+}
+
+/*
+ * API to save Secure RAM using secure API
+ * for HS/EMU device
+ */
+static void save_secure_ram(void)
+{
+ u32 ret;
+ ret = omap4_secure_dispatcher(HAL_SAVESECURERAM_INDEX,
+ FLAG_START_CRITICAL,
+ 1, omap4_secure_ram_phys, 0, 0, 0);
+ if (!ret)
+ pr_debug("Secure ram context save failed\n");
+}
+
+/* Helper functions for MPUSS OSWR */
+static inline u32 mpuss_read_prev_logic_pwrst(void)
+{
+ u32 reg;
+
+ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+ reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
+ return reg;
+}
+
+static inline void mpuss_clear_prev_logic_pwrst(void)
+{
+ u32 reg;
+
+ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+ omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+}
+
+static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
+{
+ u32 reg;
+
+ if (cpu_id) {
+ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
+ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
+ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+ } else {
+ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
+ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
+ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+ }
+}
+
+static inline void save_ivahd_tesla_regs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tesla_reg); i++)
+ tesla_reg[i].val = __raw_readl(tesla_reg[i].addr);
+
+ for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++)
+ ivahd_reg[i].val = __raw_readl(ivahd_reg[i].addr);
+}
+
+static inline void restore_ivahd_tesla_regs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tesla_reg); i++)
+ __raw_writel(tesla_reg[i].val, tesla_reg[i].addr);
+
+ for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++)
+ __raw_writel(ivahd_reg[i].val, ivahd_reg[i].addr);
+}
+
+static inline void save_l3instr_regs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++)
+ l3instr_reg[i].val = __raw_readl(l3instr_reg[i].addr);
+}
+
+static inline void restore_l3instr_regs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++)
+ __raw_writel(l3instr_reg[i].val, l3instr_reg[i].addr);
+}
+
+/*
+ * OMAP4 MPUSS Low Power Entry Function
+ *
+ * The purpose of this function is to manage low power programming
+ * of OMAP4 MPUSS subsystem
+ * Parameters:
+ * cpu : CPU ID
+ * power_state: Targeted low power state.
+ *
+ * MPUSS Low power states
+ * The basic rule is that the MPUSS power domain must be at the higher or
+ * equal power state (the state that consumes more power) than the higher of the
+ * two CPUs. For example, it is illegal for system power to be OFF, while
+ * the power of one or both of the CPUs is DORMANT. When an illegal state is
+ * entered, then the hardware behavior is unpredictable.
+ *
+ * MPUSS state for the context save
+ * save_state =
+ * 0 - Nothing lost and no need to save: MPUSS INACTIVE
+ * 1 - CPUx L1 and logic lost: MPUSS CSWR
+ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
+ */
+int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
+{
+ unsigned int save_state = 0;
+ unsigned int wakeup_cpu;
+
+ if ((cpu >= NR_CPUS) || (omap_rev() == OMAP4430_REV_ES1_0))
+ goto ret;
+
+ switch (power_state) {
+ case PWRDM_POWER_ON:
+ case PWRDM_POWER_INACTIVE:
+ save_state = 0;
+ break;
+ case PWRDM_POWER_OFF:
+ save_state = 1;
+ break;
+ case PWRDM_POWER_RET:
+ default:
+ /*
+ * CPUx CSWR is an invalid hardware state. Also CPUx OSWR
+ * doesn't make much sense, since logic is lost and the L1 cache
+ * needs to be cleaned because of coherency. This makes
+ * CPUx OSWR equivalent to CPUx OFF and hence not supported.
+ */
+ WARN_ON(1);
+ goto ret;
+ }
+
+ /*
+ * MPUSS book-keeping should be executed by the master
+ * CPU only, which is also the last CPU to go down.
+ */
+ if (cpu)
+ goto cpu_prepare;
+
+ pwrdm_pre_transition();
+
+ /*
+ * Check MPUSS next state and save GIC if needed
+ * GIC lost during MPU OFF and OSWR
+ */
+ pwrdm_clear_all_prev_pwrst(mpuss_pd);
+ mpuss_clear_prev_logic_pwrst();
+ if (omap4_device_next_state_off()) {
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ omap_wakeupgen_save();
+ gic_save_context();
+ } else {
+ save_secure_all();
+ save_ivahd_tesla_regs();
+ save_l3instr_regs();
+ }
+ save_state = 3;
+ goto cpu_prepare;
+ }
+
+ switch (pwrdm_read_next_pwrst(mpuss_pd)) {
+ case PWRDM_POWER_RET:
+ /*
+ * MPUSS OSWR - Complete logic lost + L2$ retained.
+ * MPUSS CSWR - Complete logic retained + L2$ retained.
+ */
+ if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) {
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ omap_wakeupgen_save();
+ gic_save_context();
+ } else {
+ save_gic_wakeupgen_secure();
+ save_ivahd_tesla_regs();
+ save_l3instr_regs();
+ }
+ save_state = 2;
+ }
+ break;
+ case PWRDM_POWER_OFF:
+ /* MPUSS OFF - logic lost + L2$ lost */
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ omap_wakeupgen_save();
+ gic_save_context();
+ } else {
+ save_gic_wakeupgen_secure();
+ save_ivahd_tesla_regs();
+ save_l3instr_regs();
+ save_secure_ram();
+ }
+ save_state = 3;
+ break;
+ case PWRDM_POWER_ON:
+ case PWRDM_POWER_INACTIVE:
+ /* No need to save MPUSS context */
+ default:
+ ;
+ }
+
+cpu_prepare:
+ if (cpu)
+ gic_save_ppi();
+
+ /*
+ * mask all PPIs to prevent them from kicking us out of wfi.
+ */
+ gic_mask_ppi();
+
+ clear_cpu_prev_pwrst(cpu);
+ cpu_clear_prev_logic_pwrst(cpu);
+ set_cpu_next_pwrst(cpu, power_state);
+ scu_pwrst_prepare(cpu, power_state);
+
+ /*
+ * Call low level function with targeted CPU id
+ * and its low power state.
+ */
+ stop_critical_timings();
+ omap4_cpu_suspend(cpu, save_state);
+ start_critical_timings();
+
+ /*
+ * Restore the CPUx power state to ON, otherwise the CPUx
+ * power domain can transition to the programmed low power
+ * state while doing WFI outside the low power code. On
+ * secure devices, CPUx does WFI which can result in a
+ * domain transition.
+ */
+ wakeup_cpu = hard_smp_processor_id();
+ set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
+
+ /*
+ * If we didn't actually get into the low power state (e.g. immediately
+ * exited wfi due to a pending interrupt), the secure side
+ * would not have restored CPU0's GIC PPI enable mask.
+ * For other CPUs, gic_restore_ppi will do that for us.
+ */
+ if (cpu)
+ gic_restore_ppi();
+ else
+ gic_unmask_ppi();
+
+ /*
+ * If !master cpu return to hotplug-path.
+ *
+ * GIC distributor control register has changed between
+ * CortexA9 r1pX and r2pX. The Control Register secure
+ * banked version is now composed of 2 bits:
+ * bit 0 == Secure Enable
+ * bit 1 == Non-Secure Enable
+ * The Non-Secure banked register has not changed
+ * Because the ROM Code is based on the r1pX GIC, the CPU1
+ * GIC restoration will cause a problem to CPU0 Non-Secure SW.
+ * The workaround must be:
+ * 1) Before doing the CPU1 wakeup, CPU0 must disable
+ * the GIC distributor
+ * 2) CPU1 must re-enable the GIC distributor on
+ * its wakeup path.
+ */
+ if (wakeup_cpu) {
+ if (!cpu_is_omap443x())
+ gic_dist_enable();
+ goto ret;
+ }
+
+ /* Check if MPUSS lost its logic */
+ if (mpuss_read_prev_logic_pwrst()) {
+ /* Clear SAR BACKUP status on GP devices */
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ __raw_writel(0x0, sar_base + SAR_BACKUP_STATUS_OFFSET);
+ /* Enable GIC distributor and interface on CPU0 */
+ gic_cpu_enable();
+ gic_dist_enable();
+
+ /*
+ * Dummy dispatcher call after OSWR and OFF
+ * Restore the right return Kernel address (with MMU on) for
+ * subsequent calls to secure ROM. Otherwise the return address
+ * will be to a PA return address and the system will hang.
+ */
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+ omap4_secure_dispatcher(PPA_SERVICE_0,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ }
+
+ if (omap4_device_prev_state_off()) {
+ restore_ivahd_tesla_regs();
+ restore_l3instr_regs();
+ }
+
+ pwrdm_post_transition();
+
+ret:
+ return 0;
+}
+
+static void save_l2x0_auxctrl(void)
+{
+#ifdef CONFIG_CACHE_L2X0
+ /*
+ * Save the L2X0 AUXCTRL value to SAR memory. It is used
+ * in every MPUSS OFF restore path.
+ */
+ void __iomem *l2x0_base = omap4_get_l2cache_base();
+ u32 val;
+
+ val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+ __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+
+ /*
+ * Save the L2X0 PREFETCH_CTRL value to SAR memory.
+ * It is used in every MPUSS OFF restore path.
+ */
+
+ val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+ __raw_writel(val, sar_base + L2X0_PREFETCHCTRL_OFFSET);
+
+ /* Save L2X0 LOCKDOWN_OFFSET0 during SAR */
+ val = readl_relaxed(l2x0_base + 0x900);
+ writel_relaxed(val, sar_base + L2X0_LOCKDOWN_OFFSET0);
+#endif
+}
+
+/*
+ * Initialise OMAP4 MPUSS
+ */
+int __init omap4_mpuss_init(void)
+{
+ struct omap4_cpu_pm_info *pm_info;
+ u8 i;
+
+ /* Get GIC and SAR RAM base addresses */
+ sar_base = omap4_get_sar_ram_base();
+ gic_dist_base = omap4_get_gic_dist_base();
+ gic_cpu_base = omap4_get_gic_cpu_base();
+
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+ return -ENODEV;
+ }
+
+ /* Initialise per-CPU PM information */
+ pm_info = &per_cpu(omap4_pm_info, 0x0);
+ pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
+ pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
+ if (!pm_info->pwrdm) {
+ pr_err("Lookup failed for CPU0 pwrdm\n");
+ return -ENODEV;
+ }
+
+ /* Clear CPU previous power domain state */
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+ cpu_clear_prev_logic_pwrst(0);
+
+ /* Initialise CPU0 power domain state to ON */
+ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+ pm_info = &per_cpu(omap4_pm_info, 0x1);
+ pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
+ pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
+ if (!pm_info->pwrdm) {
+ pr_err("Lookup failed for CPU1 pwrdm\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Check the OMAP type and store it to scratchpad
+ */
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
+ /* Memory not released */
+ secure_ram = dma_alloc_coherent(NULL, OMAP4_SECURE_RAM_STORAGE,
+ (dma_addr_t *)&omap4_secure_ram_phys, GFP_ATOMIC);
+ if (!secure_ram)
+ pr_err("Unable to allocate secure ram storage\n");
+ writel(0x1, sar_base + OMAP_TYPE_OFFSET);
+ } else {
+ writel(0x0, sar_base + OMAP_TYPE_OFFSET);
+ }
+
+ /* Clear CPU previous power domain state */
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+ cpu_clear_prev_logic_pwrst(1);
+
+ /* Initialise CPU1 power domain state to ON */
+ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+ /*
+ * Program the wakeup routine address for the CPU0 and CPU1
+ * used for OFF or DORMANT wakeup. Wakeup routine address
+ * is fixed, so program it in init itself.
+ */
+ __raw_writel(virt_to_phys(omap4_cpu_resume),
+ sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
+ __raw_writel(virt_to_phys(omap4_cpu_resume),
+ sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET);
+
+ mpuss_pd = pwrdm_lookup("mpu_pwrdm");
+ if (!mpuss_pd) {
+ pr_err("Failed to get lookup for MPUSS pwrdm\n");
+ return -ENODEV;
+ }
+
+ /* Clear CPU previous power domain state */
+ pwrdm_clear_all_prev_pwrst(mpuss_pd);
+ mpuss_clear_prev_logic_pwrst();
+
+ /*
+ * Find out how many interrupts are supported.
+ * OMAP4 supports a max of 128 SPIs, whereas the GIC can support
+ * up to 1020 interrupt sources. On OMAP4, the maximum SPI count is
+ * fused in the DIST_CTR bit-fields as 128. Hence the code is safe
+ * from reserved register writes since it is well within 1020.
+ */
+ max_spi_reg = __raw_readl(gic_dist_base + GIC_DIST_CTR) & 0x1f;
+ max_spi_irq = max_spi_reg * 32;
+
+ /*
+ * Mark the PPI and SPI interrupts as non-secure.
+ * Program the SAR locations for the interrupt security registers to
+ * reflect the same.
+ */
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU0_OFFSET, 0);
+ sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU1_OFFSET, 0);
+ for (i = 0; i < max_spi_reg; i++)
+ sar_writel(GIC_ISR_NON_SECURE, ICDISR_SPI_OFFSET, i);
+ }
+ save_l2x0_auxctrl();
+
+ return 0;
+}
+
+#endif
+
diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
new file mode 100644
index 0000000..851db59
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-sar-layout.h
@@ -0,0 +1,124 @@
+/*
+ * omap4-sar-layout.h: OMAP4 SAR RAM layout header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+
+#include <mach/hardware.h>
+#include <mach/omap4-common.h>
+#include <mach/emif-44xx.h>
+#include <mach/dmm-44xx.h>
+#include <mach/ctrl_module_pad_core_44xx.h>
+
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "prcm-common.h"
+
+/*
+ * The SAR RAM is maintained during Device OFF mode.
+ * It is split into 4 banks with different privilege accesses
+ *
+ * ---------------------------------------------------------------------
+ * Access mode Bank Address Range
+ * ---------------------------------------------------------------------
+ * HS/GP : Public 1 0x4A32_6000 - 0x4A32_6FFF (4kB)
+ * HS/GP : Public, Secured
+ * if padconfaccdisable=1 2 0x4A32_7000 - 0x4A32_73FF (1kB)
+ * HS/EMU : Secured
+ * GP : Public 3 0x4A32_8000 - 0x4A32_87FF (2kB)
+ * HS/GP :
+ * Secure Privilege,
+ * write once. 4 0x4A32_9000 - 0x4A32_93FF (1kB)
+ * ---------------------------------------------------------------------
+ * The SAR RAM save register layout is fixed since restore is done by hardware.
+ */
+
+#define MODULE_ADDR_IDX 0
+#define MODULE_OFFSET_IDX 1
+#define MODULE_NB_REGS_IDX 2
+#define SAR_RAM_OFFSET_IDX 3
+
+/*
+ * Module Index used to lookup VA using index
+ */
+#define MAX_SAR_MODULES 14
+#define EMIF1_INDEX 0
+#define EMIF2_INDEX 1
+#define DMM_INDEX 2
+#define CM1_INDEX 3
+#define CM2_INDEX 4
+#define C2C_INDEX 5
+#define CTRL_MODULE_PAD_CORE_INDEX 6
+#define L3_CLK1_INDEX 7
+#define L3_CLK2_INDEX 8
+#define L3_CLK3_INDEX 9
+#define USBTLL_INDEX 10
+#define UHH_INDEX 11
+#define L4CORE_INDEX 12
+#define L4PER_INDEX 13
+
+/*
+ * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
+ */
+#define SAR_BANK1_OFFSET 0x0000
+#define SAR_BANK2_OFFSET 0x1000
+#define SAR_BANK3_OFFSET 0x2000
+#define SAR_BANK4_OFFSET 0x3000
+
+/* Scratch pad memory offsets from SAR_BANK1 */
+#define CPU0_SAVE_OFFSET 0xb00
+#define CPU1_SAVE_OFFSET 0xc00
+#define MMU_OFFSET0 0xd00
+#define MMU_OFFSET1 0xd10
+#define SCU_OFFSET0 0xd20
+#define SCU_OFFSET1 0xd24
+#define L2X0_AUXCTRL_OFFSET 0xd28
+#define OMAP_TYPE_OFFSET 0xd2c
+#define L2X0_LOCKDOWN_OFFSET0 0xd30
+#define L2X0_PREFETCHCTRL_OFFSET 0xd34
+#define L2X0_SAVE_OFFSET0 0xd38
+#define L2X0_SAVE_OFFSET1 0xd3c
+
+/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
+#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04
+#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08
+
+/* GIC save restore offset from SAR_BANK3 */
+#define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500)
+#define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504)
+#define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508)
+#define ICDISR_CPU0_OFFSET (SAR_BANK3_OFFSET + 0x50c)
+#define ICDISR_CPU1_OFFSET (SAR_BANK3_OFFSET + 0x510)
+#define ICDISR_SPI_OFFSET (SAR_BANK3_OFFSET + 0x514)
+#define ICDISER_CPU0_OFFSET (SAR_BANK3_OFFSET + 0x524)
+#define ICDISER_CPU1_OFFSET (SAR_BANK3_OFFSET + 0x528)
+#define ICDISER_SPI_OFFSET (SAR_BANK3_OFFSET + 0x52c)
+#define ICDIPR_SFI_CPU0_OFFSET (SAR_BANK3_OFFSET + 0x53c)
+#define ICDIPR_PPI_CPU0_OFFSET (SAR_BANK3_OFFSET + 0x54c)
+#define ICDIPR_SFI_CPU1_OFFSET (SAR_BANK3_OFFSET + 0x550)
+#define ICDIPR_PPI_CPU1_OFFSET (SAR_BANK3_OFFSET + 0x560)
+#define ICDIPR_SPI_OFFSET (SAR_BANK3_OFFSET + 0x564)
+#define ICDIPTR_SPI_OFFSET (SAR_BANK3_OFFSET + 0x5e4)
+#define ICDICFR_OFFSET (SAR_BANK3_OFFSET + 0x664)
+#define SAR_BACKUP_STATUS_GIC_CPU0 0x1
+#define SAR_BACKUP_STATUS_GIC_CPU1 0x2
+
+/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */
+#define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694)
+#define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4)
+#define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4)
+#define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8)
+#define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc)
+#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0)
+#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10
+
+#endif
diff --git a/arch/arm/mach-omap2/omap4-sar.c b/arch/arm/mach-omap2/omap4-sar.c
new file mode 100644
index 0000000..85b5fe9
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-sar.c
@@ -0,0 +1,1054 @@
+/*
+ * OMAP4 Save Restore source file
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/omap4-common.h>
+#include <mach/ctrl_module_wkup_44xx.h>
+
+#include "clockdomain.h"
+#include "omap4-sar-layout.h"
+#include "cm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "cminst44xx.h"
+
+/*
+ * These SECURE control registers are used to work-around
+ * DDR corruption on the second chip select on OMAP443x.
+ */
+#define OMAP4_CTRL_SECURE_EMIF1_SDRAM_CONFIG2_REG 0x0114
+#define OMAP4_CTRL_SECURE_EMIF2_SDRAM_CONFIG2_REG 0x011c
+
+static void __iomem *sar_ram_base;
+static void __iomem *omap4_sar_modules[MAX_SAR_MODULES];
+static struct powerdomain *l3init_pwrdm;
+static struct clockdomain *l3init_clkdm;
+static struct clk *usb_host_ck, *usb_tll_ck;
+
+/*
+ * The SAR_RAM1 register layout consists of EMIF1, EMIF2, CM1, CM2,
+ * CONTROL_CORE efuse, DMM and USB TLL registers.
+ * The layout is arranged as a two-dimensional array as shown
+ * below,
+ * const u32 sar_ramX_layout[nb_regs_sets][4] = {
+ * {module_index, reg_offset, size, sar_ram_offset},
+ * }
+ */
+static const u32 omap443x_sar_ram1_layout[][4] = {
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG, 1, 0x00000000},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG_2, 1, 0x00000004},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL, 1, 0x00000008},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW, 1, 0x0000000C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1, 1, 0x00000010},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1_SHDW, 1, 0x00000014},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2, 1, 0x00000018},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2_SHDW, 1, 0x0000001C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3, 1, 0x00000020},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3_SHDW, 1, 0x00000024},
+ {EMIF1_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM, 1, 0x00000028},
+ {EMIF1_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM_SHDW, 1, 0x0000002C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL, 1, 0x00000030},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW, 1, 0x00000034},
+ {EMIF1_INDEX, OMAP44XX_EMIF_OCP_CONFIG, 1, 0x00000038},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PERF_CNT_CFG, 1, 0x0000003C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PERF_CNT_SEL, 1, 0x00000040},
+ {EMIF1_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL, 1, 0x00000044},
+ {EMIF1_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW, 1, 0x00000048},
+ {EMIF1_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_SYS, 1, 0x0000004C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_LL, 1, 0x00000050},
+ {EMIF1_INDEX, OMAP44XX_EMIF_ZQ_CONFIG, 1, 0x00000054},
+ {EMIF1_INDEX, OMAP44XX_EMIF_TEMP_ALERT_CONFIG, 1, 0x00000058},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1, 1, 0x0000005C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW, 1, 0x00000060},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_2, 1, 0x00000064},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG, 1, 0x00000068},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG_2, 1, 0x0000006C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL, 1, 0x00000070},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW, 1, 0x00000074},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1, 1, 0x00000078},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1_SHDW, 1, 0x0000007C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2, 1, 0x00000080},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2_SHDW, 1, 0x00000084},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3, 1, 0x00000088},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3_SHDW, 1, 0x0000008C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM, 1, 0x00000090},
+ {EMIF2_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM_SHDW, 1, 0x00000094},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL, 1, 0x00000098},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW, 1, 0x0000009C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_OCP_CONFIG, 1, 0x000000A0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PERF_CNT_CFG, 1, 0x000000A4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PERF_CNT_SEL, 1, 0x000000A8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL, 1, 0x000000AC},
+ {EMIF2_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW, 1, 0x000000B0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_SYS, 1, 0x000000B4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_LL, 1, 0x000000B8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_ZQ_CONFIG, 1, 0x000000BC},
+ {EMIF2_INDEX, OMAP44XX_EMIF_TEMP_ALERT_CONFIG, 1, 0x000000C0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1, 1, 0x000000C4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW, 1, 0x000000C8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_2, 1, 0x000000CC},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET, 1, 0x000000D0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET, 1, 0x000000D4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000D8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000DC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000EC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000FC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET, 1, 0x00000100},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET, 1, 0x00000104},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET, 1, 0x00000108},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000010C},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET, 1, 0x00000110},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET, 1, 0x00000114},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000118},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000011C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000120},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000124},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000128},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000012C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET, 1, 0x00000130},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET, 1, 0x00000134},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET, 1, 0x00000138},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET, 1, 0x0000013C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET, 1, 0x00000140},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000144},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000148},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET, 1, 0x0000014C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000150},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000154},
+ {C2C_INDEX, 0x0C, 1, 0x00000158},
+ {C2C_INDEX, 0x10, 1, 0x0000015C},
+ {C2C_INDEX, 0x28, 1, 0x00000160},
+ {C2C_INDEX, 0x40, 1, 0x00000164},
+ {C2C_INDEX, 0x44, 1, 0x00000168},
+ {C2C_INDEX, 0x70, 1, 0x0000016C},
+ {C2C_INDEX, 0x74, 1, 0x00000170},
+ {C2C_INDEX, 0x84, 1, 0x00000174},
+ {C2C_INDEX, 0x88, 1, 0x00000178},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PADCONF_GLOBAL, 15, 0x0000017C},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE, 14, 0x000001B8},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_0, 8, 0x000001F0},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_BUS_HOLD, 1, 0x00000210},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_C2C, 1, 0x00000214},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1, 1, 0x00000218},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2, 1, 0x0000021C},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_4, 1, 0x00000220},
+	{DMM_INDEX, OMAP44XX_DMM_LISA_MAP, 4, 0x00000224},
+ {DMM_INDEX, OMAP44XX_DMM_LISA_LOCK, 1, 0x00000234},
+ {DMM_INDEX, OMAP44XX_DMM_TILER_OR, 2, 0x00000238},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW, 2, 0x00000240},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW_MAP, 4, 0x00000248},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW_MAP_BASE, 1, 0x00000258},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_IRQENABLE_SET, 1, 0x0000025C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR, 1, 0x00000260},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA, 1, 0x00000264},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL, 1, 0x00000268},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA, 1, 0x0000026C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x10, 1, 0x00000270},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x10, 1, 0x00000274},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x10, 1, 0x00000278},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x10, 1, 0x0000027C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x20, 1, 0x00000280},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x20, 1, 0x00000284},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x20, 1, 0x00000288},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x20, 1, 0x0000028C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x30, 1, 0x00000290},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x30, 1, 0x00000294},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x30, 1, 0x00000298},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x30, 1, 0x0000029C},
+ {DMM_INDEX, OMAP44XX_DMM_PEG_PRIO, 2, 0x000002A0},
+ {DMM_INDEX, OMAP44XX_DMM_PEG_PRIO_PAT, 1, 0x000002A8},
+ {L3_CLK1_INDEX, 0x508, 1, 0x000002AC},
+ {L3_CLK1_INDEX, 0x510, 1, 0x000002B0},
+ {L3_CLK1_INDEX, 0x708, 1, 0x000002B4},
+ {L3_CLK1_INDEX, 0x70C, 1, 0x000002B8},
+ {L3_CLK1_INDEX, 0x808, 1, 0x000002BC},
+ {L3_CLK2_INDEX, 0x1008, 1, 0x000002C0},
+ {L3_CLK2_INDEX, 0x1010, 1, 0x000002C4},
+ {L3_CLK2_INDEX, 0x1208, 1, 0x000002C8},
+ {L3_CLK2_INDEX, 0x1308, 1, 0x000002CC},
+ {L3_CLK2_INDEX, 0x130C, 1, 0x000002D0},
+ {L3_CLK2_INDEX, 0x1408, 1, 0x000002D4},
+ {L3_CLK2_INDEX, 0x140C, 1, 0x000002D8},
+ {L3_CLK2_INDEX, 0x1508, 1, 0x000002DC},
+ {L3_CLK2_INDEX, 0x150C, 1, 0x000002E0},
+ {L3_CLK3_INDEX, 0x208, 1, 0x000002E4},
+ {L3_CLK3_INDEX, 0x210, 1, 0x000002E8},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET, 1, 0x000002EC},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET, 1, 0x000002F0},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET, 1, 0x000002F4},
+ {USBTLL_INDEX, 0x400, 7, 0x000002F8},
+ {UHH_INDEX, 0x10, 1, 0x00000314},
+ {UHH_INDEX, 0x40, 1, 0x00000318},
+ {UHH_INDEX, 0x100, 384, 0x0000031C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET, 1, 0x0000091C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET, 1, 0x00000920},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET, 1, 0x00000924},
+};
+
+/*
+ * The SAR_RAM2 register layout consists of the SYSCTRL_PADCONF_CORE registers
+ */
+static const u32 omap443x_sar_ram2_layout[][4] = {
+ {CTRL_MODULE_PAD_CORE_INDEX, 0x40, 102, 0x00000000},
+};
+
+/*
+ * The SAR_RAM3 and SAR_RAM4 layout is not listed since it is mostly
+ * handled by secure software.
+ */
+static const u32 omap443x_sar_ram3_layout[][4] = {
+ {L4CORE_INDEX, 0x2140, 1, 0x00000000},
+ {L4CORE_INDEX, 0x2104, 1, 0x00000004},
+ {L4CORE_INDEX, 0x2100, 1, 0x00000008},
+ {L4CORE_INDEX, 0x2108, 1, 0x0000000C},
+ {L4CORE_INDEX, 0x210C, 1, 0x00000010},
+ {L4CORE_INDEX, 0x2110, 1, 0x00000014},
+ {L4CORE_INDEX, 0x2114, 1, 0x00000018},
+ {L4CORE_INDEX, 0x204088, 14, 0x0000001C},
+ {L4CORE_INDEX, 0x206088, 2, 0x00000054},
+ {L4CORE_INDEX, 0x20C088, 30, 0x0000005C},
+ {L4CORE_INDEX, 0x210088, 30, 0x000000D4},
+ {L4CORE_INDEX, 0x212088, 38, 0x0000014C},
+ {L4CORE_INDEX, 0x214088, 2, 0x000001E4},
+ {L4CORE_INDEX, 0x216088, 2, 0x000001EC},
+ {L4CORE_INDEX, 0x218088, 2, 0x000001F4},
+ {L4CORE_INDEX, 0x21C088, 2, 0x000001FC},
+ {L4CORE_INDEX, 0x21E088, 2, 0x00000204},
+ {L4CORE_INDEX, 0x220088, 2, 0x0000020C},
+ {L4CORE_INDEX, 0x226088, 6, 0x00000214},
+ {L4CORE_INDEX, 0x228088, 2, 0x0000022C},
+ {L4CORE_INDEX, 0x22A088, 14, 0x00000234},
+ {L4PER_INDEX, 0x218, 1, 0x0000026C},
+ {L4PER_INDEX, 0x220, 1, 0x00000270},
+ {L4PER_INDEX, 0x228, 1, 0x00000274},
+ {L4PER_INDEX, 0x230, 1, 0x00000278},
+ {L4PER_INDEX, 0x238, 1, 0x0000027C},
+ {L4PER_INDEX, 0x298, 2, 0x00000280},
+ {L4PER_INDEX, 0x2A0, 2, 0x00000288},
+ {L4PER_INDEX, 0x2A8, 2, 0x00000290},
+ {L4PER_INDEX, 0x2B0, 2, 0x00000298},
+ {L4PER_INDEX, 0x2B8, 2, 0x000002A0},
+ {L4PER_INDEX, 0x304, 1, 0x000002A8},
+ {L4PER_INDEX, 0x31C, 1, 0x000002AC},
+ {L4PER_INDEX, 0x32C, 1, 0x000002B0},
+ {L4PER_INDEX, 0x33C, 1, 0x000002B4},
+ {L4PER_INDEX, 0x34C, 1, 0x000002B8},
+ {L4PER_INDEX, 0x35C, 1, 0x000002BC},
+ {L4PER_INDEX, 0x36C, 1, 0x000002C0},
+ {L4PER_INDEX, 0x37C, 1, 0x000002C4},
+ {L4PER_INDEX, 0x38C, 1, 0x000002C8},
+ {L4PER_INDEX, 0x39C, 1, 0x000002CC},
+ {L4PER_INDEX, 0x3AC, 1, 0x000002D0},
+ {L4PER_INDEX, 0x3BC, 1, 0x000002D4},
+ {L4PER_INDEX, 0x3CC, 1, 0x000002D8},
+ {L4PER_INDEX, 0x3D4, 1, 0x000002DC},
+ {L4PER_INDEX, 0x3E4, 1, 0x000002E0},
+ {L4PER_INDEX, 0x3F4, 1, 0x000002E4},
+ {L4PER_INDEX, 0x404, 1, 0x000002E8},
+ {L4PER_INDEX, 0x414, 1, 0x000002EC},
+ {L4PER_INDEX, 0x42C, 1, 0x000002F0},
+ {L4PER_INDEX, 0x43C, 1, 0x000002F4},
+ {L4PER_INDEX, 0x44C, 1, 0x000002F8},
+ {L4PER_INDEX, 0x45C, 1, 0x000002FC},
+ {L4PER_INDEX, 0x46C, 1, 0x00000300},
+ {L4PER_INDEX, 0x47C, 1, 0x00000304},
+ {L4PER_INDEX, 0x48C, 1, 0x00000308},
+ {L4PER_INDEX, 0x49C, 1, 0x0000030C},
+ {L4PER_INDEX, 0x4AC, 1, 0x00000310},
+ {L4PER_INDEX, 0x4BC, 1, 0x00000314},
+ {L4PER_INDEX, 0x4CC, 1, 0x00000318},
+ {L4PER_INDEX, 0x4DC, 1, 0x0000031C},
+ {L4PER_INDEX, 0x4EC, 1, 0x00000320},
+ {L4PER_INDEX, 0x4FC, 1, 0x00000324},
+ {L4PER_INDEX, 0x50C, 1, 0x00000328},
+ {L4PER_INDEX, 0x51C, 1, 0x0000032C},
+ {L4PER_INDEX, 0x52C, 1, 0x00000330},
+ {L4PER_INDEX, 0x53C, 1, 0x00000334},
+ {L4PER_INDEX, 0x54C, 1, 0x00000338},
+ {L4PER_INDEX, 0x55C, 1, 0x0000033C},
+ {L4PER_INDEX, 0x56C, 1, 0x00000340},
+ {L4PER_INDEX, 0x57C, 1, 0x00000344},
+ {L4PER_INDEX, 0x5A4, 1, 0x00000348},
+ {L4CORE_INDEX, 0x230, 1, 0x0000034C},
+ {L4CORE_INDEX, 0x238, 1, 0x00000350},
+ {L4CORE_INDEX, 0x2B0, 2, 0x00000354},
+ {L4CORE_INDEX, 0x2B8, 2, 0x0000035C},
+ {L4CORE_INDEX, 0x304, 1, 0x00000364},
+ {L4CORE_INDEX, 0x31C, 1, 0x00000368},
+ {L4CORE_INDEX, 0x32C, 1, 0x0000036C},
+ {L4CORE_INDEX, 0x33C, 1, 0x00000370},
+ {L4CORE_INDEX, 0x354, 1, 0x00000374},
+ {L4CORE_INDEX, 0x35C, 1, 0x00000378},
+ {L4CORE_INDEX, 0x36C, 1, 0x0000037C},
+ {L4CORE_INDEX, 0x37C, 1, 0x00000380},
+ {L4CORE_INDEX, 0x38C, 1, 0x00000384},
+ {L4CORE_INDEX, 0x3AC, 1, 0x00000388},
+ {L4CORE_INDEX, 0x3BC, 1, 0x0000038C},
+ {L4CORE_INDEX, 0x3CC, 1, 0x00000390},
+ {L4CORE_INDEX, 0x3DC, 1, 0x00000394},
+ {L4CORE_INDEX, 0x3EC, 1, 0x00000398},
+ {L4CORE_INDEX, 0x3FC, 1, 0x0000039C},
+ {L4CORE_INDEX, 0x40C, 1, 0x000003A0},
+ {L4CORE_INDEX, 0x41C, 1, 0x000003A4},
+ {L4CORE_INDEX, 0x42C, 1, 0x000003A8},
+ {L4CORE_INDEX, 0x43C, 1, 0x000003AC},
+ {L4CORE_INDEX, 0x44C, 1, 0x000003B0},
+ {L4CORE_INDEX, 0x45C, 1, 0x000003B4},
+ {L4CORE_INDEX, 0x46C, 1, 0x000003B8},
+ {L4CORE_INDEX, 0x47C, 1, 0x000003BC},
+ {L4CORE_INDEX, 0x48C, 1, 0x000003C0},
+ {L4CORE_INDEX, 0x49C, 1, 0x000003C4},
+ {L4CORE_INDEX, 0x4AC, 1, 0x000003C8},
+ {L4CORE_INDEX, 0x4BC, 1, 0x000003CC},
+ {L4CORE_INDEX, 0x4CC, 1, 0x000003D0},
+ {L4CORE_INDEX, 0x4DC, 1, 0x000003D4},
+ {L4CORE_INDEX, 0x4EC, 1, 0x000003D8},
+ {L4CORE_INDEX, 0x4FC, 1, 0x000003DC},
+ {L4CORE_INDEX, 0x50C, 1, 0x000003E0},
+ {L4CORE_INDEX, 0x51C, 1, 0x000003E4},
+ {L4CORE_INDEX, 0x52C, 1, 0x000003E8},
+ {L4CORE_INDEX, 0x53C, 1, 0x000003EC},
+ {L4CORE_INDEX, 0x54C, 1, 0x000003F0},
+ {L4CORE_INDEX, 0x55C, 1, 0x000003F4},
+ {L4CORE_INDEX, 0x56C, 1, 0x000003F8},
+ {L4CORE_INDEX, 0x574, 1, 0x000003FC},
+ {L4CORE_INDEX, 0x584, 1, 0x00000400},
+ {L4CORE_INDEX, 0x594, 1, 0x00000404},
+ {L4CORE_INDEX, 0x5A4, 1, 0x00000408},
+ {L4CORE_INDEX, 0x5B4, 1, 0x0000040C},
+ {L4CORE_INDEX, 0x5C4, 1, 0x00000410},
+ {L4CORE_INDEX, 0x5D4, 1, 0x00000414},
+ {L4CORE_INDEX, 0x5DC, 1, 0x00000418},
+};
+
+
+static const u32 omap446x_sar_ram1_layout[][4] = {
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG_2, 1, 0x00000000},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG, 1, 0x00000004},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL, 1, 0x00000008},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW, 1, 0x0000000C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1, 1, 0x00000010},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1_SHDW, 1, 0x00000014},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2, 1, 0x00000018},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2_SHDW, 1, 0x0000001C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3, 1, 0x00000020},
+ {EMIF1_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3_SHDW, 1, 0x00000024},
+ {EMIF1_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM, 1, 0x00000028},
+ {EMIF1_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM_SHDW, 1, 0x0000002C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL, 1, 0x00000030},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW, 1, 0x00000034},
+ {EMIF1_INDEX, OMAP44XX_EMIF_OCP_CONFIG, 1, 0x00000038},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PERF_CNT_CFG, 1, 0x0000003C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_PERF_CNT_SEL, 1, 0x00000040},
+ {EMIF1_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL, 1, 0x00000044},
+ {EMIF1_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW, 1, 0x00000048},
+ {EMIF1_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_SYS, 1, 0x0000004C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_LL, 1, 0x00000050},
+ {EMIF1_INDEX, OMAP44XX_EMIF_ZQ_CONFIG, 1, 0x00000054},
+ {EMIF1_INDEX, OMAP44XX_EMIF_TEMP_ALERT_CONFIG, 1, 0x00000058},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1, 1, 0x0000005C},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW, 1, 0x00000060},
+ {EMIF1_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_2, 1, 0x00000064},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG_2, 1, 0x00000068},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_CONFIG, 1, 0x0000006C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL, 1, 0x00000070},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW, 1, 0x00000074},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1, 1, 0x00000078},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_1_SHDW, 1, 0x0000007C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2, 1, 0x00000080},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_2_SHDW, 1, 0x00000084},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3, 1, 0x00000088},
+ {EMIF2_INDEX, OMAP44XX_EMIF_SDRAM_TIM_3_SHDW, 1, 0x0000008C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM, 1, 0x00000090},
+ {EMIF2_INDEX, OMAP44XX_EMIF_LPDDR2_NVM_TIM_SHDW, 1, 0x00000094},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL, 1, 0x00000098},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW, 1, 0x0000009C},
+ {EMIF2_INDEX, OMAP44XX_EMIF_OCP_CONFIG, 1, 0x000000A0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PERF_CNT_CFG, 1, 0x000000A4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_PERF_CNT_SEL, 1, 0x000000A8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL, 1, 0x000000AC},
+ {EMIF2_INDEX, OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW, 1, 0x000000B0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_SYS, 1, 0x000000B4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_IRQENABLE_SET_LL, 1, 0x000000B8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_ZQ_CONFIG, 1, 0x000000BC},
+ {EMIF2_INDEX, OMAP44XX_EMIF_TEMP_ALERT_CONFIG, 1, 0x000000C0},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1, 1, 0x000000C4},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW, 1, 0x000000C8},
+ {EMIF2_INDEX, OMAP44XX_EMIF_DDR_PHY_CTRL_2, 1, 0x000000CC},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET, 1, 0x000000D0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET, 1, 0x000000D4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000D8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000DC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000E8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000EC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F0},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F4},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000F8},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET, 1, 0x000000FC},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET, 1, 0x00000100},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET, 1, 0x00000104},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET, 1, 0x00000108},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000010C},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET, 1, 0x00000110},
+ {CM1_INDEX, OMAP4430_CM1_RESTORE_INST +
+ OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET, 1, 0x00000114},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000118},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000011C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000120},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000124},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET, 1, 0x00000128},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET, 1, 0x0000012C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET, 1, 0x00000130},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET, 1, 0x00000134},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET, 1, 0x00000138},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET, 1, 0x0000013C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET, 1, 0x00000140},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000144},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000148},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET, 1, 0x0000014C},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000150},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET, 1, 0x00000154},
+ {C2C_INDEX, 0x0C, 1, 0x00000158},
+ {C2C_INDEX, 0x10, 1, 0x0000015C},
+ {C2C_INDEX, 0x28, 1, 0x00000160},
+ {C2C_INDEX, 0x40, 1, 0x00000164},
+ {C2C_INDEX, 0x44, 1, 0x00000168},
+ {C2C_INDEX, 0x70, 1, 0x0000016C},
+ {C2C_INDEX, 0x74, 1, 0x00000170},
+ {C2C_INDEX, 0x84, 1, 0x00000174},
+ {C2C_INDEX, 0x88, 1, 0x00000178},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PADCONF_GLOBAL, 15, 0x0000017C},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE, 14, 0x000001B8},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_0, 8, 0x000001F0},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_BUS_HOLD, 1, 0x00000210},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_C2C, 1, 0x00000214},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1, 1, 0x00000218},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2, 1, 0x0000021C},
+ {CTRL_MODULE_PAD_CORE_INDEX,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_4, 1, 0x00000220},
+ {L4CORE_INDEX, 0x2350, 1, 0x00000224},
+	{DMM_INDEX, OMAP44XX_DMM_LISA_MAP, 4, 0x00000228},
+ {DMM_INDEX, OMAP44XX_DMM_LISA_LOCK, 1, 0x00000238},
+ {DMM_INDEX, OMAP44XX_DMM_TILER_OR, 2, 0x0000023C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW, 2, 0x00000244},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW_MAP, 4, 0x0000024C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_VIEW_MAP_BASE, 1, 0x0000025C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_IRQENABLE_SET, 1, 0x00000260},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR, 1, 0x00000264},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA, 1, 0x00000268},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL, 1, 0x0000026C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA, 1, 0x00000270},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x10, 1, 0x00000274},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x10, 1, 0x00000278},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x10, 1, 0x0000027C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x10, 1, 0x00000280},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x20, 1, 0x00000284},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x20, 1, 0x00000288},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x20, 1, 0x0000028C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x20, 1, 0x00000290},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DESCR + 0x30, 1, 0x00000294},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_AREA + 0x30, 1, 0x00000298},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_CTRL + 0x30, 1, 0x0000029C},
+ {DMM_INDEX, OMAP44XX_DMM_PAT_DATA + 0x30, 1, 0x000002A0},
+ {DMM_INDEX, OMAP44XX_DMM_PEG_PRIO, 2, 0x000002A4},
+ {DMM_INDEX, OMAP44XX_DMM_PEG_PRIO_PAT, 1, 0x000002AC},
+ {L3_CLK1_INDEX, 0x508, 1, 0x000002B0},
+ {L3_CLK1_INDEX, 0x510, 1, 0x000002B4},
+ {L3_CLK1_INDEX, 0x708, 1, 0x000002B8},
+ {L3_CLK1_INDEX, 0x70C, 1, 0x000002BC},
+ {L3_CLK1_INDEX, 0x808, 1, 0x000002C0},
+ {L3_CLK2_INDEX, 0x1008, 1, 0x000002C4},
+ {L3_CLK2_INDEX, 0x1010, 1, 0x000002C8},
+ {L3_CLK2_INDEX, 0x1208, 1, 0x000002CC},
+ {L3_CLK2_INDEX, 0x1308, 1, 0x000002D0},
+ {L3_CLK2_INDEX, 0x130C, 1, 0x000002D4},
+ {L3_CLK2_INDEX, 0x1408, 1, 0x000002D8},
+ {L3_CLK2_INDEX, 0x140C, 1, 0x000002DC},
+ {L3_CLK2_INDEX, 0x1508, 1, 0x000002E0},
+ {L3_CLK2_INDEX, 0x150C, 1, 0x000002E4},
+ {L3_CLK3_INDEX, 0x208, 1, 0x000002E8},
+ {L3_CLK3_INDEX, 0x210, 1, 0x000002EC},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET, 1, 0x000002F0},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET, 1, 0x000002F4},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET, 1, 0x000002F8},
+ {USBTLL_INDEX, 0x400, 7, 0x000002FC},
+ {UHH_INDEX, 0x10, 1, 0x00000318},
+ {UHH_INDEX, 0x40, 1, 0x0000031C},
+ {UHH_INDEX, 0x100, 384, 0x00000320},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET, 1, 0x00000920},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET, 1, 0x00000924},
+ {CM2_INDEX, OMAP4430_CM2_RESTORE_INST +
+ OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET, 1, 0x00000928},
+};
+
+/*
+ * The SAR_RAM2 register layout consists of the SYSCTRL_PADCONF_CORE registers
+ */
+static const u32 omap446x_sar_ram2_layout[][4] = {
+ {CTRL_MODULE_PAD_CORE_INDEX, 0x40, 102, 0x00000000},
+ {CTRL_MODULE_PAD_CORE_INDEX, 0x1f4, 1, 0x00000198},
+};
+
+/*
+ * The SAR_RAM3 and SAR_RAM4 layout is not listed since it is mostly
+ * handled by secure software.
+ */
+static const u32 omap446x_sar_ram3_layout[][4] = {
+ {L4CORE_INDEX, 0x2140, 1, 0x00000000},
+ {L4CORE_INDEX, 0x2104, 1, 0x00000004},
+ {L4CORE_INDEX, 0x2100, 1, 0x00000008},
+ {L4CORE_INDEX, 0x2108, 1, 0x0000000C},
+ {L4CORE_INDEX, 0x210C, 1, 0x00000010},
+ {L4CORE_INDEX, 0x2110, 1, 0x00000014},
+ {L4CORE_INDEX, 0x2114, 1, 0x00000018},
+ {L4CORE_INDEX, 0x204088, 14, 0x0000001C},
+ {L4CORE_INDEX, 0x206088, 2, 0x00000054},
+ {L4CORE_INDEX, 0x20C088, 30, 0x0000005C},
+ {L4CORE_INDEX, 0x210088, 30, 0x000000D4},
+ {L4CORE_INDEX, 0x212088, 38, 0x0000014C},
+ {L4CORE_INDEX, 0x214088, 2, 0x000001E4},
+ {L4CORE_INDEX, 0x216088, 2, 0x000001EC},
+ {L4CORE_INDEX, 0x218088, 2, 0x000001F4},
+ {L4CORE_INDEX, 0x21C088, 2, 0x000001FC},
+ {L4CORE_INDEX, 0x21E088, 2, 0x00000204},
+ {L4CORE_INDEX, 0x220088, 2, 0x0000020C},
+ {L4CORE_INDEX, 0x226088, 6, 0x00000214},
+ {L4CORE_INDEX, 0x228088, 2, 0x0000022C},
+ {L4CORE_INDEX, 0x22A088, 14, 0x00000234},
+ {L4CORE_INDEX, 0x20A088, 30, 0x0000026C},
+ {L4PER_INDEX, 0x218, 1, 0x000002E4},
+ {L4PER_INDEX, 0x220, 1, 0x000002E8},
+ {L4PER_INDEX, 0x228, 1, 0x000002EC},
+ {L4PER_INDEX, 0x230, 1, 0x000002F0},
+ {L4PER_INDEX, 0x238, 1, 0x000002F4},
+ {L4PER_INDEX, 0x298, 2, 0x000002F8},
+ {L4PER_INDEX, 0x2A0, 2, 0x00000300},
+ {L4PER_INDEX, 0x2A8, 2, 0x00000308},
+ {L4PER_INDEX, 0x2B0, 2, 0x00000310},
+ {L4PER_INDEX, 0x2B8, 2, 0x00000318},
+ {L4PER_INDEX, 0x304, 1, 0x00000320},
+ {L4PER_INDEX, 0x31C, 1, 0x00000324},
+ {L4PER_INDEX, 0x32C, 1, 0x00000328},
+ {L4PER_INDEX, 0x33C, 1, 0x0000032C},
+ {L4PER_INDEX, 0x34C, 1, 0x00000330},
+ {L4PER_INDEX, 0x35C, 1, 0x00000334},
+ {L4PER_INDEX, 0x36C, 1, 0x00000338},
+ {L4PER_INDEX, 0x37C, 1, 0x0000033C},
+ {L4PER_INDEX, 0x38C, 1, 0x00000340},
+ {L4PER_INDEX, 0x39C, 1, 0x00000344},
+ {L4PER_INDEX, 0x3AC, 1, 0x00000348},
+ {L4PER_INDEX, 0x3BC, 1, 0x0000034C},
+ {L4PER_INDEX, 0x3CC, 1, 0x00000350},
+ {L4PER_INDEX, 0x3D4, 1, 0x00000354},
+ {L4PER_INDEX, 0x3E4, 1, 0x00000358},
+ {L4PER_INDEX, 0x3F4, 1, 0x0000035C},
+ {L4PER_INDEX, 0x404, 1, 0x00000360},
+ {L4PER_INDEX, 0x414, 1, 0x00000364},
+ {L4PER_INDEX, 0x42C, 1, 0x00000368},
+ {L4PER_INDEX, 0x43C, 1, 0x0000036C},
+ {L4PER_INDEX, 0x44C, 1, 0x00000370},
+ {L4PER_INDEX, 0x45C, 1, 0x00000374},
+ {L4PER_INDEX, 0x46C, 1, 0x00000378},
+ {L4PER_INDEX, 0x47C, 1, 0x0000037C},
+ {L4PER_INDEX, 0x48C, 1, 0x00000380},
+ {L4PER_INDEX, 0x49C, 1, 0x00000384},
+ {L4PER_INDEX, 0x4AC, 1, 0x00000388},
+ {L4PER_INDEX, 0x4BC, 1, 0x0000038C},
+ {L4PER_INDEX, 0x4CC, 1, 0x00000390},
+ {L4PER_INDEX, 0x4DC, 1, 0x00000394},
+ {L4PER_INDEX, 0x4EC, 1, 0x00000398},
+ {L4PER_INDEX, 0x4FC, 1, 0x0000039C},
+ {L4PER_INDEX, 0x50C, 1, 0x000003A0},
+ {L4PER_INDEX, 0x51C, 1, 0x000003A4},
+ {L4PER_INDEX, 0x52C, 1, 0x000003A8},
+ {L4PER_INDEX, 0x53C, 1, 0x000003AC},
+ {L4PER_INDEX, 0x54C, 1, 0x000003B0},
+ {L4PER_INDEX, 0x55C, 1, 0x000003B4},
+ {L4PER_INDEX, 0x56C, 1, 0x000003B8},
+ {L4PER_INDEX, 0x57C, 1, 0x000003BC},
+ {L4PER_INDEX, 0x5A4, 1, 0x000003C0},
+ {L4CORE_INDEX, 0x230, 1, 0x000003C4},
+ {L4CORE_INDEX, 0x238, 1, 0x000003C8},
+ {L4CORE_INDEX, 0x2B0, 2, 0x000003CC},
+ {L4CORE_INDEX, 0x2B8, 2, 0x000003D4},
+ {L4CORE_INDEX, 0x304, 1, 0x000003DC},
+ {L4CORE_INDEX, 0x31C, 1, 0x000003E0},
+ {L4CORE_INDEX, 0x32C, 1, 0x000003E4},
+ {L4CORE_INDEX, 0x33C, 1, 0x000003E8},
+ {L4CORE_INDEX, 0x354, 1, 0x000003EC},
+ {L4CORE_INDEX, 0x35C, 1, 0x000003F0},
+ {L4CORE_INDEX, 0x36C, 1, 0x000003F4},
+ {L4CORE_INDEX, 0x37C, 1, 0x000003F8},
+ {L4CORE_INDEX, 0x38C, 1, 0x000003FC},
+ {L4CORE_INDEX, 0x3AC, 1, 0x00000400},
+ {L4CORE_INDEX, 0x3BC, 1, 0x00000404},
+ {L4CORE_INDEX, 0x3CC, 1, 0x00000408},
+ {L4CORE_INDEX, 0x3DC, 1, 0x0000040C},
+ {L4CORE_INDEX, 0x3EC, 1, 0x00000410},
+ {L4CORE_INDEX, 0x3FC, 1, 0x00000414},
+ {L4CORE_INDEX, 0x40C, 1, 0x00000418},
+ {L4CORE_INDEX, 0x41C, 1, 0x0000041C},
+ {L4CORE_INDEX, 0x42C, 1, 0x00000420},
+ {L4CORE_INDEX, 0x43C, 1, 0x00000424},
+ {L4CORE_INDEX, 0x44C, 1, 0x00000428},
+ {L4CORE_INDEX, 0x45C, 1, 0x0000042C},
+ {L4CORE_INDEX, 0x46C, 1, 0x00000430},
+ {L4CORE_INDEX, 0x47C, 1, 0x00000434},
+ {L4CORE_INDEX, 0x48C, 1, 0x00000438},
+ {L4CORE_INDEX, 0x49C, 1, 0x0000043C},
+ {L4CORE_INDEX, 0x4AC, 1, 0x00000440},
+ {L4CORE_INDEX, 0x4BC, 1, 0x00000444},
+ {L4CORE_INDEX, 0x4CC, 1, 0x00000448},
+ {L4CORE_INDEX, 0x4DC, 1, 0x0000044C},
+ {L4CORE_INDEX, 0x4EC, 1, 0x00000450},
+ {L4CORE_INDEX, 0x4FC, 1, 0x00000454},
+ {L4CORE_INDEX, 0x50C, 1, 0x00000458},
+ {L4CORE_INDEX, 0x51C, 1, 0x0000045C},
+ {L4CORE_INDEX, 0x52C, 1, 0x00000460},
+ {L4CORE_INDEX, 0x53C, 1, 0x00000464},
+ {L4CORE_INDEX, 0x54C, 1, 0x00000468},
+ {L4CORE_INDEX, 0x55C, 1, 0x0000046C},
+ {L4CORE_INDEX, 0x56C, 1, 0x00000470},
+ {L4CORE_INDEX, 0x574, 1, 0x00000474},
+ {L4CORE_INDEX, 0x584, 1, 0x00000478},
+ {L4CORE_INDEX, 0x594, 1, 0x0000047C},
+ {L4CORE_INDEX, 0x5A4, 1, 0x00000480},
+ {L4CORE_INDEX, 0x5B4, 1, 0x00000484},
+ {L4CORE_INDEX, 0x5C4, 1, 0x00000488},
+ {L4CORE_INDEX, 0x5D4, 1, 0x0000048C},
+ {L4CORE_INDEX, 0x5DC, 1, 0x00000490},
+ {L4CORE_INDEX, 0x5E4, 1, 0x00000494},
+ {L4CORE_INDEX, 0x5EC, 1, 0x00000498},
+ {L4CORE_INDEX, 0x5F4, 1, 0x0000049C},
+ {L4CORE_INDEX, 0x5FC, 1, 0x000004A0},
+};
+
+/*
+ * sar_save:
+ * Common routine to save registers to SAR RAM with the
+ * given parameters.
+ * @nb_regs - number of layout entries to save
+ * @sar_bank - offset of the SAR RAM bank to back up into
+ * @sar_layout_table - constant table describing the registers to back up
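+ * Each layout entry is {module index, register offset within the module,
+ * number of consecutive registers, destination offset in the SAR bank}.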
+ */
+static void sar_save(u32 nb_regs, u32 sar_bank, const u32 sar_layout_table[][4])
+{
+ u32 reg_val, size, i, j;
+ void __iomem *reg_read_addr, *sar_wr_addr;
+
+ for (i = 0; i < nb_regs; i++) {
+ if (omap4_sar_modules[(sar_layout_table[i][MODULE_ADDR_IDX])]) {
+ size = sar_layout_table[i][MODULE_NB_REGS_IDX];
+ reg_read_addr =
+ omap4_sar_modules[sar_layout_table[i]
+ [MODULE_ADDR_IDX]]
+ + sar_layout_table[i][MODULE_OFFSET_IDX];
+ sar_wr_addr = sar_ram_base + sar_bank +
+ sar_layout_table[i][SAR_RAM_OFFSET_IDX];
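+			/* copy 'size' consecutive registers into the SAR RAM bank */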
+ for (j = 0; j < size; j++) {
+ reg_val = __raw_readl(reg_read_addr + j * 4);
+ __raw_writel(reg_val, sar_wr_addr + j * 4);
+ }
+ }
+ }
+}
+
+static void save_sar_bank3(void)
+{
+ struct clockdomain *l4_secure_clkdm;
+
+ /*
+ * Not supported on ES1.0 silicon
+ */
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
+ return;
+ }
+
+ l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");
+ clkdm_wakeup(l4_secure_clkdm);
+
+ if (cpu_is_omap446x())
+ sar_save(ARRAY_SIZE(omap446x_sar_ram3_layout), SAR_BANK3_OFFSET,
+ omap446x_sar_ram3_layout);
+ else
+ sar_save(ARRAY_SIZE(omap443x_sar_ram3_layout), SAR_BANK3_OFFSET,
+ omap443x_sar_ram3_layout);
+
+ clkdm_allow_idle(l4_secure_clkdm);
+}
+
+static int omap4_sar_not_accessible(void)
+{
+ u32 usbhost_state, usbtll_state;
+
+ /*
+ * Make sure that USB host and TLL modules are not
+ * enabled before attempting to save the context
+ * registers, otherwise this will trigger an exception.
+ */
+ usbhost_state = omap4_cminst_read_inst_reg(OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET)
+ & (OMAP4430_STBYST_MASK | OMAP4430_IDLEST_MASK);
+
+ usbtll_state = omap4_cminst_read_inst_reg(OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET)
+ & OMAP4430_IDLEST_MASK;
+
+ if ((usbhost_state == (OMAP4430_STBYST_MASK | OMAP4430_IDLEST_MASK)) &&
+ (usbtll_state == (OMAP4430_IDLEST_MASK)))
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/*
+ * omap4_sar_save -
+ * Save the context to SAR_RAM1 and SAR_RAM2 as per
+ * omap4xxx_sar_ram1_layout and omap4xxx_sar_ram2_layout for the device OFF
+ * mode
+ */
+int omap4_sar_save(void)
+{
+ /*
+ * Not supported on ES1.0 silicon
+ */
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
+ return -ENODEV;
+ }
+
+ if (omap4_sar_not_accessible()) {
+ pr_debug("%s: USB SAR CNTX registers are not accessible!\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ /*
+	 * SAR bits and clocks need to be enabled
+ */
+ clkdm_wakeup(l3init_clkdm);
+ pwrdm_enable_hdwr_sar(l3init_pwrdm);
+ clk_enable(usb_host_ck);
+ clk_enable(usb_tll_ck);
+
+ /* Save SAR BANK1 */
+ if (cpu_is_omap446x())
+ sar_save(ARRAY_SIZE(omap446x_sar_ram1_layout), SAR_BANK1_OFFSET,
+ omap446x_sar_ram1_layout);
+ else
+ sar_save(ARRAY_SIZE(omap443x_sar_ram1_layout), SAR_BANK1_OFFSET,
+ omap443x_sar_ram1_layout);
+
+ clk_disable(usb_host_ck);
+ clk_disable(usb_tll_ck);
+ pwrdm_disable_hdwr_sar(l3init_pwrdm);
+ clkdm_allow_idle(l3init_clkdm);
+
+ /* Save SAR BANK2 */
+ if (cpu_is_omap446x())
+ sar_save(ARRAY_SIZE(omap446x_sar_ram2_layout), SAR_BANK2_OFFSET,
+ omap446x_sar_ram2_layout);
+ else
+ sar_save(ARRAY_SIZE(omap443x_sar_ram2_layout), SAR_BANK2_OFFSET,
+ omap443x_sar_ram2_layout);
+
+ return 0;
+}
+
+/**
+ * omap4_sar_overwrite:
+ * This API overwrites some of the SAR locations as special cases.
+ * The register content to be saved can be the register value before
+ * going into OFF-mode or a value that is required on wake up. This means
+ * that the restored register value can be different from the last value
+ * of the register before going into OFF-mode.
+ * - CM1 and CM2 configuration
+ *	 Bit 0 of the CM_SHADOW_FREQ_CONFIG1 register and of the
+ *	 CM_SHADOW_FREQ_CONFIG2 register is self-clearing and must
+ *	 be set at restore time. Thus, this data must always be
+ *	 overwritten in the SAR RAM.
+ * - Because USBHOSTHS and USBTLL restore requires a particular
+ *	 sequence, the software must overwrite the data read from
+ *	 the registers involved in phase2a and phase2b.
+ */
+void omap4_sar_overwrite(void)
+{
+ u32 val = 0;
+ u32 offset = 0;
+
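+	/*
+	 * The OMAP4460 SAR RAM1 layout carries one extra entry, so the
+	 * phase2a/2b save locations below are shifted up by 4 bytes.
+	 */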
+ if (cpu_is_omap446x())
+ offset = 0x04;
+
+ /* Overwriting Phase1 data to be restored */
+	/* CM2 MEMIF_CLKSTCTRL = SW_WKUP, before FREQ UPDATE */
+ __raw_writel(0x2, sar_ram_base + SAR_BANK1_OFFSET + 0xd0);
+ /* CM1 CM_SHADOW_FREQ_CONFIG2, Enable FREQ UPDATE */
+ val = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG2);
+ /*
+ * FIXME: Implement FREQ UPDATE for L#/M5 before enabling this
+ * val |= 1 << OMAP4430_FREQ_UPDATE_SHIFT;
+ */
+ __raw_writel(val, sar_ram_base + SAR_BANK1_OFFSET + 0x100);
+ /* CM1 CM_SHADOW_FREQ_CONFIG1, Enable FREQ UPDATE */
+ val = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
+ val |= 1 << OMAP4430_FREQ_UPDATE_SHIFT;
+ val &= ~OMAP4430_DLL_OVERRIDE_MASK;
+ __raw_writel(val, sar_ram_base + SAR_BANK1_OFFSET + 0x104);
+	/* CM2 MEMIF_CLKSTCTRL = HW_AUTO, after FREQ UPDATE */
+ __raw_writel(0x3, sar_ram_base + SAR_BANK1_OFFSET + 0x124);
+
+ /* Overwriting Phase2a data to be restored */
+ /* CM_L3INIT_USB_HOST_CLKCTRL: SAR_MODE = 1, MODULEMODE = 2 */
+ __raw_writel(0x00000012,
+ sar_ram_base + SAR_BANK1_OFFSET + 0x2ec + offset);
+ /* CM_L3INIT_USB_TLL_CLKCTRL: SAR_MODE = 1, MODULEMODE = 1 */
+ __raw_writel(0x00000011,
+ sar_ram_base + SAR_BANK1_OFFSET + 0x2f0 + offset);
+	/* CM2 CM_SDMA_STATICDEP : Enable static dependency for SAR modules */
+ __raw_writel(0x000090e8,
+ sar_ram_base + SAR_BANK1_OFFSET + 0x2f4 + offset);
+
+ /* Overwriting Phase2b data to be restored */
+ /* CM_L3INIT_USB_HOST_CLKCTRL: SAR_MODE = 0, MODULEMODE = 0 */
+ val = __raw_readl(OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL);
+ val &= (OMAP4430_CLKSEL_UTMI_P1_MASK | OMAP4430_CLKSEL_UTMI_P2_MASK);
+ __raw_writel(val, sar_ram_base + SAR_BANK1_OFFSET + 0x91c + offset);
+ /* CM_L3INIT_USB_TLL_CLKCTRL: SAR_MODE = 0, MODULEMODE = 0 */
+	__raw_writel(0x00000000,
+ sar_ram_base + SAR_BANK1_OFFSET + 0x920 + offset);
+	/* CM2 CM_SDMA_STATICDEP : Clear the static dependency */
+ __raw_writel(0x00000040,
+ sar_ram_base + SAR_BANK1_OFFSET + 0x924 + offset);
+
+	/* readback to ensure the data reaches the SAR RAM */
+ barrier();
+ val = __raw_readl(sar_ram_base + SAR_BANK1_OFFSET + 0x924 + offset);
+}
+
+void __iomem *omap4_get_sar_ram_base(void)
+{
+ return sar_ram_base;
+}
+
+/*
+ * SAR RAM used to save and restore the HW
+ * context in low power modes
+ */
+static int __init omap4_sar_ram_init(void)
+{
+ /*
+ * To avoid code running on other OMAPs in
+ * multi-omap builds
+ */
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+
+ /*
+	 * Static mapping, never released. The actual SAR area used is 8K,
+	 * but it is spread over a 16K address range with some parts reserved.
+ */
+ sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
+ BUG_ON(!sar_ram_base);
+
+ /*
+ * All these are static mappings so ioremap() will
+ * just return with mapped VA
+ */
+ omap4_sar_modules[EMIF1_INDEX] = ioremap(OMAP44XX_EMIF1, SZ_1M);
+ BUG_ON(!omap4_sar_modules[EMIF1_INDEX]);
+ omap4_sar_modules[EMIF2_INDEX] = ioremap(OMAP44XX_EMIF2, SZ_1M);
+ BUG_ON(!omap4_sar_modules[EMIF2_INDEX]);
+ omap4_sar_modules[DMM_INDEX] = ioremap(OMAP44XX_DMM_BASE, SZ_1M);
+ BUG_ON(!omap4_sar_modules[DMM_INDEX]);
+ omap4_sar_modules[CM1_INDEX] = ioremap(OMAP4430_CM1_BASE, SZ_8K);
+ BUG_ON(!omap4_sar_modules[CM1_INDEX]);
+ omap4_sar_modules[CM2_INDEX] = ioremap(OMAP4430_CM2_BASE, SZ_8K);
+ BUG_ON(!omap4_sar_modules[CM2_INDEX]);
+ omap4_sar_modules[C2C_INDEX] = ioremap(OMAP44XX_C2C_BASE, SZ_1M);
+ BUG_ON(!omap4_sar_modules[C2C_INDEX]);
+ omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX] =
+ ioremap(OMAP443X_CTRL_BASE, SZ_4K);
+ BUG_ON(!omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX]);
+ omap4_sar_modules[L3_CLK1_INDEX] = ioremap(L3_44XX_BASE_CLK1, SZ_1M);
+ BUG_ON(!omap4_sar_modules[L3_CLK1_INDEX]);
+ omap4_sar_modules[L3_CLK2_INDEX] = ioremap(L3_44XX_BASE_CLK2, SZ_1M);
+ BUG_ON(!omap4_sar_modules[L3_CLK2_INDEX]);
+ omap4_sar_modules[L3_CLK3_INDEX] = ioremap(L3_44XX_BASE_CLK3, SZ_1M);
+ BUG_ON(!omap4_sar_modules[L3_CLK3_INDEX]);
+ omap4_sar_modules[USBTLL_INDEX] = ioremap(OMAP44XX_USBTLL_BASE, SZ_1M);
+ BUG_ON(!omap4_sar_modules[USBTLL_INDEX]);
+ omap4_sar_modules[UHH_INDEX] = ioremap(OMAP44XX_UHH_CONFIG_BASE, SZ_1M);
+ BUG_ON(!omap4_sar_modules[UHH_INDEX]);
+ omap4_sar_modules[L4CORE_INDEX] = ioremap(L4_44XX_PHYS, SZ_4M);
+ BUG_ON(!omap4_sar_modules[L4CORE_INDEX]);
+ omap4_sar_modules[L4PER_INDEX] = ioremap(L4_PER_44XX_PHYS, SZ_4M);
+ BUG_ON(!omap4_sar_modules[L4PER_INDEX]);
+
+ /*
+	 * SAR BANK3 contains all firewall settings and is saved through a
+	 * secure API on HS devices. On GP devices these registers are
+	 * meaningless but still need to be saved, otherwise the auto-restore
+	 * phase DMA takes an abort. Hence save these contents only once
+	 * at init to avoid the issue while waking up from device OFF.
+ */
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ save_sar_bank3();
+ /*
+ * Work around for OMAP443x Errata i632: "LPDDR2 Corruption After OFF
+ * Mode Transition When CS1 Is Used On EMIF":
+ * Overwrite EMIF1/EMIF2
+ * SECURE_EMIF1_SDRAM_CONFIG2_REG
+ * SECURE_EMIF2_SDRAM_CONFIG2_REG
+ */
+ if (cpu_is_omap443x()) {
+ void __iomem *secure_ctrl_mod;
+
+ secure_ctrl_mod = ioremap(OMAP4_CTRL_MODULE_WKUP, SZ_4K);
+ BUG_ON(!secure_ctrl_mod);
+
+ __raw_writel(0x10, secure_ctrl_mod +
+ OMAP4_CTRL_SECURE_EMIF1_SDRAM_CONFIG2_REG);
+ __raw_writel(0x10, secure_ctrl_mod +
+ OMAP4_CTRL_SECURE_EMIF2_SDRAM_CONFIG2_REG);
+ wmb();
+ iounmap(secure_ctrl_mod);
+ }
+
+ /*
+ * L3INIT PD and clocks are needed for SAR save phase
+ */
+ l3init_pwrdm = pwrdm_lookup("l3init_pwrdm");
+ if (!l3init_pwrdm)
+ pr_err("Failed to get l3init_pwrdm\n");
+
+ l3init_clkdm = clkdm_lookup("l3_init_clkdm");
+ if (!l3init_clkdm)
+ pr_err("Failed to get l3_init_clkdm\n");
+
+ usb_host_ck = clk_get(NULL, "usb_host_hs_fck");
+	if (IS_ERR(usb_host_ck))
+ pr_err("Could not get usb_host_ck\n");
+
+ usb_tll_ck = clk_get(NULL, "usb_tll_hs_ick");
+	if (IS_ERR(usb_tll_ck))
+ pr_err("Could not get usb_tll_ck\n");
+
+ return 0;
+}
+early_initcall(omap4_sar_ram_init);
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
index e69d37d..83ba6d9 100644
--- a/arch/arm/mach-omap2/omap44xx-smc.S
+++ b/arch/arm/mach-omap2/omap44xx-smc.S
@@ -31,6 +31,30 @@
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_smc1)
+/*
+ * Low level common routine to manage secure
+ * HAL APIs.
+ * Function signature : u32 omap_smc2(u32 id, u32 flag, u32 pargs)
+ * @id : Application ID of HAL APIs
+ * @flag : Flag to indicate the criticality of operation
+ * @pargs : Physical address of the parameter list, starting
+ *	with the number of parameters
+ */
+ENTRY(omap_smc2)
+ stmfd sp!, {r1-r12, lr}
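+	@ shuffle arguments for the secure call:
+	@ r0 = id (unchanged), r2 = flag, r3 = pargs, r1 = process ID (0)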
+ mov r3, r2
+ mov r2, r1
+ mov r1, #0x0 @ Process ID
+ mov r6, #0xff
+ mov r12, #0x00 @ Secure Service ID
+ mov r7, #0
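+	@ invalidate entire branch predictor array (BPIALL)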
+ mcr p15, 0, r7, c7, c5, 6
+ dsb
+ dmb
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+END(omap_smc2)
+
ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr}
ldr r12, =0x104
diff --git a/arch/arm/mach-omap2/omap4_trim_quirks.c b/arch/arm/mach-omap2/omap4_trim_quirks.c
new file mode 100644
index 0000000..dd96726
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4_trim_quirks.c
@@ -0,0 +1,180 @@
+/*
+ * OMAP LDO control and configuration
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+#include "control.h"
+#include "pm.h"
+#include <mach/ctrl_module_core_44xx.h>
+
+#define OMAP4_DPLL_MPU_TRIMMED_VAL_2P4 (0x1 << 18)
+#define OMAP4_DPLL_MPU_TRIMMED_VAL_3P0 (0x3 << 18)
+#define OMAP4_DPLL_MPU_TRIMMED_MASK (BIT(19) | BIT(18))
+/*
+ * Trim value has to be written to CONTROL_EFUSE_2 according to
+ * OMAP4430 errata i684 (version B)
+ * OMAP4430 units with ProdID[51:50]=11 are not affected
+ */
+#define OMAP4_LPDDR2_I684_FIX_VALUE 0x004E4000
+#define OMAP4_PROD_ID_I684_MASK 0x000C0000
+
+
+static bool bgap_trim_sw_overide;
+static bool dpll_trim_override;
+static bool ddr_io_trim_override;
+
+/**
+ * omap4_ldo_trim_configure() - Handle device trim variance
+ *
+ * A few devices come out of the fab without trim parameters fused in.
+ * These need some software support to allow the device to function
+ * normally. Handle those silicon quirks here.
+ */
+int omap4_ldo_trim_configure(void)
+{
+ u32 val;
+
+	/* If not trimmed, force the software override instead of the efuse. */
+ if (bgap_trim_sw_overide) {
+ /* Fill in recommended values */
+ val = 0x0f << OMAP4_LDOSRAMCORE_ACTMODE_VSET_OUT_SHIFT;
+ val |= OMAP4_LDOSRAMCORE_ACTMODE_MUX_CTRL_MASK;
+ val |= 0x1 << OMAP4_LDOSRAMCORE_RETMODE_VSET_OUT_SHIFT;
+ val |= OMAP4_LDOSRAMCORE_RETMODE_MUX_CTRL_MASK;
+
+ omap_ctrl_writel(val,
+ OMAP4_CTRL_MODULE_CORE_LDOSRAM_MPU_VOLTAGE_CTRL);
+ omap_ctrl_writel(val,
+ OMAP4_CTRL_MODULE_CORE_LDOSRAM_CORE_VOLTAGE_CTRL);
+ omap_ctrl_writel(val,
+ OMAP4_CTRL_MODULE_CORE_LDOSRAM_IVA_VOLTAGE_CTRL);
+ }
+
+	/* For all trimmed and untrimmed devices, write the recommended value */
+ val = 0x10 << OMAP4_AVDAC_TRIM_BYTE0_SHIFT;
+ val |= 0x01 << OMAP4_AVDAC_TRIM_BYTE1_SHIFT;
+ val |= 0x4d << OMAP4_AVDAC_TRIM_BYTE2_SHIFT;
+ val |= 0x1C << OMAP4_AVDAC_TRIM_BYTE3_SHIFT;
+ omap4_ctrl_pad_writel(val,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_1);
+
+ /* DDR I/O Trim override as per erratum i684 */
+ if (ddr_io_trim_override) {
+ omap4_ctrl_pad_writel(OMAP4_LPDDR2_I684_FIX_VALUE,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_EFUSE_2);
+ }
+
+ /* Required for DPLL_MPU to lock at 2.4 GHz */
+ if (dpll_trim_override)
+ omap_ctrl_writel(0x29, OMAP4_CTRL_MODULE_CORE_DPLL_NWELL_TRIM_0);
+
+ return 0;
+}
+
+/**
+ * omap4460_mpu_dpll_trim_override() - provide a selective s/w trim override
+ */
+static __init void omap4460_mpu_dpll_trim_override(void)
+{
+ u32 val;
+
+ val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_DPLL_1) &
+ OMAP4_DPLL_MPU_TRIMMED_MASK;
+ switch (val) {
+ case OMAP4_DPLL_MPU_TRIMMED_VAL_3P0:
+ /* all ok.. */
+ break;
+ case OMAP4_DPLL_MPU_TRIMMED_VAL_2P4:
+ /* Cross check! */
+ if (omap4_has_mpu_1_5ghz()) {
+ WARN(1, "%s: OMAP is 1.5GHz capable, trimmed=1.2GHz!\n",
+ __func__);
+ }
+ break;
+ default:
+ WARN(1, "%s: UNKNOWN TRIM:0x%08x, using s/w override\n",
+ __func__, val);
+ /* fall through and use override */
+ case 0:
+ /*
+		 * For PRE_RTP devices: not trimmed, use the s/w override!
+		 * We only support up to 1.2GHz with the s/w override,
+		 * so just give a gentle warning if a higher OPP is attempted.
+ */
+ dpll_trim_override = true;
+ /* Confirm */
+ if (omap4_has_mpu_1_5ghz()) {
+ pr_err("%s: OMAP is 1.5GHz capable, s/w trim=1.2GHz!\n",
+ __func__);
+ }
+ break;
+ }
+}
+
+static __init int omap4_ldo_trim_init(void)
+{
+ u32 bgap_trimmed = 0;
+
+ /* Applicable only for OMAP4 */
+ if (!cpu_is_omap44xx())
+ return 0;
+
+ /*
+ * Some ES2.2 efuse values for BGAP and SLDO trim
+ * are not programmed. For these units
+	 * 1. we can set override mode for SLDO trim,
+	 * and program the max multiplication factor, to ensure
+	 * a high enough voltage on the SLDO output.
+	 * 2. trim the VDAC value for TV output as per recommendation
+ */
+ if (omap_rev() >= CHIP_IS_OMAP4430ES2_2)
+ bgap_trimmed = omap_ctrl_readl(
+ OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP);
+
+ bgap_trimmed &= OMAP4_STD_FUSE_OPP_BGAP_MASK_LSB;
+
+	/* If not trimmed, force the software override instead of the efuse. */
+ if (!bgap_trimmed)
+ bgap_trim_sw_overide = true;
+
+ /* If not already trimmed, use s/w override */
+ if (cpu_is_omap446x())
+ omap4460_mpu_dpll_trim_override();
+
+ /*
+ * Errata i684 (revision B)
+	 * Impacts all OMAP4430ESx.y, trimmed and untrimmed, excluding units
+	 * with ProdID[51:50]=11.
+ * OMAP4460/70 are not impacted.
+ *
+ * ProdID:
+ * 51 50
+ * 0 0 Incorrect trim, SW WA needed.
+ * 0 1 Fixed test program issue of overlapping of LPDDR & SmartIO
+ * efuse fields, SW WA needed for LPDDR.
+ * 1 1 New LPDDR trim formula to compensate for vertical vs horizontal
+ * cell layout. No overwrite required.
+ */
+ if (cpu_is_omap443x()) {
+ u32 prod_id;
+
+ prod_id = omap_ctrl_readl(
+ OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1);
+ prod_id &= OMAP4_PROD_ID_I684_MASK;
+ if (prod_id != OMAP4_PROD_ID_I684_MASK)
+ ddr_io_trim_override = true;
+ }
+
+ return omap4_ldo_trim_configure();
+}
+arch_initcall(omap4_ldo_trim_init);
diff --git a/arch/arm/mach-omap2/omap_dmm.c b/arch/arm/mach-omap2/omap_dmm.c
new file mode 100644
index 0000000..c2c44d2
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_dmm.c
@@ -0,0 +1,73 @@
+/*
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <mach/dmm.h>
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <mach/tiler.h>
+
+#ifdef CONFIG_TI_TILER
+
+static struct omap_dmm_platform_data dmm_data = {
+ .oh_name = "dmm",
+};
+
+static struct platform_device omap_tiler_device = {
+ .name = "tiler",
+ .id = -1,
+};
+
+static struct omap_device_pm_latency omap_dmm_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+
+void __init omap_dmm_init(void)
+{
+ struct omap_hwmod *oh = NULL;
+ struct omap_device *od = NULL;
+
+ oh = omap_hwmod_lookup(dmm_data.oh_name);
+ if (!oh)
+ return;
+
+ dmm_data.base = omap_hwmod_get_mpu_rt_va(oh);
+ dmm_data.irq = oh->mpu_irqs[0].irq;
+
+ od = omap_device_build(dmm_data.oh_name, -1, oh, &dmm_data,
+ sizeof(dmm_data), omap_dmm_latency,
+ ARRAY_SIZE(omap_dmm_latency), false);
+
+ /* register tiler platform device to go along with the dmm device */
+ if (platform_device_register(&omap_tiler_device) < 0)
+ printk(KERN_ERR "Unable to register OMAP Tiler device\n");
+
+ return;
+}
+
+#else
+void __init omap_dmm_init(void)
+{
+}
+#endif
diff --git a/arch/arm/mach-omap2/omap_fiq_debugger.c b/arch/arm/mach-omap2/omap_fiq_debugger.c
new file mode 100644
index 0000000..174bba0
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_fiq_debugger.c
@@ -0,0 +1,418 @@
+/*
+ * Serial Debugger Interface for Omap
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
+#include <plat/omap-serial.h>
+
+#include <asm/fiq_debugger.h>
+
+#include <mach/omap_fiq_debugger.h>
+#include <mach/system.h>
+
+#include "mux.h"
+
+struct omap_fiq_debugger {
+ struct fiq_debugger_pdata pdata;
+ struct platform_device *pdev;
+ void __iomem *debug_port_base;
+ bool suspended;
+ spinlock_t lock;
+ bool have_state;
+
+ /* uart state */
+ unsigned char lcr;
+ unsigned char fcr;
+ unsigned char efr;
+ unsigned char dll;
+ unsigned char dlh;
+ unsigned char mcr;
+ unsigned char ier;
+ unsigned char wer;
+};
+
+static struct omap_fiq_debugger *dbgs[OMAP_MAX_HSUART_PORTS];
+
+static inline struct omap_fiq_debugger *get_dbg(struct platform_device *pdev)
+{
+ struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+ return container_of(pdata, struct omap_fiq_debugger, pdata);
+}
+
+static inline void omap_write(struct omap_fiq_debugger *dbg,
+ unsigned int val, unsigned int off)
+{
+ __raw_writel(val, dbg->debug_port_base + off * 4);
+}
+
+static inline unsigned int omap_read(struct omap_fiq_debugger *dbg,
+ unsigned int off)
+{
+ return __raw_readl(dbg->debug_port_base + off * 4);
+}
+
+static void debug_omap_port_enable(struct platform_device *pdev)
+{
+ pm_runtime_get_sync(&pdev->dev);
+}
+
+static void debug_omap_port_disable(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbg->lock, flags);
+ if (!dbg->suspended) {
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ } else {
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ }
+ spin_unlock_irqrestore(&dbg->lock, flags);
+}
+
+static int debug_omap_port_resume(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+
+ dbg->suspended = false;
+ barrier();
+ return 0;
+}
+
+static int debug_omap_port_suspend(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+ unsigned long flags;
+
+	/* this will force the device to be idled now, in case it was
+	 * autosuspended but the timer has not yet run out.
+ */
+ spin_lock_irqsave(&dbg->lock, flags);
+ dbg->suspended = true;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ spin_unlock_irqrestore(&dbg->lock, flags);
+
+ return 0;
+}
+
+/* mostly copied from drivers/tty/serial/omap-serial.c */
+static void omap_write_mdr1(struct omap_fiq_debugger *dbg, u8 mdr1)
+{
+ u8 timeout = 255;
+
+ if (!(cpu_is_omap34xx() || cpu_is_omap44xx())) {
+ omap_write(dbg, UART_OMAP_MDR1_DISABLE, UART_OMAP_MDR1);
+ return;
+ }
+
+ omap_write(dbg, mdr1, UART_OMAP_MDR1);
+ udelay(2);
+ omap_write(dbg, dbg->fcr | UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR,
+ UART_FCR);
+
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE !=
+ (omap_read(dbg, UART_LSR) & (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+			/* Should *never* happen. We warn and carry on. */
+ dev_crit(&dbg->pdev->dev, "Errata i202: timedout %x\n",
+ omap_read(dbg, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
+
+/* assume the bootloader programmed us correctly */
+static void debug_port_read_state(struct omap_fiq_debugger *dbg)
+{
+ /* assume we're in operational mode when we are called */
+ dbg->lcr = omap_read(dbg, UART_LCR);
+
+ /* config mode A */
+ omap_write(dbg, UART_LCR_CONF_MODE_A, UART_LCR);
+ dbg->mcr = omap_read(dbg, UART_MCR);
+
+ /* config mode B */
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR);
+ dbg->efr = omap_read(dbg, UART_EFR);
+ dbg->dll = omap_read(dbg, UART_DLL);
+ dbg->dlh = omap_read(dbg, UART_DLM);
+
+ /* back to operational */
+ omap_write(dbg, dbg->lcr, UART_LCR);
+
+ pr_debug("%s: lcr=%02x mcr=%02x efr=%02x dll=%02x dlh=%02x\n",
+ __func__, dbg->lcr, dbg->mcr, dbg->efr, dbg->dll, dbg->dlh);
+}
+
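+/*
+ * Reprogram the UART from the register state captured by
+ * debug_port_read_state().
+ */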
+static void debug_port_restore(struct omap_fiq_debugger *dbg)
+{
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR); /* Config B mode */
+ omap_write(dbg, dbg->efr | UART_EFR_ECB, UART_EFR);
+ omap_write(dbg, 0x0, UART_LCR); /* Operational mode */
+ omap_write(dbg, 0x0, UART_IER);
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR); /* Config B mode */
+ omap_write(dbg, 0, UART_DLL);
+ omap_write(dbg, 0, UART_DLM);
+ omap_write(dbg, UART_LCR_CONF_MODE_A, UART_LCR);
+ omap_write(dbg, dbg->mcr | UART_MCR_TCRTLR, UART_MCR);
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR);
+ omap_write(dbg, 0, UART_TI752_TLR);
+ omap_write(dbg, 0, UART_SCR);
+ omap_write(dbg, dbg->efr, UART_EFR);
+ omap_write(dbg, UART_LCR_CONF_MODE_A, UART_LCR);
+ omap_write(dbg, dbg->fcr, UART_FCR);
+ omap_write(dbg, dbg->mcr, UART_MCR);
+ omap_write_mdr1(dbg, UART_OMAP_MDR1_DISABLE);
+
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR);
+ omap_write(dbg, dbg->efr | UART_EFR_ECB, UART_EFR);
+ omap_write(dbg, dbg->dll, UART_DLL);
+ omap_write(dbg, dbg->dlh, UART_DLM);
+ omap_write(dbg, 0, UART_LCR);
+ omap_write(dbg, dbg->ier, UART_IER);
+ omap_write(dbg, UART_LCR_CONF_MODE_B, UART_LCR);
+ omap_write(dbg, dbg->efr, UART_EFR);
+
+ /* will put us back to operational mode */
+ omap_write(dbg, dbg->lcr, UART_LCR);
+ omap_write_mdr1(dbg, UART_OMAP_MDR1_16X_MODE);
+
+ omap_write(dbg, dbg->wer, UART_OMAP_WER);
+}
+
+u32 omap_debug_uart_resume_idle(void)
+{
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < OMAP_MAX_HSUART_PORTS; i++) {
+ struct omap_fiq_debugger *dbg = dbgs[i];
+ struct omap_device *od;
+
+ if (!dbg || !dbg->pdev)
+ continue;
+
+ od = to_omap_device(dbg->pdev);
+ if (omap_hwmod_pad_get_wakeup_status(od->hwmods[0])) {
+ /*
+ * poke the uart and let it stay on long enough
+ * to process any further data. It's ok to use
+ * autosuspend here since this is on the resume path
+ * during the wakeup. We'll still go through a full
+ * resume cycle, so if we go back to suspend
+ * the suspended flag will properly get reset.
+ */
+ pm_runtime_get_sync(&dbg->pdev->dev);
+ pm_runtime_mark_last_busy(&dbg->pdev->dev);
+ pm_runtime_put_autosuspend(&dbg->pdev->dev);
+ dev_dbg(&dbg->pdev->dev, "woke up from IO pad\n");
+ ret++;
+ }
+ }
+
+ return ret;
+}
+
+static int debug_port_init(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+
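+	/* enable RX data and receiver line-status interrupts, set FIFO triggers */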
+ dbg->ier = UART_IER_RLSI | UART_IER_RDI;
+ dbg->fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_01;
+ dbg->wer = 0;
+
+ device_init_wakeup(&pdev->dev, true);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DEFAULT_AUTOSUSPEND_DELAY);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+
+ omap_hwmod_idle(to_omap_device(pdev)->hwmods[0]);
+ debug_omap_port_enable(pdev);
+ debug_omap_port_disable(pdev);
+
+ debug_omap_port_enable(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ omap_hwmod_enable_wakeup(to_omap_device(pdev)->hwmods[0]);
+
+ debug_port_read_state(dbg);
+ debug_port_restore(dbg);
+
+ dbg->have_state = true;
+
+
+ debug_omap_port_disable(pdev);
+ return 0;
+}
+
+static int debug_getc(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+ unsigned int lsr;
+ int ret = FIQ_DEBUGGER_NO_CHAR;
+
+ lsr = omap_read(dbg, UART_LSR);
+ if (lsr & UART_LSR_BI) {
+ /* need to read RHR to clear the BI condition */
+ omap_read(dbg, UART_RX);
+ ret = FIQ_DEBUGGER_BREAK;
+ } else if (lsr & UART_LSR_DR) {
+ ret = omap_read(dbg, UART_RX);
+ }
+
+ return ret;
+}
+
+static void debug_putc(struct platform_device *pdev, unsigned int c)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+
+ while (!(omap_read(dbg, UART_LSR) & UART_LSR_THRE))
+ cpu_relax();
+
+ omap_write(dbg, c, UART_TX);
+}
+
+static void debug_flush(struct platform_device *pdev)
+{
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+
+ while (!(omap_read(dbg, UART_LSR) & UART_LSR_TEMT))
+ cpu_relax();
+}
+
+static int uart_idle_hwmod(struct omap_device *od)
+{
+ omap_hwmod_idle(od->hwmods[0]);
+
+ return 0;
+}
+
+static int uart_enable_hwmod(struct omap_device *od)
+{
+ struct platform_device *pdev = &od->pdev;
+ struct omap_fiq_debugger *dbg = get_dbg(pdev);
+
+ omap_hwmod_enable(od->hwmods[0]);
+ if (omap_pm_was_context_lost(&pdev->dev) && dbg->have_state) {
+ debug_port_restore(dbg);
+ dev_dbg(&pdev->dev, "restoring lost context!\n");
+ }
+
+ return 0;
+}
+
+static struct omap_device_pm_latency omap_uart_latency[] = {
+ {
+ .deactivate_func = uart_idle_hwmod,
+ .activate_func = uart_enable_hwmod,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+extern struct omap_hwmod *omap_uart_hwmod_lookup(int num);
+
+int __init omap_serial_debug_init(int id, bool is_fiq, bool is_high_prio_irq,
+ struct omap_device_pad *pads, int num_pads)
+{
+ struct omap_fiq_debugger *dbg;
+ struct omap_hwmod *oh;
+ struct omap_device *od;
+ int ret;
+
+ if (id >= OMAP_MAX_HSUART_PORTS)
+ return -EINVAL;
+ if (dbgs[id])
+ return -EBUSY;
+
+ oh = omap_uart_hwmod_lookup(id);
+ if (!oh)
+ return -ENODEV;
+
+ oh->mpu_irqs[0].name = "uart_irq";
+ oh->mux = omap_hwmod_mux_init(pads, num_pads);
+
+ dbg = kzalloc(sizeof(struct omap_fiq_debugger), GFP_KERNEL);
+ if (!dbg) {
+ pr_err("Failed to allocate for fiq debugger\n");
+ return -ENOMEM;
+ }
+
+ dbg->debug_port_base = ioremap(oh->slaves[0]->addr[0].pa_start,
+ PAGE_SIZE);
+ if (!dbg->debug_port_base) {
+ pr_err("Failed to ioremap for fiq debugger\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ spin_lock_init(&dbg->lock);
+
+ dbg->pdata.uart_init = debug_port_init;
+ dbg->pdata.uart_getc = debug_getc;
+ dbg->pdata.uart_putc = debug_putc;
+ dbg->pdata.uart_flush = debug_flush;
+ dbg->pdata.uart_enable = debug_omap_port_enable;
+ dbg->pdata.uart_disable = debug_omap_port_disable;
+ dbg->pdata.uart_dev_suspend = debug_omap_port_suspend;
+ dbg->pdata.uart_dev_resume = debug_omap_port_resume;
+
+ od = omap_device_build("fiq_debugger", id,
+ oh, dbg, sizeof(*dbg), omap_uart_latency,
+ ARRAY_SIZE(omap_uart_latency), false);
+ if (IS_ERR(od)) {
+ pr_err("Could not build omap_device for fiq_debugger: %s\n",
+ oh->name);
+ ret = PTR_ERR(od);
+ goto err_dev_build;
+ }
+
+ dbg->pdev = &od->pdev;
+ dbgs[id] = dbg;
+
+ return 0;
+
+err_dev_build:
+ iounmap(dbg->debug_port_base);
+err_ioremap:
+ kfree(dbg);
+ return ret;
+}
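+
+/*
+ * Illustrative usage sketch (not part of this patch): a board file could
+ * register the FIQ debugger on one of the HSUART ports from its init code,
+ * e.g. with no extra pad muxing:
+ *
+ *	omap_serial_debug_init(2, true, true, NULL, 0);
+ *
+ * The port id, FIQ/IRQ selection and pad list used here are board-specific
+ * assumptions, not values taken from this patch.
+ */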
diff --git a/arch/arm/mach-omap2/omap_hsi.c b/arch/arm/mach-omap2/omap_hsi.c
new file mode 100644
index 0000000..ce8fa54
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hsi.c
@@ -0,0 +1,426 @@
+/*
+ * arch/arm/mach-omap2/hsi.c
+ *
+ * HSI device definition
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Original Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/notifier.h>
+#include <linux/hsi_driver_if.h>
+
+#include <asm/clkdev.h>
+
+#include <plat/omap_hsi.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+
+#include <../drivers/omap_hsi/hsi_driver.h>
+#include "clock.h"
+#include "mux.h"
+#include "control.h"
+
+static int omap_hsi_wakeup_enable(int hsi_port);
+static int omap_hsi_wakeup_disable(int hsi_port);
+#define OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME "omap_hsi"
+#define OMAP_HSI_PLATFORM_DEVICE_NAME "omap_hsi.0"
+#define OMAP_HSI_HWMOD_NAME "hsi"
+#define OMAP_HSI_HWMOD_CLASSNAME "hsi"
+#define OMAP_HSI_PADCONF_CAWAKE_PIN "usbb1_ulpitll_clk.hsi1_cawake"
+#define OMAP_HSI_PADCONF_CAWAKE_MODE OMAP_MUX_MODE1
+
+
+#define OMAP_MUX_MODE_MASK 0x7
+
+
+/* Hack until the correct hwmod-mux API is used */
+#define CA_WAKE_MUX_REG (0x4a1000C2)
+#define OMAP44XX_PADCONF_WAKEUPENABLE0 (1 << 14)
+#define OMAP44XX_PADCONF_WAKEUPEVENT0 (1 << 15)
+
+static int omap_mux_read_signal(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ return val;
+}
+
+static int omap_mux_enable_wakeup(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ val |= OMAP44XX_PADCONF_WAKEUPENABLE0;
+ omap_writew(val, CA_WAKE_MUX_REG);
+ return 0;
+}
+
+static int omap_mux_disable_wakeup(const char *muxname)
+{
+ u16 val = 0;
+ val = omap_readw(CA_WAKE_MUX_REG);
+ val &= ~OMAP44XX_PADCONF_WAKEUPENABLE0;
+ omap_writew(val, CA_WAKE_MUX_REG);
+ return 0;
+}
+
+/*
+ * NOTE: We abuse struct port_ctx a little by also using it for
+ * initialization.
+ */
+
+
+static struct port_ctx hsi_port_ctx[] = {
+ [0] = {
+ .hst.mode = HSI_MODE_FRAME,
+ .hst.flow = HSI_FLOW_SYNCHRONIZED,
+ .hst.frame_size = HSI_FRAMESIZE_DEFAULT,
+ .hst.divisor = HSI_DIVISOR_DEFAULT,
+ .hst.channels = HSI_CHANNELS_DEFAULT,
+ .hst.arb_mode = HSI_ARBMODE_ROUNDROBIN,
+ .hsr.mode = HSI_MODE_FRAME,
+ .hsr.flow = HSI_FLOW_SYNCHRONIZED,
+ .hsr.frame_size = HSI_FRAMESIZE_DEFAULT,
+ .hsr.channels = HSI_CHANNELS_DEFAULT,
+ .hsr.divisor = HSI_DIVISOR_DEFAULT,
+ .hsr.counters = HSI_COUNTERS_FT_DEFAULT |
+ HSI_COUNTERS_TB_DEFAULT |
+ HSI_COUNTERS_FB_DEFAULT,
+ },
+};
+
+static struct ctrl_ctx hsi_ctx = {
+ .sysconfig = 0,
+ .gdd_gcr = 0,
+ .dll = 0,
+ .pctx = hsi_port_ctx,
+};
+
+static struct hsi_platform_data omap_hsi_platform_data = {
+ .num_ports = ARRAY_SIZE(hsi_port_ctx),
+ .hsi_gdd_chan_count = HSI_HSI_DMA_CHANNEL_MAX,
+ .default_hsi_fclk = HSI_DEFAULT_FCLK,
+ .ctx = &hsi_ctx,
+ .device_enable = omap_device_enable,
+ .device_idle = omap_device_idle,
+ .device_shutdown = omap_device_shutdown,
+ .wakeup_enable = omap_hsi_wakeup_enable,
+ .wakeup_disable = omap_hsi_wakeup_disable,
+ .wakeup_is_from_hsi = omap_hsi_is_io_wakeup_from_hsi,
+ .board_suspend = omap_hsi_prepare_suspend,
+};
+
+
+static struct platform_device *hsi_get_hsi_platform_device(void)
+{
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* HSI_TODO: handle platform device id (or port) (0/1) */
+ dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ OMAP_HSI_PLATFORM_DEVICE_NAME);
+ if (!dev) {
+ pr_debug("Could not find platform device %s\n",
+ OMAP_HSI_PLATFORM_DEVICE_NAME);
+		return NULL;
+ }
+
+ if (!dev->driver) {
+ /* Could not find driver for platform device. */
+		return NULL;
+ }
+
+ pdev = to_platform_device(dev);
+
+ return pdev;
+}
+
+static struct hsi_dev *hsi_get_hsi_controller_data(struct platform_device *pd)
+{
+ struct hsi_dev *hsi_ctrl;
+
+ if (!pd)
+		return NULL;
+
+ hsi_ctrl = (struct hsi_dev *) platform_get_drvdata(pd);
+ if (!hsi_ctrl) {
+ pr_err("Could not find HSI controller data\n");
+		return NULL;
+ }
+
+ return hsi_ctrl;
+}
+
+/**
+* omap_hsi_is_io_pad_hsi - Indicates if IO Pad has been muxed for HSI CAWAKE
+*
+* Return value: 0 if the CAWAKE padconf has not been found or the pad is not
+* muxed for CAWAKE,
+* 1 otherwise
+*/
+static int omap_hsi_is_io_pad_hsi(void)
+{
+ u16 val;
+
+ /* Check for IO pad */
+ val = omap_mux_read_signal(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ if (val == -ENODEV)
+ return 0;
+
+ /* Continue only if CAWAKE is muxed */
+ if ((val & OMAP_MUX_MODE_MASK) != OMAP_HSI_PADCONF_CAWAKE_MODE)
+ return 0;
+
+ return 1;
+}
+
+/**
+* omap_hsi_is_io_wakeup_from_hsi - Indicates an IO wakeup from HSI CAWAKE
+*
+* Return value: 0 if the CAWAKE padconf has not been found or no IO wakeup
+* event occurred for CAWAKE,
+* 1 otherwise
+* TODO: return value should indicate which HSI port has woken up
+*/
+int omap_hsi_is_io_wakeup_from_hsi(void)
+{
+ u16 val;
+
+ /* Check for IO pad wakeup */
+ val = omap_mux_read_signal(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ if (val == -ENODEV)
+ return 0;
+
+ /* Continue only if CAWAKE is muxed */
+ if ((val & OMAP_MUX_MODE_MASK) != OMAP_HSI_PADCONF_CAWAKE_MODE)
+ return 0;
+
+ if (val & OMAP44XX_PADCONF_WAKEUPEVENT0)
+ return 1;
+
+ return 0;
+}
+
+/**
+* omap_hsi_wakeup_enable - Enable HSI wakeup feature from RET/OFF mode
+*
+* @hsi_port - reference to the HSI port on which to enable the wakeup feature.
+*
+* Return value: 0 if CAWAKE has been configured to wake up the platform,
+* -ENODEV if CAWAKE is not muxed on padconf
+*/
+static int omap_hsi_wakeup_enable(int hsi_port)
+{
+ int ret = -ENODEV;
+
+ if (omap_hsi_is_io_pad_hsi())
+ ret = omap_mux_enable_wakeup(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ else
+ pr_debug("Trying to enable HSI IO wakeup on non HSI board\n");
+
+ /* TODO: handle hsi_port param and use it to find the correct Pad */
+ return ret;
+}
+
+/**
+* omap_hsi_wakeup_disable - Disable HSI wakeup feature from RET/OFF mode
+*
+* @hsi_port - reference to the HSI port on which to disable the wakeup feature.
+*
+* Return value: 0 if CAWAKE has been configured not to wake up the platform,
+* -ENODEV if CAWAKE is not muxed on padconf
+*/
+static int omap_hsi_wakeup_disable(int hsi_port)
+{
+ int ret = -ENODEV;
+
+ if (omap_hsi_is_io_pad_hsi())
+ ret = omap_mux_disable_wakeup(OMAP_HSI_PADCONF_CAWAKE_PIN);
+ else
+ pr_debug("Trying to disable HSI IO wakeup on non HSI board\n");
+
+ /* TODO: handle hsi_port param and use it to find the correct Pad */
+
+ return ret;
+}
+
+/**
+* omap_hsi_prepare_suspend - Prepare HSI for suspend mode
+*
+* Return value: 0 if the CAWAKE padconf has been configured properly,
+* -ENODEV if CAWAKE is not muxed on padconf.
+*
+*/
+int omap_hsi_prepare_suspend(int hsi_port, bool dev_may_wakeup)
+{
+ int ret;
+
+ if (dev_may_wakeup)
+ ret = omap_hsi_wakeup_enable(hsi_port);
+ else
+ ret = omap_hsi_wakeup_disable(hsi_port);
+
+ return ret;
+}
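+
+/*
+ * Illustrative call sequence (an assumption, not taken from this patch): the
+ * HSI driver's suspend path could forward device_may_wakeup() through the
+ * .board_suspend hook wired up in omap_hsi_platform_data above, so the CAWAKE
+ * pad only arms the IO wakeup when the modem is allowed to wake the platform:
+ *
+ *	pdata->board_suspend(port_id, device_may_wakeup(dev));
+ *
+ * Here port_id and dev are placeholders for the caller's port index and
+ * struct device.
+ */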
+
+/**
+* omap_hsi_wakeup - Prepare HSI for wakeup from suspend mode (RET/OFF)
+*
+* Return value : 1 if IO wakeup source is HSI
+* 0 if IO wakeup source is not HSI.
+*/
+int omap_hsi_wakeup(int hsi_port)
+{
+ static struct platform_device *pdev;
+ static struct hsi_dev *hsi_ctrl;
+
+ if (!pdev) {
+ pdev = hsi_get_hsi_platform_device();
+ if (!pdev)
+ return -ENODEV;
+	}
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ dev_info(&pdev->dev, "Modem not allowed to wakeup platform");
+ return -EPERM;
+ }
+
+ if (!hsi_ctrl) {
+ hsi_ctrl = hsi_get_hsi_controller_data(pdev);
+ if (!hsi_ctrl)
+ return -ENODEV;
+ }
+
+ dev_dbg(hsi_ctrl->dev, "Modem wakeup detected from HSI CAWAKE Pad");
+
+ /* CAWAKE falling or rising edge detected */
+ hsi_ctrl->hsi_port->cawake_off_event = true;
+ tasklet_hi_schedule(&hsi_ctrl->hsi_port->hsi_tasklet);
+
+ /* Disable interrupt until Bottom Half has cleared */
+ /* the IRQ status register */
+ disable_irq_nosync(hsi_ctrl->hsi_port->irq);
+
+ return 0;
+}
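+
+/*
+ * Illustrative wakeup-path sketch (assumption): core PM code handling an IO
+ * pad wakeup could first check whether the event came from the HSI CAWAKE
+ * pad and, only if so, hand it to the HSI port:
+ *
+ *	if (omap_hsi_is_io_wakeup_from_hsi())
+ *		omap_hsi_wakeup(0);
+ */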
+
+/* HSI_TODO : This requires some fine tuning & completion of
+ * activate/deactivate latency values
+ */
+static struct omap_device_pm_latency omap_hsi_latency[] = {
+ [0] = {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+/* HSI device registration */
+static int __init omap_hsi_register(struct omap_hwmod *oh, void *user)
+{
+ struct omap_device *od;
+ struct hsi_platform_data *pdata = &omap_hsi_platform_data;
+
+ if (!oh) {
+ pr_err("Could not look up %s omap_hwmod\n",
+ OMAP_HSI_HWMOD_NAME);
+ return -EEXIST;
+ }
+
+ od = omap_device_build(OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME, 0, oh,
+ pdata, sizeof(*pdata), omap_hsi_latency,
+ ARRAY_SIZE(omap_hsi_latency), false);
+ WARN(IS_ERR(od), "Can't build omap_device for %s:%s.\n",
+ OMAP_HSI_PLATFORM_DEVICE_DRIVER_NAME, oh->name);
+
+ pr_info("HSI: device registered as omap_hwmod: %s\n", oh->name);
+ return 0;
+}
+
+static void __init omap_4430hsi_pad_conf(void)
+{
+ /*
+ * HSI pad conf: hsi1_ca/ac_wake/flag/data/ready
+ * Also configure gpio_92/95/157/187 used by modem
+ */
+ /* hsi1_cawake */
+ omap_mux_init_signal("usbb1_ulpitll_clk.hsi1_cawake", \
+ OMAP_PIN_INPUT_PULLDOWN | \
+ OMAP_PIN_OFF_NONE | \
+ OMAP_PIN_OFF_WAKEUPENABLE);
+ /* hsi1_caflag */
+ omap_mux_init_signal("usbb1_ulpitll_dir.hsi1_caflag", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_cadata */
+ omap_mux_init_signal("usbb1_ulpitll_stp.hsi1_cadata", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acready */
+ omap_mux_init_signal("usbb1_ulpitll_nxt.hsi1_acready", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_OUTPUT_LOW);
+ /* hsi1_acwake */
+ omap_mux_init_signal("usbb1_ulpitll_dat0.hsi1_acwake", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acdata */
+ omap_mux_init_signal("usbb1_ulpitll_dat1.hsi1_acdata", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_acflag */
+ omap_mux_init_signal("usbb1_ulpitll_dat2.hsi1_acflag", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* hsi1_caready */
+ omap_mux_init_signal("usbb1_ulpitll_dat3.hsi1_caready", \
+ OMAP_PIN_INPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_92 */
+ omap_mux_init_signal("usbb1_ulpitll_dat4.gpio_92", \
+ OMAP_PULL_ENA);
+ /* gpio_95 */
+ omap_mux_init_signal("usbb1_ulpitll_dat7.gpio_95", \
+ OMAP_PIN_INPUT_PULLDOWN | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_157 */
+ omap_mux_init_signal("usbb2_ulpitll_clk.gpio_157", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+ /* gpio_187 */
+ omap_mux_init_signal("sys_boot3.gpio_187", \
+ OMAP_PIN_OUTPUT | \
+ OMAP_PIN_OFF_NONE);
+}
+
+int __init omap_hsi_dev_init(void)
+{
+ /* Keep this for genericity, although there is only one hwmod for HSI */
+ return omap_hwmod_for_each_by_class(OMAP_HSI_HWMOD_CLASSNAME,
+ omap_hsi_register, NULL);
+}
+postcore_initcall(omap_hsi_dev_init);
+
+/* HSI devices registration */
+int __init omap_hsi_init(void)
+{
+ omap_4430hsi_pad_conf();
+ return 0;
+}
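+
+/*
+ * Illustrative board usage (assumption): a 4430 board file would call
+ * omap_hsi_init() during machine init so the HSI pads are muxed before the
+ * HSI driver probes the omap_device registered by omap_hsi_dev_init():
+ *
+ *	omap_hsi_init();
+ */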
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 293fa6c..fac4aec 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -142,13 +142,16 @@
#include "powerdomain.h"
#include <plat/clock.h>
#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include <plat/prcm.h>
+#include <mach/emif.h>
#include "cm2xxx_3xxx.h"
#include "cm44xx.h"
#include "prm2xxx_3xxx.h"
#include "prm44xx.h"
#include "mux.h"
+#include "pm.h"
/* Maximum microseconds to wait for OMAP module to softreset */
#define MAX_MODULE_SOFTRESET_WAIT 10000
@@ -391,7 +394,8 @@
if (!oh->class->sysc ||
!((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
- (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+ (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+ (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
return -EINVAL;
if (!oh->class->sysc->sysc_fields) {
@@ -401,10 +405,13 @@
wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
- *v |= wakeup_mask;
+ if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+ *v |= wakeup_mask;
if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
/* XXX test pwrdm_get_wken for this hwmod's subsystem */
@@ -426,7 +433,8 @@
if (!oh->class->sysc ||
!((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
- (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+ (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+ (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
return -EINVAL;
if (!oh->class->sysc->sysc_fields) {
@@ -436,10 +444,13 @@
wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
- *v &= ~wakeup_mask;
+ if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+ *v &= ~wakeup_mask;
if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
/* XXX test pwrdm_get_wken for this hwmod's subsystem */
@@ -781,8 +792,16 @@
}
if (sf & SYSC_HAS_MIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
- HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_NO;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
_set_master_standbymode(oh, idlemode, &v);
}
@@ -840,8 +859,16 @@
}
if (sf & SYSC_HAS_MIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
- HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
_set_master_standbymode(oh, idlemode, &v);
}
@@ -1154,6 +1181,9 @@
goto dis_opt_clks;
_write_sysconfig(v, oh);
+ if (oh->class->sysc->srst_udelay)
+ udelay(oh->class->sysc->srst_udelay);
+
if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
omap_test_timeout((omap_hwmod_read(oh,
oh->class->sysc->syss_offs)
@@ -1223,6 +1253,7 @@
static int _enable(struct omap_hwmod *oh)
{
int r;
+ int hwsup = 0;
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
@@ -1243,17 +1274,17 @@
oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
_deassert_hardreset(oh, oh->rst_lines[0].name);
- /* Mux pins for device runtime if populated */
- if (oh->mux && (!oh->mux->enabled ||
- ((oh->_state == _HWMOD_STATE_IDLE) &&
- oh->mux->pads_dynamic)))
- omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
-
_add_initiator_dep(oh, mpu_oh);
+ if (oh->_clk && oh->_clk->clkdm) {
+ hwsup = clkdm_is_idle(oh->_clk->clkdm);
+ clkdm_wakeup(oh->_clk->clkdm);
+ }
_enable_clocks(oh);
-
r = _wait_target_ready(oh);
if (!r) {
+ if (oh->_clk && oh->_clk->clkdm && hwsup)
+ clkdm_allow_idle(oh->_clk->clkdm);
+
oh->_state = _HWMOD_STATE_ENABLED;
/* Access the sysconfig only if the target is ready */
@@ -1268,6 +1299,12 @@
oh->name, r);
}
+ /* Mux pins for device runtime if populated */
+ if (oh->mux && (!oh->mux->enabled ||
+ ((oh->_state == _HWMOD_STATE_ENABLED) &&
+ oh->mux->pads_dynamic)))
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+
return r;
}
@@ -1295,8 +1332,11 @@
_disable_clocks(oh);
/* Mux pins for device idle if populated */
- if (oh->mux && oh->mux->pads_dynamic)
+ if (oh->mux && oh->mux->pads_dynamic) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+ if (cpu_is_omap44xx())
+ omap4_trigger_ioctrl();
+ }
oh->_state = _HWMOD_STATE_IDLE;
@@ -1374,8 +1414,11 @@
}
}
- if (oh->class->sysc)
+ if (oh->class->sysc) {
+ if (oh->_state == _HWMOD_STATE_IDLE)
+ _enable(oh);
_shutdown_sysc(oh);
+ }
/*
* If an IP contains only one HW reset line, then assert it
@@ -1507,6 +1550,7 @@
* that the copy process would be relatively complex due to the large number
* of substructures.
*/
+
static int __init _register(struct omap_hwmod *oh)
{
int ms_id;
@@ -1538,6 +1582,12 @@
*/
if (!strcmp(oh->name, MPU_INITIATOR_NAME))
mpu_oh = oh;
+ else if (cpu_is_omap44xx()) {
+ if (!strcmp(oh->name, "emif1"))
+ emif_clear_irq(0);
+ else if (!strcmp(oh->name, "emif2"))
+ emif_clear_irq(1);
+ }
return 0;
}
@@ -1770,6 +1820,34 @@
core_initcall(omap_hwmod_setup_all);
/**
+ * omap_hwmod_set_ioring_wakeup - set or clear the io pad wakeup flag.
+ * @oh: struct omap_hwmod *
+ * @set_wake: bool value indicating whether to set or clear the wakeup status.
+ *
+ * Set or clear the wakeup flag for the io_pad.
+ */
+static int omap_hwmod_set_ioring_wakeup(struct omap_hwmod *oh, bool set_wake)
+{
+ struct omap_device_pad *pad;
+ int ret = -EINVAL, j;
+
+ if (oh->mux && oh->mux->enabled) {
+ for (j = 0; j < oh->mux->nr_pads_dynamic; j++) {
+ pad = oh->mux->pads_dynamic[j];
+ if (pad->flags & OMAP_DEVICE_PAD_WAKEUP) {
+ if (set_wake)
+ pad->idle |= OMAP_WAKEUP_EN;
+ else
+ pad->idle &= ~OMAP_WAKEUP_EN;
+ ret = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
* omap_hwmod_enable - enable an omap_hwmod
* @oh: struct omap_hwmod *
*
@@ -2097,6 +2175,35 @@
{
return _del_initiator_dep(oh, init_oh);
}
+
+/**
+ * omap_hwmod_enable_ioring_wakeup - Set wakeup flag for iopad.
+ * @oh: struct omap_hwmod *
+ *
+ * Traverse the dynamic pads; if a pad is enabled, set the wakeup enable
+ * bit flag for the mux pin. The wakeup pad bit will be set during the
+ * hwmod idle transition.
+ * Return an error if pads are not enabled or not available.
+ */
+int omap_hwmod_enable_ioring_wakeup(struct omap_hwmod *oh)
+{
+ /* Enable pad wake-up capability */
+ return omap_hwmod_set_ioring_wakeup(oh, true);
+}
+
+/**
+ * omap_hwmod_disable_ioring_wakeup - Clear wakeup flag for iopad.
+ * @oh: struct omap_hwmod *
+ *
+ * Traverse the dynamic pads; if a pad is enabled, clear the wakeup enable
+ * bit flag for the mux pin. The wakeup pad bit will be cleared during the
+ * hwmod idle transition.
+ * Return an error if pads are not enabled or not available.
+ */
+int omap_hwmod_disable_ioring_wakeup(struct omap_hwmod *oh)
+{
+ /* Disable pad wakeup capability */
+ return omap_hwmod_set_ioring_wakeup(oh, false);
+}
/**
* omap_hwmod_enable_wakeup - allow device to wake up the system
@@ -2123,6 +2230,7 @@
v = oh->_sysc_cache;
_enable_wakeup(oh, &v);
_write_sysconfig(v, oh);
+ omap_hwmod_enable_ioring_wakeup(oh);
spin_unlock_irqrestore(&oh->_lock, flags);
return 0;
@@ -2153,6 +2261,7 @@
v = oh->_sysc_cache;
_disable_wakeup(oh, &v);
_write_sysconfig(v, oh);
+ omap_hwmod_disable_ioring_wakeup(oh);
spin_unlock_irqrestore(&oh->_lock, flags);
return 0;
@@ -2332,7 +2441,7 @@
* Returns the context loss count of the powerdomain assocated with @oh
* upon success, or zero if no powerdomain exists for @oh.
*/
-u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
+int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
{
struct powerdomain *pwrdm;
int ret = 0;
@@ -2369,3 +2478,42 @@
return 0;
}
+
+int omap_hwmod_pad_get_wakeup_status(struct omap_hwmod *oh)
+{
+ if (oh && oh->mux)
+ return omap_hwmod_mux_get_wake_status(oh->mux);
+ return -EINVAL;
+}
+
+/**
+ * omap_hwmod_name_get_dev() - convert a hwmod name to device pointer
+ * @oh_name: name of the hwmod device
+ *
+ * returns back a struct device * pointer associated with a hwmod
+ * device represented by a hwmod_name
+ */
+struct device *omap_hwmod_name_get_dev(const char *oh_name)
+{
+ struct omap_hwmod *oh;
+
+ if (!oh_name) {
+ WARN(1, "%s: no hwmod name!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ oh = _lookup(oh_name);
+ if (IS_ERR_OR_NULL(oh)) {
+ WARN(1, "%s: no hwmod for %s\n", __func__,
+ oh_name);
+ return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV);
+ }
+ if (IS_ERR_OR_NULL(oh->od)) {
+ WARN(1, "%s: no omap_device for %s\n", __func__,
+ oh_name);
+ return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV);
+ }
+
+ return &oh->od->pdev.dev;
+}
+EXPORT_SYMBOL(omap_hwmod_name_get_dev);
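+
+/*
+ * Illustrative usage sketch (assumption): a caller that only knows the hwmod
+ * name can resolve the matching omap_device and then use the regular
+ * runtime-PM API on it:
+ *
+ *	struct device *dev = omap_hwmod_name_get_dev("hsi");
+ *	if (!IS_ERR_OR_NULL(dev))
+ *		pm_runtime_get_sync(dev);
+ */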
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index c4d0ae87..d87019d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -1208,6 +1208,7 @@
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_fck" },
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -1291,6 +1292,10 @@
&omap2420_l4_core__dss_dispc,
};
+static struct omap_hwmod_opt_clk dispc_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_fck" },
+};
+
static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap2420_dispc_hwmod_class,
@@ -1306,6 +1311,8 @@
.idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
},
},
+ .opt_clks = dispc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dispc_opt_clks),
.slaves = omap2420_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
@@ -1361,6 +1368,10 @@
&omap2420_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk rfbi_opt_clks[] = {
+ { .role = "rfbi_iclk", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2420_rfbi_hwmod_class,
@@ -1372,6 +1383,8 @@
.module_offs = CORE_MOD,
},
},
+ .opt_clks = rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(rfbi_opt_clks),
.slaves = omap2420_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
@@ -1418,6 +1431,10 @@
&omap2420_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk venc_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_54m_fck" },
+};
+
static struct omap_hwmod omap2420_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2420_venc_hwmod_class,
@@ -1429,6 +1446,8 @@
.module_offs = CORE_MOD,
},
},
+ .opt_clks = venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(venc_opt_clks),
.slaves = omap2420_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_venc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 9682dd5..8009945 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -1302,6 +1302,7 @@
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_fck" },
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -1379,6 +1380,10 @@
&omap2430_l4_core__dss_dispc,
};
+static struct omap_hwmod_opt_clk dispc_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_fck" },
+};
+
static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap2430_dispc_hwmod_class,
@@ -1394,6 +1399,8 @@
.idlest_stdby_bit = OMAP24XX_ST_DSS_SHIFT,
},
},
+ .opt_clks = dispc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dispc_opt_clks),
.slaves = omap2430_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
@@ -1443,6 +1450,10 @@
&omap2430_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk rfbi_opt_clks[] = {
+ { .role = "rfbi_iclk", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2430_rfbi_hwmod_class,
@@ -1454,6 +1465,8 @@
.module_offs = CORE_MOD,
},
},
+ .opt_clks = rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(rfbi_opt_clks),
.slaves = omap2430_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
@@ -1494,6 +1507,10 @@
&omap2430_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk venc_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_54m_fck" },
+};
+
static struct omap_hwmod omap2430_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2430_venc_hwmod_class,
@@ -1505,6 +1522,8 @@
.module_offs = CORE_MOD,
},
},
+ .opt_clks = venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(venc_opt_clks),
.slaves = omap2430_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_venc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 909a84d..b327776 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -29,6 +29,7 @@
#include "omap_hwmod_common_data.h"
+#include "smartreflex.h"
#include "prm-regbits-34xx.h"
#include "cm-regbits-34xx.h"
#include "wd_timer.h"
@@ -84,6 +85,10 @@
static struct omap_hwmod omap3xxx_mcbsp5_hwmod;
static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod;
static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod;
+static struct omap_hwmod omap34xx_usb_host_hs_hwmod;
+static struct omap_hwmod omap34xx_usbhs_ohci_hwmod;
+static struct omap_hwmod omap34xx_usbhs_ehci_hwmod;
+static struct omap_hwmod omap34xx_usb_tll_hs_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
@@ -396,6 +401,15 @@
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+
+static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = {
+ {.name = "sr1_irq", .irq = 18},
+};
+
+static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = {
+ {.name = "sr2_irq", .irq = 19},
+};
+
/* L4 CORE -> SR1 interface */
static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
{
@@ -1542,9 +1556,15 @@
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "tv_clk", .clk = "dss_tv_fck" },
- { .role = "video_clk", .clk = "dss_96m_fck" },
+ { .role = "dss_clk", .clk = "dss1_alwon_fck" },
+ /*
+ * The rest of the clocks are not needed by the driver,
+ * but are needed by the hwmod to reset DSS properly.
+ */
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1656,6 +1676,10 @@
&omap3xxx_l4_core__dss_dispc,
};
+static struct omap_hwmod_opt_clk dispc_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_alwon_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap3xxx_dispc_hwmod_class,
@@ -1669,6 +1693,8 @@
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dispc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dispc_opt_clks),
.slaves = omap3xxx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
@@ -1720,6 +1746,11 @@
&omap3xxx_l4_core__dss_dsi1,
};
+static struct omap_hwmod_opt_clk dsi1_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss1_alwon_fck" },
+ { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
@@ -1733,6 +1764,8 @@
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dsi1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dsi1_opt_clks),
.slaves = omap3xxx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
@@ -1791,6 +1824,10 @@
&omap3xxx_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk rfbi_opt_clks[] = {
+ { .role = "rfbi_iclk", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap3xxx_rfbi_hwmod_class,
@@ -1802,6 +1839,8 @@
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(rfbi_opt_clks),
.slaves = omap3xxx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
@@ -1851,6 +1890,12 @@
&omap3xxx_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk venc_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap3xxx_venc_hwmod_class,
@@ -1862,6 +1907,8 @@
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(venc_opt_clks),
.slaves = omap3xxx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
@@ -2910,6 +2957,10 @@
};
/* SR1 */
+static struct omap_smartreflex_dev_attr sr1_dev_attr = {
+ .sensor_voltdm_name = "mpu_iva",
+};
+
static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = {
&omap3_l4_core__sr1,
};
@@ -2918,7 +2969,6 @@
.name = "sr1_hwmod",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
- .vdd_name = "mpu",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -2930,9 +2980,12 @@
},
.slaves = omap3_sr1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves),
+ .dev_attr = &sr1_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
CHIP_IS_OMAP3430ES3_0 |
CHIP_IS_OMAP3430ES3_1),
+ .mpu_irqs = omap3_smartreflex_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3_smartreflex_mpu_irqs),
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
@@ -2940,7 +2993,6 @@
.name = "sr1_hwmod",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
- .vdd_name = "mpu",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -2952,10 +3004,17 @@
},
.slaves = omap3_sr1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves),
+ .dev_attr = &sr1_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
+ .mpu_irqs = omap3_smartreflex_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3_smartreflex_mpu_irqs),
};
/* SR2 */
+static struct omap_smartreflex_dev_attr sr2_dev_attr = {
+ .sensor_voltdm_name = "core",
+};
+
static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = {
&omap3_l4_core__sr2,
};
@@ -2964,7 +3023,6 @@
.name = "sr2_hwmod",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
- .vdd_name = "core",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -2976,9 +3034,12 @@
},
.slaves = omap3_sr2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves),
+ .dev_attr = &sr2_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
CHIP_IS_OMAP3430ES3_0 |
CHIP_IS_OMAP3430ES3_1),
+ .mpu_irqs = omap3_smartreflex_core_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3_smartreflex_core_irqs),
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
@@ -2986,7 +3047,6 @@
.name = "sr2_hwmod",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
- .vdd_name = "core",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -2998,7 +3058,10 @@
},
.slaves = omap3_sr2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves),
+ .dev_attr = &sr2_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
+ .mpu_irqs = omap3_smartreflex_core_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap3_smartreflex_core_irqs),
};
/*
@@ -3574,6 +3637,276 @@
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};
+/*
+ * 'usb_host_hs' class
+ * high-speed multi-port usb host controller
+ */
+static struct omap_hwmod_ocp_if omap34xx_usb_host_hs__l3_main_2 = {
+ .master = &omap34xx_usb_host_hs_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_class_sysconfig omap34xx_usb_host_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap34xx_usb_host_hs_hwmod_class = {
+ .name = "usbhs_uhh",
+ .sysc = &omap34xx_usb_host_hs_sysc,
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usb_host_hs_masters[] = {
+ &omap34xx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap34xx_usb_host_hs_addrs[] = {
+ {
+ .name = "uhh",
+ .pa_start = 0x48064000,
+ .pa_end = 0x480643ff,
+ .flags = ADDR_TYPE_RT
+ }
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_cfg__usb_host_hs = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_usb_host_hs_hwmod,
+ .clk = "l4_ick",
+ .addr = omap34xx_usb_host_hs_addrs,
+ .addr_cnt = ARRAY_SIZE(omap34xx_usb_host_hs_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if omap34xx_f128m_cfg__usb_host_hs = {
+ .clk = "usbhost_120m_fck",
+ .user = OCP_USER_MPU,
+ .flags = OCPIF_SWSUP_IDLE,
+};
+
+static struct omap_hwmod_ocp_if omap34xx_f48m_cfg__usb_host_hs = {
+ .clk = "usbhost_48m_fck",
+ .user = OCP_USER_MPU,
+ .flags = OCPIF_SWSUP_IDLE,
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usb_host_hs_slaves[] = {
+ &omap34xx_l4_cfg__usb_host_hs,
+ &omap34xx_f128m_cfg__usb_host_hs,
+ &omap34xx_f48m_cfg__usb_host_hs,
+};
+
+static struct omap_hwmod omap34xx_usb_host_hs_hwmod = {
+ .name = "usbhs_uhh",
+ .class = &omap34xx_usb_host_hs_hwmod_class,
+ .main_clk = "usbhost_ick",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430ES2_USBHOST_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = 0,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = 1,
+ .idlest_stdby_bit = 0,
+ },
+ },
+ .slaves = omap34xx_usb_host_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_usb_host_hs_slaves),
+ .masters = omap34xx_usb_host_hs_masters,
+ .masters_cnt = ARRAY_SIZE(omap34xx_usb_host_hs_masters),
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* 'usbhs_ohci' class */
+static struct omap_hwmod_ocp_if omap34xx_usbhs_ohci__l3_main_2 = {
+ .master = &omap34xx_usbhs_ohci_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_class omap34xx_usbhs_ohci_hwmod_class = {
+ .name = "usbhs_ohci",
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usbhs_ohci_masters[] = {
+ &omap34xx_usbhs_ohci__l3_main_2,
+};
+
+static struct omap_hwmod_irq_info omap34xx_usbhs_ohci_irqs[] = {
+ { .name = "ohci-irq", .irq = 76 },
+};
+
+static struct omap_hwmod_addr_space omap34xx_usbhs_ohci_addrs[] = {
+ {
+ .name = "ohci",
+ .pa_start = 0x48064400,
+ .pa_end = 0x480647FF,
+ .flags = ADDR_MAP_ON_INIT
+ }
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_cfg__usbhs_ohci = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_usbhs_ohci_hwmod,
+ .clk = "l4_ick",
+ .addr = omap34xx_usbhs_ohci_addrs,
+ .addr_cnt = ARRAY_SIZE(omap34xx_usbhs_ohci_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usbhs_ohci_slaves[] = {
+ &omap34xx_l4_cfg__usbhs_ohci,
+};
+
+static struct omap_hwmod omap34xx_usbhs_ohci_hwmod = {
+ .name = "usbhs_ohci",
+ .class = &omap34xx_usbhs_ohci_hwmod_class,
+ .mpu_irqs = omap34xx_usbhs_ohci_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_usbhs_ohci_irqs),
+ .slaves = omap34xx_usbhs_ohci_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_usbhs_ohci_slaves),
+ .masters = omap34xx_usbhs_ohci_masters,
+ .masters_cnt = ARRAY_SIZE(omap34xx_usbhs_ohci_masters),
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* 'usbhs_ehci' class */
+static struct omap_hwmod_ocp_if omap34xx_usbhs_ehci__l3_main_2 = {
+ .master = &omap34xx_usbhs_ehci_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_class omap34xx_usbhs_ehci_hwmod_class = {
+ .name = "usbhs_ehci",
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usbhs_ehci_masters[] = {
+ &omap34xx_usbhs_ehci__l3_main_2,
+};
+
+static struct omap_hwmod_irq_info omap34xx_usbhs_ehci_irqs[] = {
+ { .name = "ehci-irq", .irq = 77 },
+};
+
+static struct omap_hwmod_addr_space omap34xx_usbhs_ehci_addrs[] = {
+ {
+ .name = "ehci",
+ .pa_start = 0x48064800,
+ .pa_end = 0x48064CFF,
+ .flags = ADDR_MAP_ON_INIT
+ }
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_cfg__usbhs_ehci = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_usbhs_ehci_hwmod,
+ .clk = "l4_ick",
+ .addr = omap34xx_usbhs_ehci_addrs,
+ .addr_cnt = ARRAY_SIZE(omap34xx_usbhs_ehci_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usbhs_ehci_slaves[] = {
+ &omap34xx_l4_cfg__usbhs_ehci,
+};
+
+static struct omap_hwmod omap34xx_usbhs_ehci_hwmod = {
+ .name = "usbhs_ehci",
+ .class = &omap34xx_usbhs_ehci_hwmod_class,
+ .mpu_irqs = omap34xx_usbhs_ehci_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_usbhs_ehci_irqs),
+ .slaves = omap34xx_usbhs_ehci_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_usbhs_ehci_slaves),
+ .masters = omap34xx_usbhs_ehci_masters,
+ .masters_cnt = ARRAY_SIZE(omap34xx_usbhs_ehci_masters),
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/*
+ * 'usb_tll_hs' class
+ * usb_tll_hs module is the adapter on the usb_host_hs ports
+ */
+static struct omap_hwmod_class_sysconfig omap34xx_usb_tll_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap34xx_usb_tll_hs_hwmod_class = {
+ .name = "usbhs_tll",
+ .sysc = &omap34xx_usb_tll_hs_sysc,
+};
+
+static struct omap_hwmod_irq_info omap34xx_usb_tll_hs_irqs[] = {
+ { .name = "tll-irq", .irq = 78 },
+};
+
+static struct omap_hwmod_addr_space omap34xx_usb_tll_hs_addrs[] = {
+ {
+ .name = "tll",
+ .pa_start = 0x48062000,
+ .pa_end = 0x48062fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap34xx_f_cfg__usb_tll_hs = {
+ .clk = "usbtll_fck",
+ .user = OCP_USER_MPU,
+ .flags = OCPIF_SWSUP_IDLE,
+};
+
+static struct omap_hwmod_ocp_if omap34xx_l4_cfg__usb_tll_hs = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap34xx_usb_tll_hs_hwmod,
+ .clk = "l4_ick",
+ .addr = omap34xx_usb_tll_hs_addrs,
+ .addr_cnt = ARRAY_SIZE(omap34xx_usb_tll_hs_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap34xx_usb_tll_hs_slaves[] = {
+ &omap34xx_l4_cfg__usb_tll_hs,
+ &omap34xx_f_cfg__usb_tll_hs,
+};
+
+static struct omap_hwmod omap34xx_usb_tll_hs_hwmod = {
+ .name = "usbhs_tll",
+ .class = &omap34xx_usb_tll_hs_hwmod_class,
+ .mpu_irqs = omap34xx_usb_tll_hs_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_usb_tll_hs_irqs),
+ .main_clk = "usbtll_ick",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 3,
+ .module_bit = 2,
+ .idlest_reg_id = 3,
+ .idlest_idle_bit = 2,
+ },
+ },
+ .slaves = omap34xx_usb_tll_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap34xx_usb_tll_hs_slaves),
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l3_main_hwmod,
&omap3xxx_l4_core_hwmod,
@@ -3656,6 +3989,11 @@
/* usbotg for am35x */
&am35xx_usbhsotg_hwmod,
+ &omap34xx_usb_host_hs_hwmod,
+ &omap34xx_usbhs_ohci_hwmod,
+ &omap34xx_usbhs_ehci_hwmod,
+ &omap34xx_usb_tll_hs_hwmod,
+
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index e1c69ff..b5c5b10 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -27,9 +27,11 @@
#include <plat/mcspi.h>
#include <plat/mcbsp.h>
#include <plat/mmc.h>
+#include <plat/dmtimer.h>
#include "omap_hwmod_common_data.h"
+#include "smartreflex.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "prm44xx.h"
@@ -49,6 +51,8 @@
static struct omap_hwmod omap44xx_dsp_hwmod;
static struct omap_hwmod omap44xx_dss_hwmod;
static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_fdif_hwmod;
+static struct omap_hwmod omap44xx_gpu_hwmod;
static struct omap_hwmod omap44xx_hsi_hwmod;
static struct omap_hwmod omap44xx_ipu_hwmod;
static struct omap_hwmod omap44xx_iss_hwmod;
@@ -65,7 +69,12 @@
static struct omap_hwmod omap44xx_mmc2_hwmod;
static struct omap_hwmod omap44xx_mpu_hwmod;
static struct omap_hwmod omap44xx_mpu_private_hwmod;
+static struct omap_hwmod omap44xx_sl2if_hwmod;
static struct omap_hwmod omap44xx_usb_otg_hs_hwmod;
+static struct omap_hwmod omap44xx_usb_host_hs_hwmod;
+static struct omap_hwmod omap44xx_usbhs_ohci_hwmod;
+static struct omap_hwmod omap44xx_usbhs_ehci_hwmod;
+static struct omap_hwmod omap44xx_usb_tll_hs_hwmod;
/*
* Interconnects omap_hwmod structures
@@ -124,7 +133,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_dmm_slaves),
.mpu_irqs = omap44xx_dmm_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dmm_irqs),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -173,7 +182,7 @@
.class = &omap44xx_emif_fw_hwmod_class,
.slaves = omap44xx_emif_fw_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_emif_fw_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -212,7 +221,7 @@
.class = &omap44xx_l3_hwmod_class,
.slaves = omap44xx_l3_instr_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l3_instr_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l3_main_1 interface data */
@@ -306,7 +315,7 @@
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_l3_targ_irqs),
.slaves = omap44xx_l3_main_1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l3_main_2 interface data */
@@ -318,6 +327,14 @@
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* gpu -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_gpu__l3_main_2 = {
+ .master = &omap44xx_gpu_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* hsi -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
.master = &omap44xx_hsi_hwmod,
@@ -342,6 +359,14 @@
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* fdif -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
+ .master = &omap44xx_fdif_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* iva -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_iva__l3_main_2 = {
.master = &omap44xx_iva_hwmod,
@@ -390,8 +415,10 @@
&omap44xx_hsi__l3_main_2,
&omap44xx_ipu__l3_main_2,
&omap44xx_iss__l3_main_2,
+ &omap44xx_fdif__l3_main_2,
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
+ &omap44xx_gpu__l3_main_2,
&omap44xx_l4_cfg__l3_main_2,
&omap44xx_usb_otg_hs__l3_main_2,
};
@@ -401,7 +428,7 @@
.class = &omap44xx_l3_hwmod_class,
.slaves = omap44xx_l3_main_2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l3_main_3 interface data */
@@ -451,7 +478,7 @@
.class = &omap44xx_l3_hwmod_class,
.slaves = omap44xx_l3_main_3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -508,7 +535,7 @@
.class = &omap44xx_l4_hwmod_class,
.slaves = omap44xx_l4_abe_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l4_abe_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l4_cfg interface data */
@@ -530,7 +557,7 @@
.class = &omap44xx_l4_hwmod_class,
.slaves = omap44xx_l4_cfg_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l4_cfg_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l4_per interface data */
@@ -552,7 +579,7 @@
.class = &omap44xx_l4_hwmod_class,
.slaves = omap44xx_l4_per_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l4_per_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* l4_wkup interface data */
@@ -574,7 +601,7 @@
.class = &omap44xx_l4_hwmod_class,
.slaves = omap44xx_l4_wkup_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l4_wkup_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -604,7 +631,7 @@
.class = &omap44xx_mpu_bus_hwmod_class,
.slaves = omap44xx_mpu_private_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mpu_private_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -629,7 +656,6 @@
* elm
* emif1
* emif2
- * fdif
* gpmc
* gpu
* hdq1w
@@ -640,7 +666,6 @@
* prcm_mpu
* prm
* scrm
- * sl2if
* slimbus1
* slimbus2
* usb_host_fs
@@ -651,6 +676,222 @@
*/
/*
+ * 'mpu' class
+ * mpu sub-system
+ */
+
+static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
+ .name = "mpu",
+};
+
+/* mpu */
+static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
+ { .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
+ { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
+ { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* mpu master ports */
+static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = {
+ &omap44xx_mpu__l3_main_1,
+ &omap44xx_mpu__l4_abe,
+ &omap44xx_mpu__dmm,
+};
+
+static struct omap_hwmod omap44xx_mpu_hwmod = {
+ .name = "mpu",
+ .class = &omap44xx_mpu_hwmod_class,
+ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .mpu_irqs = omap44xx_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mpu_irqs),
+ .main_clk = "dpll_mpu_m2_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_MPU_MPU_CLKCTRL,
+ },
+ },
+ .masters = omap44xx_mpu_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_mpu_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'smartreflex' class
+ * smartreflex module (monitor silicon performance and outputs a measure of
+ * performance error)
+ */
+
+/* The IP is not compliant to type1 / type2 scheme */
+static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = {
+ .sidle_shift = 24,
+ .enwkup_shift = 26,
+};
+
+static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = {
+ .sysc_offs = 0x0038,
+ .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type_smartreflex,
+};
+
+static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
+ .name = "smartreflex",
+ .sysc = &omap44xx_smartreflex_sysc,
+ .rev = 2,
+};
+
+/* smartreflex_core */
+static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = {
+ .sensor_voltdm_name = "core",
+};
+
+static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
+ { .irq = 19 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
+ {
+ .pa_start = 0x4a0dd000,
+ .pa_end = 0x4a0dd03f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> smartreflex_core */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_smartreflex_core_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_smartreflex_core_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_core slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_core_slaves[] = {
+ &omap44xx_l4_cfg__smartreflex_core,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
+ .name = "smartreflex_core",
+ .class = &omap44xx_smartreflex_hwmod_class,
+ .mpu_irqs = omap44xx_smartreflex_core_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_irqs),
+ .main_clk = "smartreflex_core_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_smartreflex_core_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_slaves),
+ .dev_attr = &smartreflex_core_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/* smartreflex_iva */
+static struct omap_smartreflex_dev_attr smartreflex_iva_dev_attr = {
+ .sensor_voltdm_name = "iva",
+};
+
+static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
+ { .irq = 102 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
+ {
+ .pa_start = 0x4a0db000,
+ .pa_end = 0x4a0db03f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> smartreflex_iva */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_smartreflex_iva_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_smartreflex_iva_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_iva slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_iva_slaves[] = {
+ &omap44xx_l4_cfg__smartreflex_iva,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
+ .name = "smartreflex_iva",
+ .class = &omap44xx_smartreflex_hwmod_class,
+ .mpu_irqs = omap44xx_smartreflex_iva_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_irqs),
+ .main_clk = "smartreflex_iva_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_smartreflex_iva_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_slaves),
+ .dev_attr = &smartreflex_iva_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/* smartreflex_mpu */
+static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = {
+ .sensor_voltdm_name = "mpu",
+};
+
+static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
+ { .irq = 18 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
+ {
+ .pa_start = 0x4a0d9000,
+ .pa_end = 0x4a0d903f,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> smartreflex_mpu */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_smartreflex_mpu_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_smartreflex_mpu_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_mpu slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_mpu_slaves[] = {
+ &omap44xx_l4_cfg__smartreflex_mpu,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
+ .name = "smartreflex_mpu",
+ .class = &omap44xx_smartreflex_hwmod_class,
+ .mpu_irqs = omap44xx_smartreflex_mpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_irqs),
+ .main_clk = "smartreflex_mpu_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_smartreflex_mpu_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves),
+ .dev_attr = &smartreflex_mpu_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
* 'aess' class
* audio engine sub system
*/
@@ -658,7 +899,7 @@
static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .sysc_flags = 0,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type2,
@@ -692,6 +933,27 @@
static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
{
+ .name = "dmem",
+ .pa_start = 0x40180000,
+ .pa_end = 0x4018ffff
+ },
+ {
+ .name = "cmem",
+ .pa_start = 0x401a0000,
+ .pa_end = 0x401a1fff
+ },
+ {
+ .name = "smem",
+ .pa_start = 0x401c0000,
+ .pa_end = 0x401c5fff
+ },
+ {
+ .name = "pmem",
+ .pa_start = 0x401e0000,
+ .pa_end = 0x401e1fff
+ },
+ {
+ .name = "mpu",
.pa_start = 0x401f1000,
.pa_end = 0x401f13ff,
.flags = ADDR_TYPE_RT
@@ -710,6 +972,27 @@
static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
{
+ .name = "dmem_dma",
+ .pa_start = 0x49080000,
+ .pa_end = 0x4908ffff
+ },
+ {
+ .name = "cmem_dma",
+ .pa_start = 0x490a0000,
+ .pa_end = 0x490a1fff
+ },
+ {
+ .name = "smem_dma",
+ .pa_start = 0x490c0000,
+ .pa_end = 0x490c5fff
+ },
+ {
+ .name = "pmem_dma",
+ .pa_start = 0x490e0000,
+ .pa_end = 0x490e1fff
+ },
+ {
+ .name = "dma",
.pa_start = 0x490f1000,
.pa_end = 0x490f13ff,
.flags = ADDR_TYPE_RT
@@ -743,13 +1026,132 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_AESS_CONTEXT,
},
},
.slaves = omap44xx_aess_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_aess_slaves),
.masters = omap44xx_aess_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_aess_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'ctrl_module' class
+ * attila core control module
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_ctrl_module_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_ctrl_module_hwmod_class = {
+ .name = "ctrl_module",
+ .sysc = &omap44xx_ctrl_module_sysc,
+};
+
+/* ctrl_module_core */
+static struct omap_hwmod omap44xx_ctrl_module_core_hwmod;
+static struct omap_hwmod_irq_info omap44xx_ctrl_module_core_irqs[] = {
+ { .name = "sec_evts", .irq = 8 + OMAP44XX_IRQ_GIC_START },
+ { .name = "thermal_alert", .irq = 126 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_ctrl_module_core_addrs[] = {
+ {
+ .pa_start = 0x4a002000,
+ .pa_end = 0x4a0027ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_cfg -> ctrl_module_core */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_core = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_ctrl_module_core_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_ctrl_module_core_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_ctrl_module_core_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* ctrl_module_core slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_ctrl_module_core_slaves[] = {
+ &omap44xx_l4_cfg__ctrl_module_core,
+};
+
+static struct omap_hwmod omap44xx_ctrl_module_core_hwmod = {
+ .name = "ctrl_module_core",
+ .class = &omap44xx_ctrl_module_hwmod_class,
+ .mpu_irqs = omap44xx_ctrl_module_core_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_ctrl_module_core_irqs),
+ .main_clk = "l4_div_ck",
+ .slaves = omap44xx_ctrl_module_core_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_ctrl_module_core_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
+};
+
+/*
+ * 'thermal_sensor' class
+ * thermal sensor module inside the bandgap / control module
+ */
+
+static struct omap_hwmod_class omap44xx_thermal_sensor_hwmod_class = {
+ .name = "thermal_sensor",
+};
+
+static struct omap_hwmod_irq_info omap44xx_thermal_sensor_irqs[] = {
+ { .name = "thermal_alert", .irq = 126 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_thermal_sensor_addrs[] = {
+ {
+ .pa_start = 0x4a002378,
+ .pa_end = 0x4a0023ff,
+ },
+};
+
+static struct omap_hwmod omap44xx_thermal_sensor_hwmod;
+/* l4_cfg -> ctrl_module_core */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__thermal_sensor = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_thermal_sensor_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_thermal_sensor_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_thermal_sensor_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* ctrl_module_core slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_thermal_sensor_slaves[] = {
+ &omap44xx_l4_cfg__thermal_sensor,
+};
+
+static struct omap_hwmod_opt_clk thermal_sensor446x_opt_clks[] = {
+ { .role = "fclk", .clk = "bandgap_ts_fclk" },
+};
+
+static struct omap_hwmod omap44xx_thermal_sensor_hwmod = {
+ .name = "thermal_sensor",
+ .class = &omap44xx_thermal_sensor_hwmod_class,
+ .mpu_irqs = omap44xx_thermal_sensor_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_thermal_sensor_irqs),
+ .main_clk = "bandgap_ts_fclk",
+ .slaves = omap44xx_thermal_sensor_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_thermal_sensor_slaves),
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ },
+ },
+ .opt_clks = thermal_sensor446x_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(thermal_sensor446x_opt_clks),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
};
/*
@@ -762,11 +1164,11 @@
};
/* bandgap */
-static struct omap_hwmod_opt_clk bandgap_opt_clks[] = {
+static struct omap_hwmod_opt_clk bandgap443x_opt_clks[] = {
{ .role = "fclk", .clk = "bandgap_fclk" },
};
-static struct omap_hwmod omap44xx_bandgap_hwmod = {
+static struct omap_hwmod omap443x_bandgap_hwmod = {
.name = "bandgap",
.class = &omap44xx_bandgap_hwmod_class,
.prcm = {
@@ -774,9 +1176,26 @@
.clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
},
},
- .opt_clks = bandgap_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(bandgap_opt_clks),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .opt_clks = bandgap443x_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(bandgap443x_opt_clks),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP443X),
+};
+
+static struct omap_hwmod_opt_clk bandgap446x_opt_clks[] = {
+ { .role = "fclk", .clk = "bandgap_ts_fclk" },
+};
+
+static struct omap_hwmod omap446x_bandgap_hwmod = {
+ .name = "bandgap",
+ .class = &omap44xx_bandgap_hwmod_class,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ },
+ },
+ .opt_clks = bandgap446x_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(bandgap446x_opt_clks),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
};
/*
@@ -835,7 +1254,7 @@
},
.slaves = omap44xx_counter_32k_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_counter_32k_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -921,7 +1340,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_dma_system_slaves),
.masters = omap44xx_dma_system_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_dma_system_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1011,7 +1430,7 @@
},
.slaves = omap44xx_dmic_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dmic_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1036,6 +1455,14 @@
{ .name = "dsp", .rst_shift = 0 },
};
+static struct omap_hwmod_addr_space omap44xx_dsp_addrs[] = {
+ {
+ .pa_start = 0x4A066000,
+ .pa_end = 0x4A0660ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
/* dsp -> iva */
static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
.master = &omap44xx_dsp_hwmod,
@@ -1043,11 +1470,19 @@
.clk = "dpll_iva_m5x2_ck",
};
+/* dsp -> sl2if */
+static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+ .master = &omap44xx_dsp_hwmod,
+ .slave = &omap44xx_sl2if_hwmod,
+ .clk = "dpll_iva_m5x2_ck",
+};
+
/* dsp master ports */
static struct omap_hwmod_ocp_if *omap44xx_dsp_masters[] = {
&omap44xx_dsp__l3_main_1,
&omap44xx_dsp__l4_abe,
&omap44xx_dsp__iva,
+ &omap44xx_dsp__sl2if,
};
/* l4_cfg -> dsp */
@@ -1055,6 +1490,8 @@
.master = &omap44xx_l4_cfg_hwmod,
.slave = &omap44xx_dsp_hwmod,
.clk = "l4_div_ck",
+ .addr = omap44xx_dsp_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_dsp_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1075,12 +1512,13 @@
.rstctrl_reg = OMAP4430_RM_TESLA_RSTCTRL,
},
},
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct omap_hwmod omap44xx_dsp_hwmod = {
.name = "dsp",
.class = &omap44xx_dsp_hwmod_class,
+ .flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_dsp_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dsp_irqs),
.rst_lines = omap44xx_dsp_resets,
@@ -1096,7 +1534,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_dsp_slaves),
.masters = omap44xx_dsp_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_dsp_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1110,9 +1548,67 @@
.sysc_flags = SYSS_HAS_RESET_STATUS,
};
+static int omap44xx_dss_reset(struct omap_hwmod *oh)
+{
+#define DISPC_IRQSTATUS (0x48041018UL)
+#define DISPC_CONTROL1 (0x48041040UL)
+#define DISPC_CONTROL2 (0x48041238UL)
+ u32 ctrl1_mask = 0;
+ u32 ctrl2_mask = 0;
+ u32 irq_mask = 0;
+ u32 val;
+ unsigned long end_wait;
+
+ /* HACK */
+ /* If LCD1/LCD2/TV are active, disable them first before
+ * moving the clock sources back to PRCM. We don't want to change
+ * the clock source while a DMA is active.
+ */
+ val = omap_readl(DISPC_CONTROL1);
+ if (val & (1 << 0)) {
+ /* LCD1 */
+ irq_mask |= 1 << 0;
+ ctrl1_mask |= 1 << 0;
+ }
+ if (val & (1 << 1)) {
+ /* TV/VENC */
+ irq_mask |= 1 << 24;
+ ctrl1_mask |= 1 << 1;
+ }
+ val = omap_readl(DISPC_CONTROL2);
+ if (val & (1 << 0)) {
+ /* LCD2 */
+ irq_mask |= 1 << 22;
+ ctrl2_mask |= 1 << 0;
+ }
+
+ /* disable the active controllers */
+ omap_writel(omap_readl(DISPC_CONTROL1) & (~ctrl1_mask), DISPC_CONTROL1);
+ omap_writel(omap_readl(DISPC_CONTROL2) & (~ctrl2_mask), DISPC_CONTROL2);
+
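+	/* ack stale frame-done status so the wait below sees fresh events */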
+ omap_writel(irq_mask, DISPC_IRQSTATUS);
+
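+	/* give the outputs up to 50 ms to finish the current frame and stop */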
+ end_wait = jiffies + msecs_to_jiffies(50);
+ while (((omap_readl(DISPC_CONTROL1) & ctrl1_mask) ||
+ (omap_readl(DISPC_CONTROL2) & ctrl2_mask) ||
+ ((omap_readl(DISPC_IRQSTATUS) & irq_mask) != irq_mask)) &&
+ time_before(jiffies, end_wait))
+ cpu_relax();
+ WARN_ON((omap_readl(DISPC_CONTROL1) & ctrl1_mask) ||
+ (omap_readl(DISPC_CONTROL2) & ctrl2_mask) ||
+ ((omap_readl(DISPC_IRQSTATUS) & irq_mask) != irq_mask));
+
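+	/* outputs are idle: clear DSS_CTRL so the clock sources fall back to PRCM */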
+ omap_hwmod_write(0x0, oh, 0x40);
+ return 0;
+#undef DISPC_IRQSTATUS
+#undef DISPC_CONTROL1
+#undef DISPC_CONTROL2
+}
+
static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
.name = "dss",
.sysc = &omap44xx_dss_sysc,
+ .reset = omap44xx_dss_reset,
};
/* dss */
@@ -1164,15 +1660,15 @@
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "tv_clk", .clk = "dss_tv_clk" },
{ .role = "dss_clk", .clk = "dss_dss_clk" },
- { .role = "video_clk", .clk = "dss_48mhz_clk" },
};
static struct omap_hwmod omap44xx_dss_hwmod = {
.name = "dss_core",
.class = &omap44xx_dss_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1185,7 +1681,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_slaves),
.masters = omap44xx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_dss_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1263,9 +1759,25 @@
&omap44xx_l4_per__dss_dispc,
};
+static struct omap_hwmod_opt_clk dispc_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss_dss_clk" },
+ /*
+ * The rest of the clocks are not needed by the driver,
+ * but are needed by the hwmod to reset DSS properly.
+ */
+ { .role = "sys_clk", .clk = "dss_sys_clk" },
+ { .role = "tv_clk", .clk = "dss_tv_clk" },
+ { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
+};
+
static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap44xx_dispc_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET | HWMOD_INIT_NO_RESET,
+#else
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+#endif
.mpu_irqs = omap44xx_dss_dispc_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dispc_irqs),
.sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
@@ -1274,11 +1786,14 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .context_reg = OMAP4430_RM_DSS_DSS_CONTEXT,
},
},
+ .opt_clks = dispc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dispc_opt_clks),
.slaves = omap44xx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1354,9 +1869,17 @@
&omap44xx_l4_per__dss_dsi1,
};
+static struct omap_hwmod_opt_clk dsi1_opt_clks[] = {
+ { .role = "dss_clk", .clk = "dss_dss_clk" },
+ { .role = "sys_clk", .clk = "dss_sys_clk" },
+};
+
static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap44xx_dsi_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.mpu_irqs = omap44xx_dss_dsi1_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_irqs),
.sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
@@ -1367,9 +1890,11 @@
.clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
},
},
+ .opt_clks = dsi1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dsi1_opt_clks),
.slaves = omap44xx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* dss_dsi2 */
@@ -1427,6 +1952,9 @@
static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
.name = "dss_dsi2",
.class = &omap44xx_dsi_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.mpu_irqs = omap44xx_dss_dsi2_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_irqs),
.sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
@@ -1439,7 +1967,7 @@
},
.slaves = omap44xx_dss_dsi2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1514,9 +2042,17 @@
&omap44xx_l4_per__dss_hdmi,
};
+static struct omap_hwmod_opt_clk hdmi_opt_clks[] = {
+ { .role = "sys_clk", .clk = "dss_sys_clk" },
+ { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
+};
+
static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.name = "dss_hdmi",
.class = &omap44xx_hdmi_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.mpu_irqs = omap44xx_dss_hdmi_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_irqs),
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
@@ -1527,9 +2063,11 @@
.clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
},
},
+ .opt_clks = hdmi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(hdmi_opt_clks),
.slaves = omap44xx_dss_hdmi_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1600,9 +2138,16 @@
&omap44xx_l4_per__dss_rfbi,
};
+static struct omap_hwmod_opt_clk rfbi_opt_clks[] = {
+ { .role = "rfbi_iclk", .clk = "dss_fck" },
+};
+
static struct omap_hwmod omap44xx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap44xx_rfbi_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.sdma_reqs = omap44xx_dss_rfbi_sdma_reqs,
.sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_sdma_reqs),
.main_clk = "dss_fck",
@@ -1611,9 +2156,11 @@
.clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
},
},
+ .opt_clks = rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(rfbi_opt_clks),
.slaves = omap44xx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1669,18 +2216,209 @@
&omap44xx_l4_per__dss_venc,
};
+static struct omap_hwmod_opt_clk venc_opt_clks[] = {
+ { .role = "tv_clk", .clk = "dss_tv_clk" },
+};
+
static struct omap_hwmod omap44xx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap44xx_venc_hwmod_class,
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ .flags = HWMOD_INIT_NO_RESET,
+#endif
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
},
},
+ .opt_clks = venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(venc_opt_clks),
.slaves = omap44xx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_venc_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'emif' class
+ * external memory interface no1
+ */
+
+static struct omap_hwmod_class omap44xx_emif_hwmod_class = {
+ .name = "emif",
+};
+
+/* emif1 */
+static struct omap_hwmod omap44xx_emif1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_emif1_irqs[] = {
+ { .irq = 110 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_emif1_addrs[] = {
+ {
+ .pa_start = 0x4c000000,
+ .pa_end = 0x4c0000ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* emif_fw -> emif1 */
+static struct omap_hwmod_ocp_if omap44xx_emif_fw__emif1 = {
+ .master = &omap44xx_emif_fw_hwmod,
+ .slave = &omap44xx_emif1_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_emif1_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_emif1_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* emif1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_emif1_slaves[] = {
+ &omap44xx_emif_fw__emif1,
+};
+
+static struct omap_hwmod omap44xx_emif1_hwmod = {
+ .name = "emif1",
+ .class = &omap44xx_emif_hwmod_class,
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .mpu_irqs = omap44xx_emif1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_emif1_irqs),
+ .main_clk = "emif1_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_emif1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_emif1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/* emif2 */
+static struct omap_hwmod omap44xx_emif2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_emif2_irqs[] = {
+ { .irq = 111 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_emif2_addrs[] = {
+ {
+ .pa_start = 0x4d000000,
+ .pa_end = 0x4d0000ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* emif_fw -> emif2 */
+static struct omap_hwmod_ocp_if omap44xx_emif_fw__emif2 = {
+ .master = &omap44xx_emif_fw_hwmod,
+ .slave = &omap44xx_emif2_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_emif2_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_emif2_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* emif2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_emif2_slaves[] = {
+ &omap44xx_emif_fw__emif2,
+};
+
+static struct omap_hwmod omap44xx_emif2_hwmod = {
+ .name = "emif2",
+ .class = &omap44xx_emif_hwmod_class,
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .mpu_irqs = omap44xx_emif2_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_emif2_irqs),
+ .main_clk = "emif2_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_emif2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_emif2_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'fdif' class
+ * face detection hw accelerator module
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ /*
+ * FDIF needs 100 OCP clk cycles delay after a softreset before
+ * accessing sysconfig again.
+ * The lowest frequency at the moment for L3 bus is 100 MHz, so
+ * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
+ *
+ * TODO: Indicate errata when available.
+ */
+ .srst_udelay = 2,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
+ .name = "fdif",
+ .sysc = &omap44xx_fdif_sysc,
+};
+
+/* fdif */
+static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
+ { .irq = 69 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* fdif master ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
+ &omap44xx_fdif__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
+ {
+ .pa_start = 0x4a10a000,
+ .pa_end = 0x4a10a1ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l3_main_2 -> fdif */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__fdif = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_fdif_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_fdif_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_fdif_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* fdif slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
+ &omap44xx_l3_main_2__fdif,
+};
+
+static struct omap_hwmod omap44xx_fdif_hwmod = {
+ .name = "fdif",
+ .class = &omap44xx_fdif_hwmod_class,
+ .mpu_irqs = omap44xx_fdif_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_fdif_irqs),
+ .main_clk = "fdif_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_fdif_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_fdif_slaves),
+ .masters = omap44xx_fdif_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_fdif_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -1745,7 +2483,7 @@
{ .role = "dbclk", .clk = "gpio1_dbclk" },
};
-static struct omap_hwmod omap44xx_gpio1_hwmod = {
+static struct omap_hwmod omap443x_gpio1_hwmod = {
.name = "gpio1",
.class = &omap44xx_gpio_hwmod_class,
.mpu_irqs = omap44xx_gpio1_irqs,
@@ -1754,6 +2492,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ .context_reg = OMAP4430_RM_WKUP_GPIO1_CONTEXT,
},
},
.opt_clks = gpio1_opt_clks,
@@ -1761,7 +2500,28 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP443X),
+};
+
+static struct omap_hwmod omap446x_gpio1_hwmod = {
+ .name = "gpio1",
+ .class = &omap44xx_gpio_hwmod_class,
+ .flags = HWMOD_INIT_NO_RESET,
+ .mpu_irqs = omap44xx_gpio1_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio1_irqs),
+ .main_clk = "gpio1_ick",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ .context_reg = OMAP4430_RM_WKUP_GPIO1_CONTEXT,
+ },
+ },
+ .opt_clks = gpio1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
+ .dev_attr = &gpio_dev_attr,
+ .slaves = omap44xx_gpio1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_gpio1_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
};
/* gpio2 */
@@ -1807,6 +2567,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_GPIO2_CONTEXT,
},
},
.opt_clks = gpio2_opt_clks,
@@ -1814,7 +2575,7 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* gpio3 */
@@ -1860,6 +2621,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_GPIO3_CONTEXT,
},
},
.opt_clks = gpio3_opt_clks,
@@ -1867,7 +2629,7 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* gpio4 */
@@ -1913,6 +2675,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_GPIO4_CONTEXT,
},
},
.opt_clks = gpio4_opt_clks,
@@ -1920,7 +2683,7 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* gpio5 */
@@ -1966,6 +2729,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_GPIO5_CONTEXT,
},
},
.opt_clks = gpio5_opt_clks,
@@ -1973,7 +2737,7 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio5_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio5_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* gpio6 */
@@ -2019,6 +2783,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_GPIO6_CONTEXT,
},
},
.opt_clks = gpio6_opt_clks,
@@ -2026,7 +2791,45 @@
.dev_attr = &gpio_dev_attr,
.slaves = omap44xx_gpio6_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_gpio6_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'gpu' class
+ * 2d/3d graphics accelerator
+ */
+
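+/* the SGX OCP system registers sit at the top of the 64K GPU register space */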
+static struct omap_hwmod_class_sysconfig omap44xx_gpu_sysc = {
+ .rev_offs = 0xfe00,
+ .sysc_offs = 0xfe10,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_gpu_hwmod_class = {
+ .name = "gpu",
+ .sysc = &omap44xx_gpu_sysc,
+};
+
+/* gpu */
+static struct omap_hwmod_irq_info omap44xx_gpu_irqs[] = {
+ { .irq = 21 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* gpu master ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpu_masters[] = {
+ &omap44xx_gpu__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpu_addrs[] = {
+ {
+ .pa_start = 0x56000000,
+ .pa_end = 0x5600ffff,
+ .flags = ADDR_TYPE_RT
+ },
};
/*
@@ -2044,7 +2847,7 @@
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART),
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -2103,7 +2906,41 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_hsi_slaves),
.masters = omap44xx_hsi_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_hsi_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/* l3_main_2 -> gpu */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpu = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_gpu_hwmod,
+ .clk = "l3_div_ck",
+ .addr = omap44xx_gpu_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_gpu_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpu slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpu_slaves[] = {
+ &omap44xx_l3_main_2__gpu,
+};
+
+static struct omap_hwmod omap44xx_gpu_hwmod = {
+ .name = "gpu",
+ .class = &omap44xx_gpu_hwmod_class,
+ .mpu_irqs = omap44xx_gpu_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpu_irqs),
+ .main_clk = "gpu_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_gpu_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_gpu_slaves),
+ .masters = omap44xx_gpu_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_gpu_masters),
+ .dev_attr = &smartreflex_core_dev_attr,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2177,7 +3014,7 @@
},
.slaves = omap44xx_i2c1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_i2c1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* i2c2 */
@@ -2230,7 +3067,7 @@
},
.slaves = omap44xx_i2c2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_i2c2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* i2c3 */
@@ -2283,7 +3120,7 @@
},
.slaves = omap44xx_i2c3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_i2c3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* i2c4 */
@@ -2336,7 +3173,7 @@
},
.slaves = omap44xx_i2c4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_i2c4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2365,6 +3202,14 @@
{ .name = "mmu_cache", .rst_shift = 2 },
};
+static struct omap_hwmod_addr_space omap44xx_ipu_addrs[] = {
+ {
+ .pa_start = 0x55082000,
+ .pa_end = 0x550820ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
/* ipu master ports */
static struct omap_hwmod_ocp_if *omap44xx_ipu_masters[] = {
&omap44xx_ipu__l3_main_2,
@@ -2375,6 +3220,8 @@
.master = &omap44xx_l3_main_2_hwmod,
.slave = &omap44xx_ipu_hwmod,
.clk = "l3_div_ck",
+ .addr = omap44xx_ipu_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_ipu_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2395,7 +3242,7 @@
.rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
},
},
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* Pseudo hwmod for reset control purpose only */
@@ -2410,12 +3257,13 @@
.rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
},
},
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct omap_hwmod omap44xx_ipu_hwmod = {
.name = "ipu",
.class = &omap44xx_ipu_hwmod_class,
+ .flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_ipu_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_ipu_irqs),
.rst_lines = omap44xx_ipu_resets,
@@ -2431,7 +3279,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_ipu_slaves),
.masters = omap44xx_ipu_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_ipu_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2442,6 +3290,15 @@
static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
+ /*
+ * ISS needs 100 OCP clk cycles delay after a softreset before
+ * accessing sysconfig again.
+ * The lowest frequency at the moment for L3 bus is 100 MHz, so
+ * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
+ *
+ * TODO: Indicate errata when available.
+ */
+ .srst_udelay = 2,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
@@ -2518,7 +3375,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_iss_slaves),
.masters = omap44xx_iss_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_iss_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2549,8 +3406,16 @@
{ .name = "seq1", .rst_shift = 1 },
};
+/* iva -> sl2if */
+static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+ .master = &omap44xx_iva_hwmod,
+ .slave = &omap44xx_sl2if_hwmod,
+ .clk = "dpll_iva_m5x2_ck",
+};
+
/* iva master ports */
static struct omap_hwmod_ocp_if *omap44xx_iva_masters[] = {
+ &omap44xx_iva__sl2if,
&omap44xx_iva__l3_main_2,
&omap44xx_iva__l3_instr,
};
@@ -2591,7 +3456,7 @@
.rstctrl_reg = OMAP4430_RM_IVAHD_RSTCTRL,
},
},
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* Pseudo hwmod for reset control purpose only */
@@ -2606,7 +3471,7 @@
.rstctrl_reg = OMAP4430_RM_IVAHD_RSTCTRL,
},
},
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static struct omap_hwmod omap44xx_iva_hwmod = {
@@ -2627,7 +3492,46 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_iva_slaves),
.masters = omap44xx_iva_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_iva_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'sl2if' class
+ * shared level 2 memory interface
+ */
+
+static struct omap_hwmod_class omap44xx_sl2if_hwmod_class = {
+ .name = "sl2if",
+};
+
+/* sl2if */
+/* l3_main_2 -> sl2if */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+ .master = &omap44xx_l3_main_2_hwmod,
+ .slave = &omap44xx_sl2if_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* sl2if slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_sl2if_slaves[] = {
+ &omap44xx_l3_main_2__sl2if,
+ &omap44xx_iva__sl2if,
+ &omap44xx_dsp__sl2if,
+};
+
+static struct omap_hwmod omap44xx_sl2if_hwmod = {
+ .name = "sl2if",
+ .class = &omap44xx_sl2if_hwmod_class,
+ .main_clk = "sl2if_ick",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_sl2if_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_sl2if_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2694,7 +3598,7 @@
},
.slaves = omap44xx_kbd_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_kbd_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2758,7 +3662,105 @@
},
.slaves = omap44xx_mailbox_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mailbox_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'mcasp' class
+ * multi channel audio serial port controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_mcasp_sysc = {
+ .sysc_offs = 0x0004,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class omap44xx_mcasp_hwmod_class = {
+ .name = "omap-mcasp-dai",
+ .sysc = &omap44xx_mcasp_sysc,
+};
+
+/* mcasp */
+static struct omap_hwmod omap44xx_mcasp_hwmod;
+static struct omap_hwmod_irq_info omap44xx_mcasp_irqs[] = {
+ { .irq = 109 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_mcasp_sdma_reqs[] = {
+ { .name = "tx", .dma_req = 7 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_mcasp_addrs[] = {
+ {
+ .pa_start = 0x40128000,
+ .pa_end = 0x40128000 + SZ_4K - 1, /* McASP CFG Port */
+ .flags = ADDR_TYPE_RT
+ },
+ {
+ .pa_start = 0x4012A000,
+ .pa_end = 0x4012A000 + SZ_4K - 1, /* McASP Data Port */
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcasp */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcasp = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcasp_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcasp_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcasp_addrs),
+ .user = OCP_USER_MPU,
+};
+
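+/* SDMA (L3) view of the same McASP CFG and data ports */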
+static struct omap_hwmod_addr_space omap44xx_mcasp_dma_addrs[] = {
+ {
+ .pa_start = 0x49028000,
+ .pa_end = 0x49028000 + SZ_4K - 1, /* McASP CFG Port */
+ .flags = ADDR_TYPE_RT
+ },
+ {
+ .pa_start = 0x4902A000,
+ .pa_end = 0x4902A000 + SZ_4K - 1, /* McASP Data Port */
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+/* l4_abe -> mcasp (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcasp_dma = {
+ .master = &omap44xx_l4_abe_hwmod,
+ .slave = &omap44xx_mcasp_hwmod,
+ .clk = "ocp_abe_iclk",
+ .addr = omap44xx_mcasp_dma_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_mcasp_dma_addrs),
+ .user = OCP_USER_SDMA,
+};
+
+/* mcasp slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_mcasp_slaves[] = {
+ &omap44xx_l4_abe__mcasp,
+ &omap44xx_l4_abe__mcasp_dma,
+};
+
+static struct omap_hwmod omap44xx_mcasp_hwmod = {
+ .name = "omap-mcasp-dai",
+ .class = &omap44xx_mcasp_hwmod_class,
+ .flags = HWMOD_SWSUP_SIDLE,
+ .mpu_irqs = omap44xx_mcasp_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcasp_irqs),
+ .sdma_reqs = omap44xx_mcasp_sdma_reqs,
+ .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcasp_sdma_reqs),
+ .main_clk = "mcasp_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_mcasp_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_mcasp_slaves),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -2783,7 +3785,8 @@
/* mcbsp1 */
static struct omap_hwmod omap44xx_mcbsp1_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = {
- { .irq = 17 + OMAP44XX_IRQ_GIC_START },
+ { .name = "tx", .irq = 17 + OMAP44XX_IRQ_GIC_START },
+ { .name = "rx", .irq = 0 },
};
static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
@@ -2850,13 +3853,14 @@
},
.slaves = omap44xx_mcbsp1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcbsp2 */
static struct omap_hwmod omap44xx_mcbsp2_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = {
- { .irq = 22 + OMAP44XX_IRQ_GIC_START },
+ { .name = "tx", .irq = 22 + OMAP44XX_IRQ_GIC_START },
+ { .name = "rx", .irq = 0 },
};
static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
@@ -2923,13 +3927,14 @@
},
.slaves = omap44xx_mcbsp2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcbsp3 */
static struct omap_hwmod omap44xx_mcbsp3_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = {
- { .irq = 23 + OMAP44XX_IRQ_GIC_START },
+ { .name = "tx", .irq = 23 + OMAP44XX_IRQ_GIC_START },
+ { .name = "rx", .irq = 0 },
};
static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
@@ -2996,13 +4001,14 @@
},
.slaves = omap44xx_mcbsp3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcbsp4 */
static struct omap_hwmod omap44xx_mcbsp4_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = {
- { .irq = 16 + OMAP44XX_IRQ_GIC_START },
+ { .name = "tx", .irq = 16 + OMAP44XX_IRQ_GIC_START },
+ { .name = "rx", .irq = 0 },
};
static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
@@ -3048,7 +4054,7 @@
},
.slaves = omap44xx_mcbsp4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -3136,11 +4142,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_PDM_CONTEXT,
},
},
.slaves = omap44xx_mcpdm_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcpdm_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -3226,7 +4233,7 @@
.dev_attr = &mcspi1_dev_attr,
.slaves = omap44xx_mcspi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcspi2 */
@@ -3286,7 +4293,7 @@
.dev_attr = &mcspi2_dev_attr,
.slaves = omap44xx_mcspi2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcspi3 */
@@ -3346,7 +4353,7 @@
.dev_attr = &mcspi3_dev_attr,
.slaves = omap44xx_mcspi3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mcspi4 */
@@ -3404,7 +4411,7 @@
.dev_attr = &mcspi4_dev_attr,
.slaves = omap44xx_mcspi4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -3419,7 +4426,7 @@
SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type2,
};
@@ -3484,6 +4491,7 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ .context_reg = OMAP4430_RM_L3INIT_MMC1_CONTEXT,
},
},
.dev_attr = &mmc1_dev_attr,
@@ -3491,7 +4499,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_mmc1_slaves),
.masters = omap44xx_mmc1_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_mmc1_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mmc2 */
@@ -3543,13 +4551,14 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ .context_reg = OMAP4430_RM_L3INIT_MMC2_CONTEXT,
},
},
.slaves = omap44xx_mmc2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mmc2_slaves),
.masters = omap44xx_mmc2_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_mmc2_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mmc3 */
@@ -3597,11 +4606,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_MMCSD3_CONTEXT,
},
},
.slaves = omap44xx_mmc3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mmc3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mmc4 */
@@ -3649,11 +4659,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_MMCSD4_CONTEXT,
},
},
.slaves = omap44xx_mmc4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mmc4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* mmc5 */
@@ -3701,215 +4712,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_MMCSD5_CONTEXT,
},
},
.slaves = omap44xx_mmc5_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_mmc5_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-/*
- * 'mpu' class
- * mpu sub-system
- */
-
-static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
- .name = "mpu",
-};
-
-/* mpu */
-static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
- { .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
- { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
- { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
-};
-
-/* mpu master ports */
-static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = {
- &omap44xx_mpu__l3_main_1,
- &omap44xx_mpu__l4_abe,
- &omap44xx_mpu__dmm,
-};
-
-static struct omap_hwmod omap44xx_mpu_hwmod = {
- .name = "mpu",
- .class = &omap44xx_mpu_hwmod_class,
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
- .mpu_irqs = omap44xx_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mpu_irqs),
- .main_clk = "dpll_mpu_m2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_reg = OMAP4430_CM_MPU_MPU_CLKCTRL,
- },
- },
- .masters = omap44xx_mpu_masters,
- .masters_cnt = ARRAY_SIZE(omap44xx_mpu_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-/*
- * 'smartreflex' class
- * smartreflex module (monitor silicon performance and outputs a measure of
- * performance error)
- */
-
-/* The IP is not compliant to type1 / type2 scheme */
-static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = {
- .sidle_shift = 24,
- .enwkup_shift = 26,
-};
-
-static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = {
- .sysc_offs = 0x0038,
- .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type_smartreflex,
-};
-
-static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
- .name = "smartreflex",
- .sysc = &omap44xx_smartreflex_sysc,
- .rev = 2,
-};
-
-/* smartreflex_core */
-static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
-static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
- { .irq = 19 + OMAP44XX_IRQ_GIC_START },
-};
-
-static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
- {
- .pa_start = 0x4a0dd000,
- .pa_end = 0x4a0dd03f,
- .flags = ADDR_TYPE_RT
- },
-};
-
-/* l4_cfg -> smartreflex_core */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_smartreflex_core_hwmod,
- .clk = "l4_div_ck",
- .addr = omap44xx_smartreflex_core_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* smartreflex_core slave ports */
-static struct omap_hwmod_ocp_if *omap44xx_smartreflex_core_slaves[] = {
- &omap44xx_l4_cfg__smartreflex_core,
-};
-
-static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
- .name = "smartreflex_core",
- .class = &omap44xx_smartreflex_hwmod_class,
- .mpu_irqs = omap44xx_smartreflex_core_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_irqs),
- .main_clk = "smartreflex_core_fck",
- .vdd_name = "core",
- .prcm = {
- .omap4 = {
- .clkctrl_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
- },
- },
- .slaves = omap44xx_smartreflex_core_slaves,
- .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-/* smartreflex_iva */
-static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
-static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
- { .irq = 102 + OMAP44XX_IRQ_GIC_START },
-};
-
-static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
- {
- .pa_start = 0x4a0db000,
- .pa_end = 0x4a0db03f,
- .flags = ADDR_TYPE_RT
- },
-};
-
-/* l4_cfg -> smartreflex_iva */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_smartreflex_iva_hwmod,
- .clk = "l4_div_ck",
- .addr = omap44xx_smartreflex_iva_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* smartreflex_iva slave ports */
-static struct omap_hwmod_ocp_if *omap44xx_smartreflex_iva_slaves[] = {
- &omap44xx_l4_cfg__smartreflex_iva,
-};
-
-static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
- .name = "smartreflex_iva",
- .class = &omap44xx_smartreflex_hwmod_class,
- .mpu_irqs = omap44xx_smartreflex_iva_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_irqs),
- .main_clk = "smartreflex_iva_fck",
- .vdd_name = "iva",
- .prcm = {
- .omap4 = {
- .clkctrl_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
- },
- },
- .slaves = omap44xx_smartreflex_iva_slaves,
- .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-/* smartreflex_mpu */
-static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
-static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
- { .irq = 18 + OMAP44XX_IRQ_GIC_START },
-};
-
-static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
- {
- .pa_start = 0x4a0d9000,
- .pa_end = 0x4a0d903f,
- .flags = ADDR_TYPE_RT
- },
-};
-
-/* l4_cfg -> smartreflex_mpu */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_smartreflex_mpu_hwmod,
- .clk = "l4_div_ck",
- .addr = omap44xx_smartreflex_mpu_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* smartreflex_mpu slave ports */
-static struct omap_hwmod_ocp_if *omap44xx_smartreflex_mpu_slaves[] = {
- &omap44xx_l4_cfg__smartreflex_mpu,
-};
-
-static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
- .name = "smartreflex_mpu",
- .class = &omap44xx_smartreflex_hwmod_class,
- .mpu_irqs = omap44xx_smartreflex_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_irqs),
- .main_clk = "smartreflex_mpu_fck",
- .vdd_name = "mpu",
- .prcm = {
- .omap4 = {
- .clkctrl_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
- },
- },
- .slaves = omap44xx_smartreflex_mpu_slaves,
- .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -3970,7 +4778,7 @@
},
.slaves = omap44xx_spinlock_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_spinlock_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -3994,6 +4802,7 @@
static struct omap_hwmod_class omap44xx_timer_1ms_hwmod_class = {
.name = "timer",
.sysc = &omap44xx_timer_1ms_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
};
static struct omap_hwmod_class_sysconfig omap44xx_timer_sysc = {
@@ -4009,6 +4818,12 @@
static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
.name = "timer",
.sysc = &omap44xx_timer_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_2,
+};
+
+/* secure timer can assign this to .dev_attr field */
+static struct omap_secure_timer_dev_attr secure_timer_dev_attr = {
+ .is_secure_timer = true,
};
/* timer1 */
@@ -4053,7 +4868,7 @@
},
.slaves = omap44xx_timer1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer2 */
@@ -4094,11 +4909,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER2_CONTEXT,
},
},
.slaves = omap44xx_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer3 */
@@ -4139,11 +4955,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER3_CONTEXT,
},
},
.slaves = omap44xx_timer3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer4 */
@@ -4184,11 +5001,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER4_CONTEXT,
},
},
.slaves = omap44xx_timer4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer5 */
@@ -4248,11 +5066,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_TIMER5_CONTEXT,
},
},
.slaves = omap44xx_timer5_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer5_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer6 */
@@ -4312,11 +5131,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_TIMER6_CONTEXT,
},
},
.slaves = omap44xx_timer6_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer6_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer7 */
@@ -4376,11 +5196,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_TIMER7_CONTEXT,
},
},
.slaves = omap44xx_timer7_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer7_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer8 */
@@ -4440,11 +5261,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .context_reg = OMAP4430_RM_ABE_TIMER8_CONTEXT,
},
},
.slaves = omap44xx_timer8_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer8_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer9 */
@@ -4485,11 +5307,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER9_CONTEXT,
},
},
.slaves = omap44xx_timer9_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer9_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer10 */
@@ -4530,11 +5353,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER10_CONTEXT,
},
},
.slaves = omap44xx_timer10_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer10_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* timer11 */
@@ -4575,11 +5399,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_DMTIMER11_CONTEXT,
},
},
.slaves = omap44xx_timer11_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_timer11_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -4641,6 +5466,7 @@
static struct omap_hwmod omap44xx_uart1_hwmod = {
.name = "uart1",
.class = &omap44xx_uart_hwmod_class,
+ .flags = HWMOD_SWSUP_SIDLE,
.mpu_irqs = omap44xx_uart1_irqs,
.mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart1_irqs),
.sdma_reqs = omap44xx_uart1_sdma_reqs,
@@ -4649,11 +5475,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_UART1_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_UART1_CONTEXT,
},
},
.slaves = omap44xx_uart1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_uart1_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* uart2 */
@@ -4701,11 +5528,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_UART2_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_UART2_CONTEXT,
},
},
.slaves = omap44xx_uart2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_uart2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* uart3 */
@@ -4754,11 +5582,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_UART3_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_UART3_CONTEXT,
},
},
.slaves = omap44xx_uart3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_uart3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* uart4 */
@@ -4806,11 +5635,12 @@
.prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_UART4_CLKCTRL,
+ .context_reg = OMAP4430_RM_L4PER_UART4_CONTEXT,
},
},
.slaves = omap44xx_uart4_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_uart4_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -4892,7 +5722,7 @@
.slaves_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_slaves),
.masters = omap44xx_usb_otg_hs_masters,
.masters_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_masters),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/*
@@ -4960,7 +5790,7 @@
},
.slaves = omap44xx_wd_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer2_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
/* wd_timer3 */
@@ -5024,7 +5854,235 @@
},
.slaves = omap44xx_wd_timer3_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer3_slaves),
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/*
+ * 'usb_host_hs' class
+ * high-speed multi-port usb host controller
+ */
+static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = {
+ .master = &omap44xx_usb_host_hs_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART |
+ MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = {
+ .name = "usbhs_uhh",
+ .sysc = &omap44xx_usb_host_hs_sysc,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = {
+ &omap44xx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = {
+ {
+ .name = "uhh",
+ .pa_start = 0x4a064000,
+ .pa_end = 0x4a0647ff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usb_host_hs_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usb_host_hs_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = {
+ &omap44xx_l4_cfg__usb_host_hs,
+};
+
+static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
+ .name = "usbhs_uhh",
+ .class = &omap44xx_usb_host_hs_hwmod_class,
+ .main_clk = "usb_host_hs_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_usb_host_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_slaves),
+ .masters = omap44xx_usb_host_hs_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_masters),
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+};
+
+/* 'usbhs_ohci' class */
+static struct omap_hwmod_class omap44xx_usbhs_ohci_hwmod_class = {
+ .name = "usbhs_ohci",
+};
+
+static struct omap_hwmod_irq_info omap44xx_usbhs_ohci_irqs[] = {
+ { .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_usbhs_ohci_addrs[] = {
+ {
+ .name = "ohci",
+ .pa_start = 0x4A064800,
+ .pa_end = 0x4A064BFF,
+ .flags = ADDR_MAP_ON_INIT
+ }
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usbhs_ohci = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usbhs_ohci_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usbhs_ohci_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_usbhs_ohci_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usbhs_ohci_slaves[] = {
+ &omap44xx_l4_cfg__usbhs_ohci,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usbhs_ohci_masters[] = {
+ &omap44xx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod omap44xx_usbhs_ohci_hwmod = {
+ .name = "usbhs_ohci",
+ .class = &omap44xx_usbhs_ohci_hwmod_class,
+ .mpu_irqs = omap44xx_usbhs_ohci_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_usbhs_ohci_irqs),
+ .slaves = omap44xx_usbhs_ohci_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usbhs_ohci_slaves),
+ .masters = omap44xx_usbhs_ohci_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_usbhs_ohci_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
+};
+
+/* 'usbhs_ehci' class */
+static struct omap_hwmod_class omap44xx_usbhs_ehci_hwmod_class = {
+ .name = "usbhs_ehci",
+};
+
+static struct omap_hwmod_irq_info omap44xx_usbhs_ehci_irqs[] = {
+ { .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_usbhs_ehci_addrs[] = {
+ {
+ .name = "ehci",
+ .pa_start = 0x4A064C00,
+ .pa_end = 0x4A064FFF,
+ .flags = ADDR_MAP_ON_INIT
+ }
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usbhs_ehci = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usbhs_ehci_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usbhs_ehci_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_usbhs_ehci_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usbhs_ehci_slaves[] = {
+ &omap44xx_l4_cfg__usbhs_ehci,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usbhs_ehci_masters[] = {
+ &omap44xx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod omap44xx_usbhs_ehci_hwmod = {
+ .name = "usbhs_ehci",
+ .class = &omap44xx_usbhs_ehci_hwmod_class,
+ .mpu_irqs = omap44xx_usbhs_ehci_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_usbhs_ehci_irqs),
+ .slaves = omap44xx_usbhs_ehci_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usbhs_ehci_slaves),
+ .masters = omap44xx_usbhs_ehci_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_usbhs_ehci_masters),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .flags = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
+};
+
+/*
+ * 'usb_tll_hs' class
+ * usb_tll_hs module is the adapter on the usb_host_hs ports
+ */
+static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = {
+ .name = "usbhs_tll",
+ .sysc = &omap44xx_usb_tll_hs_sysc,
+};
+
+static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = {
+ { .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = {
+ {
+ .name = "tll",
+ .pa_start = 0x4a062000,
+ .pa_end = 0x4a063fff,
+ .flags = ADDR_TYPE_RT
+ },
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usb_tll_hs_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usb_tll_hs_addrs,
+ .addr_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_addrs),
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = {
+ &omap44xx_l4_cfg__usb_tll_hs,
+};
+
+static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = {
+ .name = "usbhs_tll",
+ .class = &omap44xx_usb_tll_hs_hwmod_class,
+ .mpu_irqs = omap44xx_usb_tll_hs_irqs,
+ .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_irqs),
+ .main_clk = "usb_tll_hs_ick",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ },
+ },
+ .slaves = omap44xx_usb_tll_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_slaves),
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
};
static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
@@ -5051,10 +6109,11 @@
&omap44xx_mpu_private_hwmod,
/* aess class */
-/* &omap44xx_aess_hwmod, */
+ &omap44xx_aess_hwmod,
/* bandgap class */
- &omap44xx_bandgap_hwmod,
+ &omap443x_bandgap_hwmod,
+ &omap446x_bandgap_hwmod,
/* counter class */
/* &omap44xx_counter_32k_hwmod, */
@@ -5078,8 +6137,13 @@
&omap44xx_dss_rfbi_hwmod,
&omap44xx_dss_venc_hwmod,
+ /* emif class */
+ &omap44xx_emif1_hwmod,
+ &omap44xx_emif2_hwmod,
+
/* gpio class */
- &omap44xx_gpio1_hwmod,
+ &omap443x_gpio1_hwmod,
+ &omap446x_gpio1_hwmod,
&omap44xx_gpio2_hwmod,
&omap44xx_gpio3_hwmod,
&omap44xx_gpio4_hwmod,
@@ -5087,7 +6151,10 @@
&omap44xx_gpio6_hwmod,
/* hsi class */
-/* &omap44xx_hsi_hwmod, */
+ &omap44xx_hsi_hwmod,
+
+ /* gpu class */
+ &omap44xx_gpu_hwmod,
/* i2c class */
&omap44xx_i2c1_hwmod,
@@ -5101,19 +6168,28 @@
&omap44xx_ipu_c1_hwmod,
/* iss class */
-/* &omap44xx_iss_hwmod, */
+ &omap44xx_iss_hwmod,
+
+ /* fdif class */
+ &omap44xx_fdif_hwmod,
/* iva class */
&omap44xx_iva_hwmod,
&omap44xx_iva_seq0_hwmod,
&omap44xx_iva_seq1_hwmod,
+ /* sl2if class */
+ &omap44xx_sl2if_hwmod,
+
/* kbd class */
&omap44xx_kbd_hwmod,
/* mailbox class */
&omap44xx_mailbox_hwmod,
+ /* mcasp class */
+ &omap44xx_mcasp_hwmod,
+
/* mcbsp class */
&omap44xx_mcbsp1_hwmod,
&omap44xx_mcbsp2_hwmod,
@@ -5121,7 +6197,7 @@
&omap44xx_mcbsp4_hwmod,
/* mcpdm class */
-/* &omap44xx_mcpdm_hwmod, */
+ &omap44xx_mcpdm_hwmod,
/* mcspi class */
&omap44xx_mcspi1_hwmod,
@@ -5160,6 +6236,12 @@
&omap44xx_timer10_hwmod,
&omap44xx_timer11_hwmod,
+ /* ctrl module class */
+ &omap44xx_ctrl_module_core_hwmod,
+
+ /* thermal sensor hwmod */
+ &omap44xx_thermal_sensor_hwmod,
+
/* uart class */
&omap44xx_uart1_hwmod,
&omap44xx_uart2_hwmod,
@@ -5173,6 +6255,10 @@
&omap44xx_wd_timer2_hwmod,
&omap44xx_wd_timer3_hwmod,
+ &omap44xx_usb_host_hs_hwmod,
+ &omap44xx_usbhs_ohci_hwmod,
+ &omap44xx_usbhs_ehci_hwmod,
+ &omap44xx_usb_tll_hs_hwmod,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index 08a1342..0312771 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,6 +49,15 @@
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
};
+/**
+ * struct omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme.
+ *
+ * To be used by hwmod structure to specify the sysconfig offsets if the
+ * device ip is compliant only implements the sidle feature.
+ */
+struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3 = {
+ .sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT,
+};
/*
* omap_hwmod class data
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 7b9f190..9d152de 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -29,6 +29,29 @@
#include "omap_l3_noc.h"
+#define NUM_OF_L3_MASTERS ARRAY_SIZE(l3_masters)
+
+static void l3_dump_targ_context(u32 baseaddr)
+{
+ pr_err("COREREG : 0x%08x\n", readl(baseaddr + L3_COREREG));
+ pr_err("VERSIONREG : 0x%08x\n", readl(baseaddr + L3_VERSIONREG));
+ pr_err("MAINCTLREG : 0x%08x\n", readl(baseaddr + L3_MAINCTLREG));
+ pr_err("NTTPADDR_0 : 0x%08x\n", readl(baseaddr + L3_NTTPADDR_0));
+ pr_err("SVRTSTDLVL : 0x%08x\n", readl(baseaddr + L3_SVRTSTDLVL));
+ pr_err("SVRTCUSTOMLVL: 0x%08x\n", readl(baseaddr + L3_SVRTCUSTOMLVL));
+ pr_err("MAIN : 0x%08x\n", readl(baseaddr + L3_MAIN));
+ pr_err("HDR : 0x%08x\n", readl(baseaddr + L3_HDR));
+ pr_err("MSTADDR : 0x%08x\n", readl(baseaddr + L3_MSTADDR));
+ pr_err("SLVADDR : 0x%08x\n", readl(baseaddr + L3_SLVADDR));
+ pr_err("INFO : 0x%08x\n", readl(baseaddr + L3_INFO));
+ pr_err("SLVOFSLSB : 0x%08x\n", readl(baseaddr + L3_SLVOFSLSB));
+ pr_err("SLVOFSMSB : 0x%08x\n", readl(baseaddr + L3_SLVOFSMSB));
+ pr_err("CUSTOMINFO_INFO : 0x%08x\n", readl(baseaddr + L3_CUSTOMINFO_INFO));
+ pr_err("CUSTOMINFO_MSTADDR: 0x%08x\n", readl(baseaddr + L3_CUSTOMINFO_MSTADDR));
+ pr_err("CUSTOMINFO_OPCODE : 0x%08x\n", readl(baseaddr + L3_CUSTOMINFO_OPCODE));
+ pr_err("ADDRSPACESIZELOG : 0x%08x\n", readl(baseaddr + L3_ADDRSPACESIZELOG));
+}
+
/*
* Interrupt Handler for L3 error detection.
* 1) Identify the L3 clockdomain partition to which the error belongs to.
@@ -56,10 +79,10 @@
{
struct omap4_l3 *l3 = _l3;
- int inttype, i, j;
+ int inttype, i, j, k;
int err_src = 0;
u32 std_err_main_addr, std_err_main, err_reg;
- u32 base, slave_addr, clear;
+ u32 base, slave_addr, clear, regoffset, masterid;
char *source_name;
/* Get the Type of interrupt */
@@ -88,11 +111,16 @@
case STANDARD_ERROR:
source_name =
l3_targ_stderrlog_main_name[i][err_src];
+ regoffset = targ_reg_offset[i][err_src];
slave_addr = std_err_main_addr +
L3_SLAVE_ADDRESS_OFFSET;
+
WARN(true, "L3 standard error: SOURCE:%s at address 0x%x\n",
source_name, readl(slave_addr));
+
+ l3_dump_targ_context(base + regoffset);
+
/* clear the std error log*/
clear = std_err_main | CLEAR_STDERR_LOG;
writel(clear, std_err_main_addr);
@@ -101,9 +129,29 @@
case CUSTOM_ERROR:
source_name =
l3_targ_stderrlog_main_name[i][err_src];
+ regoffset = targ_reg_offset[i][err_src];
WARN(true, "CUSTOM SRESP error with SOURCE:%s\n",
source_name);
+
+ masterid = readl(base + regoffset +
+ L3_CUSTOMINFO_MSTADDR);
+
+ for (k = 0;
+ k < NUM_OF_L3_MASTERS;
+ k++) {
+ if (masterid == l3_masters[k].id) {
+ pr_err("Master 0x%x %10s\n",
+ masterid,
+ l3_masters[k].name);
+ pr_err("%s OPCODE 0x%08x\n",
+ source_name,
+ readl(base + regoffset +
+ L3_CUSTOMINFO_OPCODE));
+ break;
+ }
+ }
+
/* clear the std error log*/
clear = std_err_main | CLEAR_STDERR_LOG;
writel(clear, std_err_main_addr);
diff --git a/arch/arm/mach-omap2/omap_l3_noc.h b/arch/arm/mach-omap2/omap_l3_noc.h
index 359b833..56d6b0a 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.h
+++ b/arch/arm/mach-omap2/omap_l3_noc.h
@@ -37,6 +37,24 @@
#define L3_APPLICATION_ERROR 0x0
#define L3_DEBUG_ERROR 0x1
+#define L3_COREREG 0x00
+#define L3_VERSIONREG 0x04
+#define L3_MAINCTLREG 0x08
+#define L3_NTTPADDR_0 0x10
+#define L3_SVRTSTDLVL 0x40
+#define L3_SVRTCUSTOMLVL 0x44
+#define L3_MAIN 0x48
+#define L3_HDR 0x4C
+#define L3_MSTADDR 0x50
+#define L3_SLVADDR 0x54
+#define L3_INFO 0x58
+#define L3_SLVOFSLSB 0x5C
+#define L3_SLVOFSMSB 0x60
+#define L3_CUSTOMINFO_INFO 0x64
+#define L3_CUSTOMINFO_MSTADDR 0x68
+#define L3_CUSTOMINFO_OPCODE 0x6C
+#define L3_ADDRSPACESIZELOG 0x80
+
u32 l3_flagmux[L3_MODULES] = {
0x50C,
0x100C,
@@ -79,6 +97,34 @@
0x0148 /* EMUSS */
};
+struct l3_masters_data {
+ u32 id;
+ char name[15];
+};
+
+struct l3_masters_data l3_masters[] = {
+ { 0x0 , "MPU"},
+ { 0x10, "CS_ADP"},
+ { 0x14, "Unknown"},
+ { 0x20, "DSP"},
+ { 0x30, "IVAHD"},
+ { 0x40, "ISS"},
+ { 0x44, "DucatiM3"},
+ { 0x48, "FaceDetect"},
+ { 0x50, "SDMA_Rd"},
+ { 0x54, "SDMA_Wr"},
+ { 0x58, "Unknown"},
+ { 0x5C, "Unknown"},
+ { 0x60, "SGX"},
+ { 0x70, "DSS"},
+ { 0x80, "C2C"},
+ { 0x88, "Unknown"},
+ { 0x8C, "Unknown"},
+ { 0x90, "HSI"},
+ { 0xA0, "MMC1"},
+ { 0xA4, "MMC2"},
+};
+
char *l3_targ_stderrlog_main_name[L3_MODULES][18] = {
{
"DMM1",
@@ -112,6 +158,39 @@
},
};
+u32 targ_reg_offset[L3_MODULES][18] = {
+ {
+ 0x100,
+ 0x200,
+ 0x300,
+ 0x400,
+ 0x0,
+ },
+ {
+ 0x500,
+ 0x300,
+ 0x100,
+ 0x400,
+ 0x700,
+ 0x000,
+ 0x000,
+ 0x000,
+ 0x000,
+ 0x600,
+ 0x800,
+ 0x000,
+ 0x000,
+ 0x000,
+ 0x000,
+ 0x100,
+ 0xA00,
+ 0xB00,
+ },
+ {
+ 0x000000
+ },
+};
+
u32 *l3_targ[L3_MODULES] = {
l3_targ_stderrlog_main_clk1,
l3_targ_stderrlog_main_clk2,
diff --git a/arch/arm/mach-omap2/omap_opp_data.h b/arch/arm/mach-omap2/omap_opp_data.h
index c784c12..37d16e1 100644
--- a/arch/arm/mach-omap2/omap_opp_data.h
+++ b/arch/arm/mach-omap2/omap_opp_data.h
@@ -49,6 +49,8 @@
*/
struct omap_opp_def {
char *hwmod_name;
+ char *voltdm_name;
+ char *clk_name;
unsigned long freq;
unsigned long u_volt;
@@ -59,9 +61,11 @@
/*
* Initialization wrapper used to define an OPP for OMAP variants.
*/
-#define OPP_INITIALIZER(_hwmod_name, _enabled, _freq, _uv) \
+#define OPP_INITIALIZER(_hwmod_name, _clk_name, _voltdm_name, _enabled, _freq, _uv) \
{ \
.hwmod_name = _hwmod_name, \
+ .clk_name = _clk_name, \
+ .voltdm_name = _voltdm_name, \
.default_available = _enabled, \
.freq = _freq, \
.u_volt = _uv, \
@@ -71,12 +75,14 @@
* Initialization wrapper used to define SmartReflex process data
* XXX Is this needed? Just use C99 initializers in data files?
*/
-#define VOLT_DATA_DEFINE(_v_nom, _efuse_offs, _errminlimit, _errgain) \
+#define VOLT_DATA_DEFINE(_v_nom, _v_margin, _efuse_offs, _errminlimit, _errgain, _abb_type) \
{ \
.volt_nominal = _v_nom, \
+ .volt_margin = _v_margin, \
.sr_efuse_offs = _efuse_offs, \
.sr_errminlimit = _errminlimit, \
- .vp_errgain = _errgain \
+ .vp_errgain = _errgain, \
+ .abb_type = _abb_type, \
}
/* Use this to initialize the default table */
@@ -86,11 +92,21 @@
extern struct omap_volt_data omap34xx_vddmpu_volt_data[];
extern struct omap_volt_data omap34xx_vddcore_volt_data[];
+extern struct omap_vdd_dep_info omap34xx_vddmpu_dep_info[];
extern struct omap_volt_data omap36xx_vddmpu_volt_data[];
extern struct omap_volt_data omap36xx_vddcore_volt_data[];
+extern struct omap_vdd_dep_info omap36xx_vddmpu_dep_info[];
-extern struct omap_volt_data omap44xx_vdd_mpu_volt_data[];
-extern struct omap_volt_data omap44xx_vdd_iva_volt_data[];
-extern struct omap_volt_data omap44xx_vdd_core_volt_data[];
+extern struct omap_volt_data omap443x_vdd_mpu_volt_data[];
+extern struct omap_volt_data omap443x_vdd_iva_volt_data[];
+extern struct omap_volt_data omap443x_vdd_core_volt_data[];
+extern struct omap_volt_data omap446x_vdd_mpu_volt_data[];
+extern struct omap_volt_data omap446x_vdd_iva_volt_data[];
+extern struct omap_volt_data omap446x_vdd_core_volt_data[];
+
+extern struct omap_vdd_dep_info omap443x_vddmpu_dep_info[];
+extern struct omap_vdd_dep_info omap443x_vddiva_dep_info[];
+extern struct omap_vdd_dep_info omap446x_vddmpu_dep_info[];
+extern struct omap_vdd_dep_info omap446x_vddiva_dep_info[];
#endif /* __ARCH_ARM_MACH_OMAP2_OMAP_OPP_DATA_H */
diff --git a/arch/arm/mach-omap2/omap_pmic.c b/arch/arm/mach-omap2/omap_pmic.c
new file mode 100644
index 0000000..3271bfe
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_pmic.c
@@ -0,0 +1,105 @@
+/*
+ * Registration hooks for PMICs used with OMAP
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "voltage.h"
+
+#include "pm.h"
+
+/**
+ * omap_pmic_data_init() - trigger point for all PMIC initializers
+ */
+void __init omap_pmic_data_init(void)
+{
+ omap_twl_init();
+ omap_tps6236x_init();
+}
+
+/**
+ * omap_pmic_register_data() - register the PMIC-to-OMAP mapping information
+ * @omap_pmic_maps: array ending with an empty element representing the maps
+ * @desc: description for this PMIC.
+ */
+int __init omap_pmic_register_data(struct omap_pmic_map *omap_pmic_maps,
+ struct omap_pmic_description *desc)
+{
+ struct voltagedomain *voltdm;
+ struct omap_pmic_map *map;
+ int r;
+
+ if (!omap_pmic_maps)
+ return 0;
+
+ map = omap_pmic_maps;
+
+ while (map->name) {
+ if (!omap_chip_is(map->omap_chip))
+ goto next;
+
+ /* The base PMIC is the one controlling core voltdm */
+ if (desc && !strcmp(map->name, "core"))
+ omap_pm_set_pmic_lp_time(desc->pmic_lp_tstart,
+ desc->pmic_lp_tshut);
+
+ voltdm = voltdm_lookup(map->name);
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s: unable to find map %s\n", __func__,
+ map->name);
+ goto next;
+ }
+ if (IS_ERR_OR_NULL(map->pmic_data)) {
+ pr_warning("%s: domain[%s] has no pmic data\n",
+ __func__, map->name);
+ goto next;
+ }
+
+ r = omap_voltage_register_pmic(voltdm, map->pmic_data);
+ if (r) {
+ pr_warning("%s: domain[%s] register returned %d\n",
+ __func__, map->name, r);
+ goto next;
+ }
+ if (map->special_action) {
+ r = map->special_action(voltdm);
+ WARN(r, "%s: domain[%s] action returned %d\n", __func__,
+ map->name, r);
+ }
+next:
+ map++;
+ }
+
+ return 0;
+}
+
+int __init omap_pmic_update(struct omap_pmic_map *tmp_map, char *name,
+ u32 old_chip_id, u32 new_chip_id)
+{
+ while (tmp_map->name != NULL) {
+ if (!strcmp(tmp_map->name, name) &&
+ (tmp_map->omap_chip.oc & new_chip_id)) {
+ WARN(1, "%s: this map already exists:%s-%x\n",
+ __func__, name, new_chip_id);
+ return -1;
+ }
+ if (!strcmp(tmp_map->name, name) &&
+ (tmp_map->omap_chip.oc & old_chip_id))
+ break;
+ tmp_map++;
+ }
+ if (tmp_map->name != NULL) {
+ tmp_map->omap_chip.oc = new_chip_id;
+ return 0;
+ }
+ return -ENOENT;
+}
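
The update helper above retargets an existing map entry from one chip mask to
another and refuses to create a duplicate for the new chip. A minimal
standalone sketch of the same walk, using a toy structure instead of the
kernel's omap_pmic_map (the names and chip-id values below are made up for
illustration only):

#include <stdio.h>
#include <string.h>

struct toy_map {
	const char *name;	/* voltage domain name, NULL terminates */
	unsigned int chip;	/* chip mask this entry applies to */
};

/* Mirrors the logic of omap_pmic_update() above. */
static int toy_update(struct toy_map *m, const char *name,
		      unsigned int old_chip, unsigned int new_chip)
{
	for (; m->name; m++) {
		if (!strcmp(m->name, name) && (m->chip & new_chip))
			return -1;	/* map already exists for new chip */
		if (!strcmp(m->name, name) && (m->chip & old_chip))
			break;
	}
	if (!m->name)
		return -2;		/* stands in for -ENOENT */
	m->chip = new_chip;
	return 0;
}

int main(void)
{
	struct toy_map map[] = { { "mpu", 0x1 }, { "core", 0x1 }, { NULL, 0 } };

	printf("%d\n", toy_update(map, "mpu", 0x1, 0x2));	/* 0: retargeted */
	printf("%d\n", toy_update(map, "mpu", 0x1, 0x2));	/* -1: duplicate */
	return 0;
}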
diff --git a/arch/arm/mach-omap2/omap_tps6236x.c b/arch/arm/mach-omap2/omap_tps6236x.c
new file mode 100644
index 0000000..c369d7a
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_tps6236x.c
@@ -0,0 +1,434 @@
+/*
+ * OMAP and TPS6236x specific initialization
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Vishwanath BS
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl.h>
+
+#include "pm.h"
+#include "vc.h"
+#include "mux.h"
+
+/* Voltage limits supported */
+#define MIN_VOLTAGE_TPS62360_62_UV 770000
+#define MAX_VOLTAGE_TPS62360_62_UV 1400000
+
+#define MIN_VOLTAGE_TPS62361_UV 500000
+#define MAX_VOLTAGE_TPS62361_UV 1770000
+
+#define MAX_VOLTAGE_RAMP_TPS6236X_UV 32000
+
+/*
+ * This is the voltage delta between two adjacent values of the voltage
+ * register. When switching from voltage V1 to V2, the TPS62361 can ramp
+ * up or down initially in 20mV steps with a last step of 10mV. In the
+ * case of the TPS6236[0|2] it is a constant 10mV step. We choose the
+ * 10mV step for linearity when SR is configured.
+ */
+#define STEP_SIZE_TPS6236X 10000
+
+/* I2C access parameters */
+#define I2C_TPS6236X_SLAVE_ADDR 0x60
+
+#define DEF_SET_REG(VSEL0, VSEL1) (((VSEL1) << 1 | (VSEL0) << 0) & 0x3)
+#define REG_TPS6236X_SET_0 0x00
+#define REG_TPS6236X_SET_1 0x01
+#define REG_TPS6236X_SET_2 0x02
+#define REG_TPS6236X_SET_3 0x03
+#define REG_TPS6236X_CTRL 0x04
+#define REG_TPS6236X_TEMP 0x05
+#define REG_TPS6236X_RAMP_CTRL 0x06
+#define REG_TPS6236X_CHIP_ID0 0x08
+#define REG_TPS6236X_CHIP_ID1 0x09
+
+#define MODE_TPS6236X_AUTO_PFM_PWM 0x00
+#define MODE_TPS6236X_FORCE_PWM BIT(7)
+
+/* We currently use Auto PFM/PWM mode, which seems to have the best trade-off */
+#define VOLTAGE_PFM_MODE_VAL MODE_TPS6236X_AUTO_PFM_PWM
+
+#define REG_TPS6236X_RAMP_CTRL_RMP_MASK (0x7 << 5)
+#define REG_TPS6236X_RAMP_CTRL_EN_DISC BIT(2)
+#define REG_TPS6236X_RAMP_CTRL_RAMP_PFM BIT(1)
+
+#define REG_TPS6236X_CTRL_PD_EN BIT(7)
+#define REG_TPS6236X_CTRL_PD_VSEL0 BIT(6)
+#define REG_TPS6236X_CTRL_PD_VSEL1 BIT(5)
+
+/* TWL usage */
+#define TWL6030_REG_SYSEN_CFG_GRP 0xB3
+#define TWL6030_REG_SYSEN_CFG_TRANS 0xB4
+#define TWL6030_REG_VCORE3_CFG_GRP 0x5E
+#define TWL6030_REG_VMEM_CFG_GRP 0x64
+#define TWL6030_REG_MSK_TRANSITION 0x20
+#define TWL6030_BIT_APE_GRP BIT(0)
+#define TWL6030_BIT_CON_GRP BIT(1)
+#define TWL6030_BIT_MOD_GRP BIT(2)
+#define TWL6030_MSK_PREQ1 BIT(5)
+#define TWL6030_MSK_SYSEN_OFF (0x3 << 4)
+#define TWL6030_MSK_SYSEN_SLEEP (0x3 << 2)
+#define TWL6030_MSK_SYSEN_ACTIVE (0x3 << 0)
+
+/* Voltage params of the attached device (all in uV) */
+static unsigned long voltage_min;
+static unsigned long voltage_max;
+
+/* Which register do we use by default? */
+static int __initdata default_reg = -1;
+
+/* Do we need to set up internal pull-ups? */
+static int __initdata pd_vsel0 = -1;
+static int __initdata pd_vsel1 = -1;
+
+static int __init _bd_setup(char *name, int gpio_vsel, int *pull, int *pd_vsel)
+{
+ int pull_dir;
+ int r;
+
+ if (gpio_vsel == -1) {
+ if (*pull != -1) {
+ *pd_vsel = (*pull == OMAP_PIN_OFF_OUTPUT_HIGH);
+ *pull = *pd_vsel;
+ } else {
+ *pull = 0;
+ }
+ return 0;
+ }
+
+ /* if we have a pull gpio with a bad direction, default to pulling low */
+ if (*pull == -1 || (*pull != OMAP_PIN_OFF_OUTPUT_HIGH &&
+ *pull != OMAP_PIN_OFF_OUTPUT_LOW))
+ *pull = OMAP_PIN_OFF_OUTPUT_LOW;
+
+ r = omap_mux_init_gpio(gpio_vsel, *pull);
+ if (r) {
+ pr_err("%s: unable to mux gpio%d=%d\n", __func__,
+ gpio_vsel, r);
+ goto out;
+ }
+
+ pull_dir = (*pull == OMAP_PIN_OFF_OUTPUT_HIGH);
+ *pull = pull_dir;
+
+ r = gpio_request(gpio_vsel, name);
+ if (r) {
+ pr_err("%s: unable to req gpio%d=%d\n", __func__,
+ gpio_vsel, r);
+ goto out;
+ }
+ r = gpio_direction_output(gpio_vsel, pull_dir);
+ if (r) {
+ pr_err("%s: unable to pull[%d] gpio%d=%d\n", __func__,
+ gpio_vsel, pull_dir, r);
+ gpio_free(gpio_vsel);
+ goto out;
+ }
+out:
+ return r;
+}
+
+/* Convert the ramp voltage to ramp value. */
+static u8 __init tps6236x_ramp_value(unsigned long uv)
+{
+ if (!uv)
+ return 0;
+
+ if (uv > MAX_VOLTAGE_RAMP_TPS6236X_UV) {
+ pr_err("%s: uv%ld greater than max %d\n", __func__,
+ uv, MAX_VOLTAGE_RAMP_TPS6236X_UV);
+ uv = MAX_VOLTAGE_RAMP_TPS6236X_UV;
+ }
+ return fls(MAX_VOLTAGE_RAMP_TPS6236X_UV / uv) - 1;
+}
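
For a quick sanity check of the ramp encoding above (my own arithmetic, run
outside the kernel; fls() is approximated with a compiler builtin, and no
hardware meaning of the resulting field value is implied here):

#include <assert.h>

#define MAX_RAMP_UV 32000	/* MAX_VOLTAGE_RAMP_TPS6236X_UV */

static int fls_u32(unsigned int x)		/* mimics the kernel fls() */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned char ramp_value(unsigned long uv)
{
	if (!uv)
		return 0;
	if (uv > MAX_RAMP_UV)
		uv = MAX_RAMP_UV;
	return fls_u32(MAX_RAMP_UV / uv) - 1;
}

int main(void)
{
	assert(ramp_value(32000) == 0);	/* 32000/32000 == 1, fls(1)-1 == 0 */
	assert(ramp_value(16000) == 1);	/* fls(2)-1 == 1 */
	assert(ramp_value(9000)  == 1);	/* 32000/9000 == 3, fls(3)-1 == 1 */
	return 0;
}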
+
+static unsigned long tps6236x_vsel_to_uv(const u8 vsel)
+{
+ return (voltage_min +
+ (STEP_SIZE_TPS6236X * (vsel & ~VOLTAGE_PFM_MODE_VAL)));
+}
+
+static u8 tps6236x_uv_to_vsel(unsigned long uv)
+{
+ if (!uv)
+ return 0;
+
+ /* Round off requests to limits */
+ if (uv > voltage_max) {
+ pr_err("%s: requested %ld uV above supported max of %ld uV\n",
+ __func__, uv, voltage_max);
+ uv = voltage_max;
+ }
+ if (uv < voltage_min) {
+ pr_err("%s: requested %ld uV below supported min of %ld uV\n",
+ __func__, uv, voltage_min);
+ uv = voltage_min;
+ }
+ return DIV_ROUND_UP(uv - voltage_min, STEP_SIZE_TPS6236X) |
+ VOLTAGE_PFM_MODE_VAL;
+}
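
For illustration of the conversion pair above (arithmetic only, not from the
patch): with the TPS62361 limits, voltage_min is 500000 uV and the step is
10 mV, so 1.2 V encodes as (1200000 - 500000) / 10000 = 70 = 0x46 and decodes
back to 1200000 uV. A standalone check with those values hard-coded (the
auto-PFM mode value is 0, so the mode masking is a no-op here):

#include <assert.h>

#define MIN_UV		500000UL	/* MIN_VOLTAGE_TPS62361_UV */
#define STEP_UV		10000UL		/* STEP_SIZE_TPS6236X */
#define PFM_MODE_VAL	0x00		/* MODE_TPS6236X_AUTO_PFM_PWM */

static unsigned long vsel_to_uv(unsigned char vsel)
{
	return MIN_UV + STEP_UV * (vsel & ~PFM_MODE_VAL);
}

static unsigned char uv_to_vsel(unsigned long uv)
{
	/* round up to the next step, as DIV_ROUND_UP() does */
	return ((uv - MIN_UV + STEP_UV - 1) / STEP_UV) | PFM_MODE_VAL;
}

int main(void)
{
	assert(uv_to_vsel(1200000) == 0x46);
	assert(vsel_to_uv(0x46) == 1200000);
	return 0;
}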
+
+static struct omap_voltdm_pmic omap4_mpu_pmic = {
+ .slew_rate = 32000,
+ .step_size = STEP_SIZE_TPS6236X,
+ .on_volt = 1375000,
+ .onlp_volt = 1375000,
+ .ret_volt = 830000,
+ .off_volt = 0,
+ .volt_setup_time = 0,
+ .switch_on_time = 1000,
+ .vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
+ .vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
+ .vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
+ .vp_vddmin = OMAP4_VP_MPU_VLIMITTO_VDDMIN,
+ .vp_vddmax = OMAP4_VP_MPU_VLIMITTO_VDDMAX,
+ .vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
+ .i2c_slave_addr = I2C_TPS6236X_SLAVE_ADDR,
+ .volt_reg_addr = REG_TPS6236X_SET_0,
+ .cmd_reg_addr = REG_TPS6236X_SET_0,
+ .i2c_high_speed = true,
+ .i2c_scll_low = 0x28,
+ .i2c_scll_high = 0x2C,
+ .i2c_hscll_low = 0x0B,
+ .i2c_hscll_high = 0x00,
+ .vsel_to_uv = tps6236x_vsel_to_uv,
+ .uv_to_vsel = tps6236x_uv_to_vsel,
+};
+
+/* As per SLVSAU9 */
+static __initdata struct omap_pmic_description tps_pmic_desc = {
+ .pmic_lp_tshut = 1, /* T-OFF 1ns rounded */
+ .pmic_lp_tstart = 1000, /* T-start */
+};
+/**
+ * _twl_i2c_rmw_u8() - tiny helper to do a read-modify-write on a TWL register
+ * @mod_no: module number
+ * @mask: mask for the value
+ * @value: value to write
+ * @reg: register to write to
+ */
+static int __init _twl_i2c_rmw_u8(u8 mod_no, u8 mask, u8 value, u8 reg)
+{
+ int ret;
+ u8 val;
+
+ ret = twl_i2c_read_u8(mod_no, &val, reg);
+ if (ret)
+ goto out;
+
+ val &= ~mask;
+ val |= (value & mask);
+
+ ret = twl_i2c_write_u8(mod_no, val, reg);
+out:
+ return ret;
+}
+
+/**
+ * omap4_twl_tps62361_enable() - enable the TPS chip
+ *
+ * This function enables the TPS chip by associating the SYSEN signal
+ * with the APE resource group of the TWL6030.
+ *
+ * Returns 0 on success, or an error if an I2C read/write fails.
+ */
+static int __init omap4_twl_tps62361_enable(struct voltagedomain *voltdm)
+{
+ int ret = 0;
+ int ret1;
+ u8 val;
+
+ /* Don't trust the bootloader: start with max; PM will set the proper value */
+ val = voltdm->pmic->uv_to_vsel(voltdm->pmic->vp_vddmax);
+ ret = omap_vc_bypass_send_i2c_msg(voltdm, voltdm->pmic->i2c_slave_addr,
+ default_reg, val);
+
+ /* Setup Ramp */
+ val = tps6236x_ramp_value(voltdm->pmic->slew_rate) <<
+ __ffs(REG_TPS6236X_RAMP_CTRL_RMP_MASK);
+ val &= REG_TPS6236X_RAMP_CTRL_RMP_MASK;
+
+ /* We would like to ramp the voltage asap */
+ val |= REG_TPS6236X_RAMP_CTRL_RAMP_PFM;
+
+ /* We would like to ramp down the voltage asap as well */
+ val |= REG_TPS6236X_RAMP_CTRL_EN_DISC;
+
+ ret = omap_vc_bypass_send_i2c_msg(voltdm, voltdm->pmic->i2c_slave_addr,
+ REG_TPS6236X_RAMP_CTRL, val);
+ if (ret)
+ goto out;
+
+ /* Setup the internal pulls to select if needed */
+ if (pd_vsel0 != -1 || pd_vsel1 != -1) {
+ val = REG_TPS6236X_CTRL_PD_EN;
+ val |= (pd_vsel0) ? 0 : REG_TPS6236X_CTRL_PD_VSEL0;
+ val |= (pd_vsel1) ? 0 : REG_TPS6236X_CTRL_PD_VSEL1;
+ ret = omap_vc_bypass_send_i2c_msg(voltdm,
+ voltdm->pmic->i2c_slave_addr,
+ REG_TPS6236X_CTRL, val);
+ if (ret)
+ goto out;
+ }
+
+ /* Enable thermal shutdown - 0 is enable :) */
+ ret = omap_vc_bypass_send_i2c_msg(voltdm,
+ voltdm->pmic->i2c_slave_addr,
+ REG_TPS6236X_TEMP, 0x0);
+ if (ret)
+ goto out;
+
+ /* if we have to work with TWL */
+#ifdef CONFIG_TWL4030_CORE
+
+ /* Unmask PREQ1 transition: executes the ACT2SLP and SLP2ACT sleep sequences */
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_MSK_PREQ1,
+ 0x00, TWL6030_REG_MSK_TRANSITION);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: map APE PREQ1(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+
+ /* Setup SYSEN to be 1 on Active and 0 for sleep and OFF states */
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_MSK_SYSEN_ACTIVE,
+ 0x01, TWL6030_REG_SYSEN_CFG_TRANS);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: sysen active(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_MSK_SYSEN_SLEEP,
+ 0x00, TWL6030_REG_SYSEN_CFG_TRANS);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: sysen sleep(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_MSK_SYSEN_OFF,
+ 0x00, TWL6030_REG_SYSEN_CFG_TRANS);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: sysen off(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+
+ /* Map up SYSEN on TWL core to control TPS */
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_BIT_APE_GRP |
+ TWL6030_BIT_MOD_GRP | TWL6030_BIT_CON_GRP,
+ TWL6030_BIT_APE_GRP, TWL6030_REG_SYSEN_CFG_GRP);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: map APE SYSEN(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+
+ /* Since we don't use VCORE3, this should not be associated with APE */
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_BIT_APE_GRP,
+ 0x00, TWL6030_REG_VCORE3_CFG_GRP);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030:unmap APE VCORE3(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+
+ /* Since we don't use VMEM, this should not be associated with APE */
+ ret1 = _twl_i2c_rmw_u8(TWL6030_MODULE_ID0, TWL6030_BIT_APE_GRP,
+ 0x00, TWL6030_REG_VMEM_CFG_GRP);
+ if (ret1) {
+ pr_err("%s:Err:TWL6030: unmap APE VMEM(%d)\n", __func__, ret1);
+ ret = ret1;
+ }
+#endif
+
+out:
+ if (ret)
+ pr_err("%s: Error enabling TPS(%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static __initdata struct omap_pmic_map omap_tps_map[] = {
+ {
+ .name = "mpu",
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
+ .pmic_data = &omap4_mpu_pmic,
+ .special_action = omap4_twl_tps62361_enable,
+ },
+ /* Terminator */
+ { .name = NULL, .pmic_data = NULL},
+};
+
+int __init omap_tps6236x_init(void)
+{
+ struct omap_pmic_map *map;
+
+ /* Without a default register selection, we can't proceed */
+ if (default_reg == -1)
+ return -EINVAL;
+
+ map = omap_tps_map;
+
+ /* set up all of the PMIC's voltage register addresses to the default one */
+ while (map->name) {
+ map->pmic_data->volt_reg_addr = default_reg;
+ map->pmic_data->cmd_reg_addr = default_reg;
+ map++;
+ }
+
+ return omap_pmic_register_data(omap_tps_map, &tps_pmic_desc);
+}
+
+/**
+ * omap_tps6236x_board_setup() - provide the board config for TPS connect
+ * @use_62361: Do we use TPS62361 variant?
+ * @gpio_vsel0: If using GPIO to control VSEL0, provide gpio number, else -1
+ * @gpio_vsel1: If using GPIO to control VSEL1, provide gpio number, else -1
+ * @pull0: If using GPIO, provide mux mode OMAP_PIN_OFF_OUTPUT_[HIGH|LOW]
+ * else provide any internal pull required, -1 if unused.
+ * @pull1: If using GPIO, provide mux mode OMAP_PIN_OFF_OUTPUT_[HIGH|LOW]
+ * else provide any internal pull required, -1 if unused.
+ *
+ * TPS6236x PMIC variants can be hooked up to the board in numerous
+ * combinations. Some platforms choose to hardwire the VSEL lines and save a
+ * GPIO for other uses, while others hook up a single line for GPIO control
+ * and ground the other line. Support these configurations.
+ *
+ * WARNING: for platforms using GPIO, be careful to provide MUX setting
+ * considering OFF mode configuration as well.
+ */
+int __init omap_tps6236x_board_setup(bool use_62361, int gpio_vsel0,
+ int gpio_vsel1, int pull0, int pull1)
+{
+ int r;
+
+ r = _bd_setup("tps6236x_vsel0", gpio_vsel0, &pull0, &pd_vsel0);
+ if (r)
+ goto out;
+ r = _bd_setup("tps6236x_vsel1", gpio_vsel1, &pull1, &pd_vsel1);
+ if (r) {
+ if (gpio_vsel0 != -1)
+ gpio_free(gpio_vsel0);
+ goto out;
+ }
+
+ default_reg = ((pull1 & 0x1) << 1) | (pull0 & 0x1);
+
+ if (use_62361) {
+ voltage_min = MIN_VOLTAGE_TPS62361_UV;
+ voltage_max = MAX_VOLTAGE_TPS62361_UV;
+ } else {
+ voltage_min = MIN_VOLTAGE_TPS62360_62_UV;
+ voltage_max = MAX_VOLTAGE_TPS62360_62_UV;
+ }
+out:
+ return r;
+}
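
A hypothetical board-file call, purely to illustrate the parameters (the GPIO
number and pull choices below are made up; the real wiring is board specific
and the prototype is assumed to be available from the PM headers this file
already uses):

/* Illustration only: TPS62361 with VSEL0 on GPIO 7 (driven high in OFF
 * mode) and VSEL1 not GPIO-controlled, treated as being at a high level.
 */
static int __init example_board_tps_setup(void)
{
	return omap_tps6236x_board_setup(true, 7, -1,
					 OMAP_PIN_OFF_OUTPUT_HIGH,
					 OMAP_PIN_OFF_OUTPUT_HIGH);
}

With both VSEL lines resolving high, default_reg becomes ((1 << 1) | 1) = 3,
i.e. REG_TPS6236X_SET_3 is used as the voltage/command register.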
+
+int __init omap_tps6236x_update(char *name, u32 old_chip_id, u32 new_chip_id)
+{
+ return omap_pmic_update(omap_tps_map, name, old_chip_id, new_chip_id);
+}
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 07d6140..2d00a0c 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -30,32 +30,31 @@
#define OMAP3_VP_VSTEPMAX_VSTEPMAX 0x04
#define OMAP3_VP_VLIMITTO_TIMEOUT_US 200
-#define OMAP3430_VP1_VLIMITTO_VDDMIN 0x14
-#define OMAP3430_VP1_VLIMITTO_VDDMAX 0x42
-#define OMAP3430_VP2_VLIMITTO_VDDMIN 0x18
-#define OMAP3430_VP2_VLIMITTO_VDDMAX 0x2c
-
-#define OMAP3630_VP1_VLIMITTO_VDDMIN 0x18
-#define OMAP3630_VP1_VLIMITTO_VDDMAX 0x3c
-#define OMAP3630_VP2_VLIMITTO_VDDMIN 0x18
-#define OMAP3630_VP2_VLIMITTO_VDDMAX 0x30
-
#define OMAP4_SRI2C_SLAVE_ADDR 0x12
#define OMAP4_VDD_MPU_SR_VOLT_REG 0x55
+#define OMAP4_VDD_MPU_SR_CMD_REG 0x56
#define OMAP4_VDD_IVA_SR_VOLT_REG 0x5B
+#define OMAP4_VDD_IVA_SR_CMD_REG 0x5C
#define OMAP4_VDD_CORE_SR_VOLT_REG 0x61
+#define OMAP4_VDD_CORE_SR_CMD_REG 0x62
-#define OMAP4_VP_CONFIG_ERROROFFSET 0x00
-#define OMAP4_VP_VSTEPMIN_VSTEPMIN 0x01
-#define OMAP4_VP_VSTEPMAX_VSTEPMAX 0x04
-#define OMAP4_VP_VLIMITTO_TIMEOUT_US 200
-
-#define OMAP4_VP_MPU_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_MPU_VLIMITTO_VDDMAX 0x39
-#define OMAP4_VP_IVA_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_IVA_VLIMITTO_VDDMAX 0x2D
-#define OMAP4_VP_CORE_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_CORE_VLIMITTO_VDDMAX 0x28
+#define TWL6030_REG_VCORE1_CFG_GRP 0x52
+#define TWL6030_REG_VCORE1_CFG_TRANS 0x53
+#define TWL6030_REG_VCORE2_CFG_GRP 0x58
+#define TWL6030_REG_VCORE2_CFG_TRANS 0x59
+#define TWL6030_REG_VCORE3_CFG_GRP 0x5e
+#define TWL6030_REG_VCORE3_CFG_TRANS 0x5f
+#define TWL6030_BIT_APE_GRP BIT(0)
+/*
+ * Set up the CFG_TRANS mode as follows:
+ * 0x00 (OFF) when in the OFF state (bit offset 4) and in sleep (bit offset 2)
+ * 0x01 (PWM/PFM Auto) when in the ACTIVE state (bit offset 0)
+ * Don't trust the bootloader or reset values to set these up for the kernel.
+ */
+#define TWL6030_REG_VCOREx_CFG_TRANS_MODE (0x00 << 4 | \
+ 0x00 << 2 | \
+ 0x01 << 0)
+#define TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC "OFF=OFF SLEEP=OFF ACT=AUTO"
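
Working out the macro value above as a quick check (simple arithmetic on the
defines, nothing more): the OFF setting sits at bit offset 4, SLEEP at bit
offset 2 and ACT at bit offset 0, so the byte written out is
(0x00 << 4) | (0x00 << 2) | (0x01 << 0) = 0x01, matching the
"OFF=OFF SLEEP=OFF ACT=AUTO" description string.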
static bool is_offset_valid;
static u8 smps_offset;
@@ -69,6 +68,55 @@
#define REG_SMPS_OFFSET 0xE0
#define SMARTREFLEX_ENABLE BIT(3)
+/**
+ * struct twl_reg_setup_array - NULL-terminated array of register configuration
+ * @addr: register address to write to
+ * @val: value to write
+ * @desc: description of this register for error printing
+ * NOTE: a NULL @desc pointer indicates the end of the array.
+ *
+ * VCORE register configurations, as needed.
+ */
+struct twl_reg_setup_array {
+ u8 addr;
+ u8 val;
+ char *desc;
+};
+
+/**
+ * _twl_set_regs() - helper to write out a config array
+ * @gendesc: generic description - used in error messages
+ * @sarray: NULL-terminated array of configuration values
+ *
+ * Configures the TWL with a NULL-terminated set of values. If any write
+ * fails, this continues to the last entry and returns the last error
+ * value.
+ */
+static int __init _twl_set_regs(char *gendesc,
+ struct twl_reg_setup_array *sarray)
+{
+ int i = 0;
+ int ret1;
+ int ret = 0;
+
+ while (sarray->desc) {
+ ret1 = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+ sarray->val,
+ sarray->addr);
+ if (ret1) {
+ pr_err("%s: %s: failed(%d), array index=%d, desc=%s, "
+ "reg=0x%02x, val=0x%02x\n",
+ __func__, gendesc, ret1, i,
+ sarray->desc, sarray->addr, sarray->val);
+ ret = ret1;
+ }
+ sarray++;
+ i++;
+ }
+
+ return ret;
+}
+
static unsigned long twl4030_vsel_to_uv(const u8 vsel)
{
return (((vsel * 125) + 6000)) * 100;
@@ -95,6 +143,8 @@
is_offset_valid = true;
}
+ if (!vsel)
+ return 0;
/*
* There is no specific formula for voltage to vsel
* conversion above 1.3V. There are special hardcoded
@@ -106,9 +156,9 @@
return 1350000;
if (smps_offset & 0x8)
- return ((((vsel - 1) * 125) + 7000)) * 100;
+ return ((((vsel - 1) * 1266) + 70900)) * 10;
else
- return ((((vsel - 1) * 125) + 6000)) * 100;
+ return ((((vsel - 1) * 1266) + 60770)) * 10;
}
static u8 twl6030_uv_to_vsel(unsigned long uv)
@@ -127,6 +177,8 @@
is_offset_valid = true;
}
+ if (!uv)
+ return 0x00;
/*
* There is no specific formula for voltage to vsel
* conversion above 1.3V. There are special hardcoded
@@ -134,16 +186,21 @@
* hardcoding only for 1.35 V which is used for the 1 GHz OPP for
* OMAP4430.
*/
- if (uv == 1350000)
+ if (uv > twl6030_vsel_to_uv(0x39)) {
+ if (uv == 1350000)
+ return 0x3A;
+ pr_err("%s: OUT OF RANGE! no mapped vsel for %ld uV vs max %ld uV\n",
+ __func__, uv, twl6030_vsel_to_uv(0x39));
return 0x3A;
+ }
if (smps_offset & 0x8)
- return DIV_ROUND_UP(uv - 700000, 12500) + 1;
+ return DIV_ROUND_UP(uv - 709000, 12660) + 1;
else
- return DIV_ROUND_UP(uv - 600000, 12500) + 1;
+ return DIV_ROUND_UP(uv - 607700, 12660) + 1;
}
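
As a worked check of the new constants (my arithmetic, not from the patch;
0x39 is the same code used as the cap in twl6030_uv_to_vsel() above): with
the SMPS offset bit clear, vsel 0x39 decodes to ((0x39 - 1) * 1266 + 60770)
* 10 = 1316660 uV, and encoding 1316660 uV gives DIV_ROUND_UP(1316660 -
607700, 12660) + 1 = 0x39 again. A standalone round-trip check:

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long vsel_to_uv(unsigned char vsel)	/* no SMPS offset */
{
	return ((vsel - 1) * 1266 + 60770) * 10;
}

static unsigned char uv_to_vsel(unsigned long uv)	/* no SMPS offset */
{
	return DIV_ROUND_UP(uv - 607700, 12660) + 1;
}

int main(void)
{
	assert(vsel_to_uv(0x39) == 1316660);
	assert(uv_to_vsel(1316660) == 0x39);
	return 0;
}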
-static struct omap_volt_pmic_info omap3_mpu_volt_info = {
+static struct omap_voltdm_pmic omap3_mpu_pmic = {
.slew_rate = 4000,
.step_size = 12500,
.on_volt = 1200000,
@@ -158,12 +215,13 @@
.vp_vddmax = OMAP3430_VP1_VLIMITTO_VDDMAX,
.vp_timeout_us = OMAP3_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP3_SRI2C_SLAVE_ADDR,
- .pmic_reg = OMAP3_VDD_MPU_SR_CONTROL_REG,
+ .volt_reg_addr = OMAP3_VDD_MPU_SR_CONTROL_REG,
+ .i2c_high_speed = true,
.vsel_to_uv = twl4030_vsel_to_uv,
.uv_to_vsel = twl4030_uv_to_vsel,
};
-static struct omap_volt_pmic_info omap3_core_volt_info = {
+static struct omap_voltdm_pmic omap3_core_pmic = {
.slew_rate = 4000,
.step_size = 12500,
.on_volt = 1200000,
@@ -178,19 +236,21 @@
.vp_vddmax = OMAP3430_VP2_VLIMITTO_VDDMAX,
.vp_timeout_us = OMAP3_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP3_SRI2C_SLAVE_ADDR,
- .pmic_reg = OMAP3_VDD_CORE_SR_CONTROL_REG,
+ .volt_reg_addr = OMAP3_VDD_CORE_SR_CONTROL_REG,
+ .i2c_high_speed = true,
.vsel_to_uv = twl4030_vsel_to_uv,
.uv_to_vsel = twl4030_uv_to_vsel,
};
-static struct omap_volt_pmic_info omap4_mpu_volt_info = {
- .slew_rate = 4000,
- .step_size = 12500,
- .on_volt = 1350000,
- .onlp_volt = 1350000,
- .ret_volt = 837500,
- .off_volt = 600000,
+static struct omap_voltdm_pmic omap443x_mpu_pmic = {
+ .slew_rate = 9000,
+ .step_size = 12660,
+ .on_volt = 1375000,
+ .onlp_volt = 1375000,
+ .ret_volt = 830000,
+ .off_volt = 0,
.volt_setup_time = 0,
+ .switch_on_time = 549,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
@@ -198,19 +258,26 @@
.vp_vddmax = OMAP4_VP_MPU_VLIMITTO_VDDMAX,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
- .pmic_reg = OMAP4_VDD_MPU_SR_VOLT_REG,
+ .volt_reg_addr = OMAP4_VDD_MPU_SR_VOLT_REG,
+ .cmd_reg_addr = OMAP4_VDD_MPU_SR_CMD_REG,
+ .i2c_high_speed = true,
+ .i2c_scll_low = 0x28,
+ .i2c_scll_high = 0x2C,
+ .i2c_hscll_low = 0x0B,
+ .i2c_hscll_high = 0x00,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
-static struct omap_volt_pmic_info omap4_iva_volt_info = {
- .slew_rate = 4000,
- .step_size = 12500,
- .on_volt = 1100000,
- .onlp_volt = 1100000,
- .ret_volt = 837500,
- .off_volt = 600000,
+static struct omap_voltdm_pmic omap4_iva_pmic = {
+ .slew_rate = 9000,
+ .step_size = 12660,
+ .on_volt = 1188000,
+ .onlp_volt = 1188000,
+ .ret_volt = 830000,
+ .off_volt = 0,
.volt_setup_time = 0,
+ .switch_on_time = 549,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
@@ -218,19 +285,26 @@
.vp_vddmax = OMAP4_VP_IVA_VLIMITTO_VDDMAX,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
- .pmic_reg = OMAP4_VDD_IVA_SR_VOLT_REG,
+ .volt_reg_addr = OMAP4_VDD_IVA_SR_VOLT_REG,
+ .cmd_reg_addr = OMAP4_VDD_IVA_SR_CMD_REG,
+ .i2c_high_speed = true,
+ .i2c_scll_low = 0x28,
+ .i2c_scll_high = 0x2C,
+ .i2c_hscll_low = 0x0B,
+ .i2c_hscll_high = 0x00,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
-static struct omap_volt_pmic_info omap4_core_volt_info = {
- .slew_rate = 4000,
- .step_size = 12500,
- .on_volt = 1100000,
- .onlp_volt = 1100000,
- .ret_volt = 837500,
- .off_volt = 600000,
+static struct omap_voltdm_pmic omap443x_core_pmic = {
+ .slew_rate = 9000,
+ .step_size = 12660,
+ .on_volt = 1200000,
+ .onlp_volt = 1200000,
+ .ret_volt = 830000,
+ .off_volt = 0,
.volt_setup_time = 0,
+ .switch_on_time = 549,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
@@ -238,43 +312,49 @@
.vp_vddmax = OMAP4_VP_CORE_VLIMITTO_VDDMAX,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
- .pmic_reg = OMAP4_VDD_CORE_SR_VOLT_REG,
+ .i2c_high_speed = true,
+ .i2c_scll_low = 0x28,
+ .i2c_scll_high = 0x2C,
+ .i2c_hscll_low = 0x0B,
+ .i2c_hscll_high = 0x00,
+ .volt_reg_addr = OMAP4_VDD_CORE_SR_VOLT_REG,
+ .cmd_reg_addr = OMAP4_VDD_CORE_SR_CMD_REG,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
-int __init omap4_twl_init(void)
+/* Core uses the MPU rail of 4430 */
+static struct omap_voltdm_pmic omap446x_core_pmic = {
+ .slew_rate = 9000,
+ .step_size = 12660,
+ .on_volt = 1200000,
+ .onlp_volt = 1200000,
+ .ret_volt = 830000,
+ /* OMAP4 + TWL + TPS limitation: keep off_volt the same as ret_volt */
+ .off_volt = 830000,
+ .volt_setup_time = 0,
+ .switch_on_time = 549,
+ .vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
+ .vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
+ .vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
+ .vp_vddmin = OMAP4_VP_CORE_VLIMITTO_VDDMIN,
+ .vp_vddmax = OMAP4_VP_CORE_VLIMITTO_VDDMAX,
+ .vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
+ .i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
+ .i2c_high_speed = true,
+ .i2c_scll_low = 0x28,
+ .i2c_scll_high = 0x2C,
+ .i2c_hscll_low = 0x0B,
+ .i2c_hscll_high = 0x00,
+ .volt_reg_addr = OMAP4_VDD_MPU_SR_VOLT_REG,
+ .cmd_reg_addr = OMAP4_VDD_MPU_SR_CMD_REG,
+ .vsel_to_uv = twl6030_vsel_to_uv,
+ .uv_to_vsel = twl6030_uv_to_vsel,
+};
+
+static int __init twl_set_sr(struct voltagedomain *voltdm)
{
- struct voltagedomain *voltdm;
-
- if (!cpu_is_omap44xx())
- return -ENODEV;
-
- voltdm = omap_voltage_domain_lookup("mpu");
- omap_voltage_register_pmic(voltdm, &omap4_mpu_volt_info);
-
- voltdm = omap_voltage_domain_lookup("iva");
- omap_voltage_register_pmic(voltdm, &omap4_iva_volt_info);
-
- voltdm = omap_voltage_domain_lookup("core");
- omap_voltage_register_pmic(voltdm, &omap4_core_volt_info);
-
- return 0;
-}
-
-int __init omap3_twl_init(void)
-{
- struct voltagedomain *voltdm;
-
- if (!cpu_is_omap34xx())
- return -ENODEV;
-
- if (cpu_is_omap3630()) {
- omap3_mpu_volt_info.vp_vddmin = OMAP3630_VP1_VLIMITTO_VDDMIN;
- omap3_mpu_volt_info.vp_vddmax = OMAP3630_VP1_VLIMITTO_VDDMAX;
- omap3_core_volt_info.vp_vddmin = OMAP3630_VP2_VLIMITTO_VDDMIN;
- omap3_core_volt_info.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX;
- }
+ int r = 0;
/*
* The smartreflex bit on twl4030 specifies if the setting of voltage
@@ -286,15 +366,144 @@
* voltage scaling will not function on TWL over I2C_SR.
*/
if (!twl_sr_enable_autoinit)
- omap3_twl_set_sr_bit(true);
+ r = omap3_twl_set_sr_bit(true);
+ return r;
+}
- voltdm = omap_voltage_domain_lookup("mpu");
- omap_voltage_register_pmic(voltdm, &omap3_mpu_volt_info);
- voltdm = omap_voltage_domain_lookup("core");
- omap_voltage_register_pmic(voltdm, &omap3_core_volt_info);
+/* OMAP4430 - All vcores: 1, 2 and 3 should go down with PREQ */
+static __initdata struct twl_reg_setup_array omap4430_twl6030_setup[] = {
+ {
+ .addr = TWL6030_REG_VCORE1_CFG_GRP,
+ .val = TWL6030_BIT_APE_GRP,
+ .desc = "Pull VCORE1 down along with App processor's PREQ1",
+ },
+ {
+ .addr = TWL6030_REG_VCORE1_CFG_TRANS,
+ .val = TWL6030_REG_VCOREx_CFG_TRANS_MODE,
+ .desc = "VCORE1" TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC,
+ },
+ {
+ .addr = TWL6030_REG_VCORE2_CFG_GRP,
+ .val = TWL6030_BIT_APE_GRP,
+ .desc = "Pull VCORE2 down along with App processor's PREQ1",
+ },
+ {
+ .addr = TWL6030_REG_VCORE2_CFG_TRANS,
+ .val = TWL6030_REG_VCOREx_CFG_TRANS_MODE,
+ .desc = "VCORE2" TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC,
+ },
+ {
+ .addr = TWL6030_REG_VCORE3_CFG_GRP,
+ .val = TWL6030_BIT_APE_GRP,
+ .desc = "Pull VCORE3 down along with App processor's PREQ1",
+ },
+ {
+ .addr = TWL6030_REG_VCORE3_CFG_TRANS,
+ .val = TWL6030_REG_VCOREx_CFG_TRANS_MODE,
+ .desc = "VCORE3" TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC,
+ },
+ { .desc = NULL} /* TERMINATOR */
+};
- return 0;
+static int __init twl_set_4430vcore(struct voltagedomain *voltdm)
+{
+ return _twl_set_regs("OMAP4430 ", omap4430_twl6030_setup);
+}
+
+/* OMAP4460 - VCORE3 is unused, 1 and 2 should go down with PREQ */
+static __initdata struct twl_reg_setup_array omap4460_twl6030_setup[] = {
+ {
+ .addr = TWL6030_REG_VCORE1_CFG_GRP,
+ .val = TWL6030_BIT_APE_GRP,
+ .desc = "Pull VCORE1 down along with App processor's PREQ1",
+ },
+ {
+ .addr = TWL6030_REG_VCORE1_CFG_TRANS,
+ .val = TWL6030_REG_VCOREx_CFG_TRANS_MODE,
+ .desc = "VCORE1" TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC,
+ },
+ {
+ .addr = TWL6030_REG_VCORE2_CFG_GRP,
+ .val = TWL6030_BIT_APE_GRP,
+ .desc = "Pull VCORE2 down along with App processor's PREQ1",
+ },
+ {
+ .addr = TWL6030_REG_VCORE2_CFG_TRANS,
+ .val = TWL6030_REG_VCOREx_CFG_TRANS_MODE,
+ .desc = "VCORE2" TWL6030_REG_VCOREx_CFG_TRANS_MODE_DESC,
+ },
+ { .desc = NULL} /* TERMINATOR */
+};
+
+static int __init twl_set_4460vcore(struct voltagedomain *voltdm)
+{
+ return _twl_set_regs("OMAP4460 ", omap4460_twl6030_setup);
+}
+
+#define OMAP3_TWL4030_USED (CHIP_GE_OMAP3430ES2 | \
+ CHIP_GE_OMAP3630ES1_1 | \
+ CHIP_IS_OMAP3630ES1)
+
+static __initdata struct omap_pmic_map omap_twl_map[] = {
+ {
+ .name = "mpu_iva",
+ .omap_chip = OMAP_CHIP_INIT(OMAP3_TWL4030_USED),
+ .pmic_data = &omap3_mpu_pmic,
+ .special_action = twl_set_sr,
+ },
+ {
+ .name = "core",
+ .omap_chip = OMAP_CHIP_INIT(OMAP3_TWL4030_USED),
+ .pmic_data = &omap3_core_pmic,
+ },
+ {
+ .name = "mpu",
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP443X),
+ .pmic_data = &omap443x_mpu_pmic,
+ },
+ {
+ .name = "core",
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP443X),
+ .pmic_data = &omap443x_core_pmic,
+ .special_action = twl_set_4430vcore,
+ },
+ {
+ .name = "core",
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
+ .pmic_data = &omap446x_core_pmic,
+ .special_action = twl_set_4460vcore,
+ },
+ {
+ .name = "iva",
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pmic_data = &omap4_iva_pmic,
+ },
+ /* Terminator */
+ { .name = NULL, .pmic_data = NULL},
+};
+
+/* As per SWCS045 */
+static __initdata struct omap_pmic_description twl6030_pmic_desc = {
+ .pmic_lp_tshut = 500, /* T-OFF */
+ .pmic_lp_tstart = 500, /* T-ON */
+};
+
+int __init omap_twl_init(void)
+{
+ struct omap_pmic_description *desc = NULL;
+
+ /* Reuse OMAP3430 values */
+ if (cpu_is_omap3630()) {
+ omap3_mpu_pmic.vp_vddmin = OMAP3630_VP1_VLIMITTO_VDDMIN;
+ omap3_mpu_pmic.vp_vddmax = OMAP3630_VP1_VLIMITTO_VDDMAX;
+ omap3_core_pmic.vp_vddmin = OMAP3630_VP2_VLIMITTO_VDDMIN;
+ omap3_core_pmic.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX;
+ }
+ if (cpu_is_omap44xx())
+ desc = &twl6030_pmic_desc;
+
+ return omap_pmic_register_data(omap_twl_map, desc);
}
/**
@@ -337,3 +546,8 @@
pr_err("%s: Error access to TWL4030 (%d)\n", __func__, ret);
return ret;
}
+
+int __init omap_twl_pmic_update(char *name, u32 old_chip_id, u32 new_chip_id)
+{
+ return omap_pmic_update(omap_twl_map, name, old_chip_id, new_chip_id);
+}
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index 0627494..1858712 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -18,10 +18,13 @@
*/
#include <linux/module.h>
#include <linux/opp.h>
+#include <linux/clk.h>
#include <plat/omap_device.h>
+#include <plat/clock.h>
#include "omap_opp_data.h"
+#include "dvfs.h"
/* Temp variable to allow multiple calls */
static u8 __initdata omap_table_init;
@@ -38,6 +41,8 @@
u32 opp_def_size)
{
int i, r;
+ struct clk *clk;
+ long round_rate;
if (!opp_def || !opp_def_size) {
pr_err("%s: invalid params!\n", __func__);
@@ -58,19 +63,34 @@
struct device *dev;
if (!opp_def->hwmod_name) {
- pr_err("%s: NULL name of omap_hwmod, failing [%d].\n",
- __func__, i);
- return -EINVAL;
+ WARN(1, "%s: NULL name of omap_hwmod, skipping"
+ " [%d].\n", __func__, i);
+ continue;
}
oh = omap_hwmod_lookup(opp_def->hwmod_name);
if (!oh || !oh->od) {
- pr_warn("%s: no hwmod or odev for %s, [%d] "
+ WARN(1, "%s: no hwmod or odev for %s, [%d] "
"cannot add OPPs.\n", __func__,
opp_def->hwmod_name, i);
- return -EINVAL;
+ continue;
}
dev = &oh->od->pdev.dev;
+ clk = omap_clk_get_by_name(opp_def->clk_name);
+ if (clk) {
+ round_rate = clk_round_rate(clk, opp_def->freq);
+ if (round_rate > 0) {
+ opp_def->freq = round_rate;
+ } else {
+ WARN(1, "%s: round_rate for clock %s failed\n",
+ __func__, opp_def->clk_name);
+ continue; /* skip Bad OPP */
+ }
+ } else {
+ WARN(1, "%s: No clock by name %s found\n", __func__,
+ opp_def->clk_name);
+ continue; /* skip Bad OPP */
+ }
r = opp_add(dev, opp_def->freq, opp_def->u_volt);
if (r) {
dev_err(dev, "%s: add OPP %ld failed for %s [%d] "
@@ -85,6 +105,12 @@
"[%d] result=%d\n",
__func__, opp_def->freq,
opp_def->hwmod_name, i, r);
+
+ r = omap_dvfs_register_device(dev,
+ opp_def->voltdm_name, opp_def->clk_name);
+ if (r)
+ dev_err(dev, "%s:%s:err dvfs register %d %d\n",
+ __func__, opp_def->hwmod_name, r, i);
}
}
diff --git a/arch/arm/mach-omap2/opp3xxx_data.c b/arch/arm/mach-omap2/opp3xxx_data.c
index d95f3f9..41619ea 100644
--- a/arch/arm/mach-omap2/opp3xxx_data.c
+++ b/arch/arm/mach-omap2/opp3xxx_data.c
@@ -36,12 +36,12 @@
#define OMAP3430_VDD_MPU_OPP5_UV 1350000
struct omap_volt_data omap34xx_vddmpu_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD1, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD1, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD1, 0xf9, 0x18),
- VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP4_UV, OMAP343X_CONTROL_FUSE_OPP4_VDD1, 0xf9, 0x18),
- VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP5_UV, OMAP343X_CONTROL_FUSE_OPP5_VDD1, 0xf9, 0x18),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP1_UV, 0, OMAP343X_CONTROL_FUSE_OPP1_VDD1, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP2_UV, 0, OMAP343X_CONTROL_FUSE_OPP2_VDD1, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP3_UV, 0, OMAP343X_CONTROL_FUSE_OPP3_VDD1, 0xf9, 0x18, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP4_UV, 0, OMAP343X_CONTROL_FUSE_OPP4_VDD1, 0xf9, 0x18, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP5_UV, 0, OMAP343X_CONTROL_FUSE_OPP5_VDD1, 0xf9, 0x18, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
/* VDD2 */
@@ -51,10 +51,28 @@
#define OMAP3430_VDD_CORE_OPP3_UV 1150000
struct omap_volt_data omap34xx_vddcore_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD2, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD2, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD2, 0xf9, 0x18),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP1_UV, 0, OMAP343X_CONTROL_FUSE_OPP1_VDD2, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP2_UV, 0, OMAP343X_CONTROL_FUSE_OPP2_VDD2, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP3_UV, 0, OMAP343X_CONTROL_FUSE_OPP3_VDD2, 0xf9, 0x18, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
+};
+
+/* OMAP 3430 MPU Core VDD dependency table */
+static struct omap_vdd_dep_volt omap34xx_vdd_mpu_core_dep_data[] = {
+ {.main_vdd_volt = OMAP3430_VDD_MPU_OPP1_UV, .dep_vdd_volt = OMAP3430_VDD_CORE_OPP2_UV},
+ {.main_vdd_volt = OMAP3430_VDD_MPU_OPP2_UV, .dep_vdd_volt = OMAP3430_VDD_CORE_OPP2_UV},
+ {.main_vdd_volt = OMAP3430_VDD_MPU_OPP3_UV, .dep_vdd_volt = OMAP3430_VDD_CORE_OPP3_UV},
+ {.main_vdd_volt = OMAP3430_VDD_MPU_OPP4_UV, .dep_vdd_volt = OMAP3430_VDD_CORE_OPP3_UV},
+ {.main_vdd_volt = OMAP3430_VDD_MPU_OPP5_UV, .dep_vdd_volt = OMAP3430_VDD_CORE_OPP3_UV},
+};
+
+struct omap_vdd_dep_info omap34xx_vddmpu_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap34xx_vdd_mpu_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap34xx_vdd_mpu_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
};
/* 36xx */
@@ -67,11 +85,11 @@
#define OMAP3630_VDD_MPU_OPP1G_UV 1375000
struct omap_volt_data omap36xx_vddmpu_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD1, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD1, 0xf9, 0x16),
- VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP120_UV, OMAP3630_CONTROL_FUSE_OPP120_VDD1, 0xfa, 0x23),
- VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP1G_UV, OMAP3630_CONTROL_FUSE_OPP1G_VDD1, 0xfa, 0x27),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP50_UV, 0, OMAP3630_CONTROL_FUSE_OPP50_VDD1, 0xf4, 0x0c, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP100_UV, 0, OMAP3630_CONTROL_FUSE_OPP100_VDD1, 0xf9, 0x16, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP120_UV, 0, OMAP3630_CONTROL_FUSE_OPP120_VDD1, 0xfa, 0x23, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP1G_UV, 0, OMAP3630_CONTROL_FUSE_OPP1G_VDD1, 0xfa, 0x27, OMAP_ABB_FAST_OPP),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
/* VDD2 */
@@ -80,24 +98,24 @@
#define OMAP3630_VDD_CORE_OPP100_UV 1200000
struct omap_volt_data omap36xx_vddcore_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD2, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD2, 0xf9, 0x16),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP50_UV, 0, OMAP3630_CONTROL_FUSE_OPP50_VDD2, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP100_UV, 0, OMAP3630_CONTROL_FUSE_OPP100_VDD2, 0xf9, 0x16, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
/* OPP data */
static struct omap_opp_def __initdata omap34xx_opp_def_list[] = {
/* MPU OPP1 */
- OPP_INITIALIZER("mpu", true, 125000000, OMAP3430_VDD_MPU_OPP1_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 125000000, OMAP3430_VDD_MPU_OPP1_UV),
/* MPU OPP2 */
- OPP_INITIALIZER("mpu", true, 250000000, OMAP3430_VDD_MPU_OPP2_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 250000000, OMAP3430_VDD_MPU_OPP2_UV),
/* MPU OPP3 */
- OPP_INITIALIZER("mpu", true, 500000000, OMAP3430_VDD_MPU_OPP3_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 500000000, OMAP3430_VDD_MPU_OPP3_UV),
/* MPU OPP4 */
- OPP_INITIALIZER("mpu", true, 550000000, OMAP3430_VDD_MPU_OPP4_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 550000000, OMAP3430_VDD_MPU_OPP4_UV),
/* MPU OPP5 */
- OPP_INITIALIZER("mpu", true, 600000000, OMAP3430_VDD_MPU_OPP5_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 600000000, OMAP3430_VDD_MPU_OPP5_UV),
/*
* L3 OPP1 - 41.5 MHz is disabled because: The voltage for that OPP is
@@ -107,47 +125,64 @@
* impact that frequency will do to the MPU and the whole system in
* general.
*/
- OPP_INITIALIZER("l3_main", false, 41500000, OMAP3430_VDD_CORE_OPP1_UV),
+ OPP_INITIALIZER("l3_main", "dpll3_ck", "core", false, 41500000, OMAP3430_VDD_CORE_OPP1_UV),
/* L3 OPP2 */
- OPP_INITIALIZER("l3_main", true, 83000000, OMAP3430_VDD_CORE_OPP2_UV),
+ OPP_INITIALIZER("l3_main", "dpll3_ck", "core", true, 83000000, OMAP3430_VDD_CORE_OPP2_UV),
/* L3 OPP3 */
- OPP_INITIALIZER("l3_main", true, 166000000, OMAP3430_VDD_CORE_OPP3_UV),
+ OPP_INITIALIZER("l3_main", "dpll3_ck", "core", true, 166000000, OMAP3430_VDD_CORE_OPP3_UV),
/* DSP OPP1 */
- OPP_INITIALIZER("iva", true, 90000000, OMAP3430_VDD_MPU_OPP1_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 90000000, OMAP3430_VDD_MPU_OPP1_UV),
/* DSP OPP2 */
- OPP_INITIALIZER("iva", true, 180000000, OMAP3430_VDD_MPU_OPP2_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 180000000, OMAP3430_VDD_MPU_OPP2_UV),
/* DSP OPP3 */
- OPP_INITIALIZER("iva", true, 360000000, OMAP3430_VDD_MPU_OPP3_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 360000000, OMAP3430_VDD_MPU_OPP3_UV),
/* DSP OPP4 */
- OPP_INITIALIZER("iva", true, 400000000, OMAP3430_VDD_MPU_OPP4_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 400000000, OMAP3430_VDD_MPU_OPP4_UV),
/* DSP OPP5 */
- OPP_INITIALIZER("iva", true, 430000000, OMAP3430_VDD_MPU_OPP5_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 430000000, OMAP3430_VDD_MPU_OPP5_UV),
};
static struct omap_opp_def __initdata omap36xx_opp_def_list[] = {
/* MPU OPP1 - OPP50 */
- OPP_INITIALIZER("mpu", true, 300000000, OMAP3630_VDD_MPU_OPP50_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 300000000, OMAP3630_VDD_MPU_OPP50_UV),
/* MPU OPP2 - OPP100 */
- OPP_INITIALIZER("mpu", true, 600000000, OMAP3630_VDD_MPU_OPP100_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", true, 600000000, OMAP3630_VDD_MPU_OPP100_UV),
/* MPU OPP3 - OPP-Turbo */
- OPP_INITIALIZER("mpu", false, 800000000, OMAP3630_VDD_MPU_OPP120_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", false, 800000000, OMAP3630_VDD_MPU_OPP120_UV),
/* MPU OPP4 - OPP-SB */
- OPP_INITIALIZER("mpu", false, 1000000000, OMAP3630_VDD_MPU_OPP1G_UV),
+ OPP_INITIALIZER("mpu", "dpll1_ck", "mpu_iva", false, 1000000000, OMAP3630_VDD_MPU_OPP1G_UV),
/* L3 OPP1 - OPP50 */
- OPP_INITIALIZER("l3_main", true, 100000000, OMAP3630_VDD_CORE_OPP50_UV),
+ OPP_INITIALIZER("l3_main", "dpll3_ck", "core", true, 100000000, OMAP3630_VDD_CORE_OPP50_UV),
/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
- OPP_INITIALIZER("l3_main", true, 200000000, OMAP3630_VDD_CORE_OPP100_UV),
+ OPP_INITIALIZER("l3_main", "dpll3_ck", "core", true, 200000000, OMAP3630_VDD_CORE_OPP100_UV),
/* DSP OPP1 - OPP50 */
- OPP_INITIALIZER("iva", true, 260000000, OMAP3630_VDD_MPU_OPP50_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 260000000, OMAP3630_VDD_MPU_OPP50_UV),
/* DSP OPP2 - OPP100 */
- OPP_INITIALIZER("iva", true, 520000000, OMAP3630_VDD_MPU_OPP100_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", true, 520000000, OMAP3630_VDD_MPU_OPP100_UV),
/* DSP OPP3 - OPP-Turbo */
- OPP_INITIALIZER("iva", false, 660000000, OMAP3630_VDD_MPU_OPP120_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", false, 660000000, OMAP3630_VDD_MPU_OPP120_UV),
/* DSP OPP4 - OPP-SB */
- OPP_INITIALIZER("iva", false, 800000000, OMAP3630_VDD_MPU_OPP1G_UV),
+ OPP_INITIALIZER("iva", "dpll2_ck", "mpu_iva", false, 800000000, OMAP3630_VDD_MPU_OPP1G_UV),
+};
+
+/* OMAP 3630 MPU Core VDD dependency table */
+static struct omap_vdd_dep_volt omap36xx_vdd_mpu_core_dep_data[] = {
+ {.main_vdd_volt = OMAP3630_VDD_MPU_OPP50_UV, .dep_vdd_volt = OMAP3630_VDD_CORE_OPP50_UV},
+ {.main_vdd_volt = OMAP3630_VDD_MPU_OPP100_UV, .dep_vdd_volt = OMAP3630_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP3630_VDD_MPU_OPP120_UV, .dep_vdd_volt = OMAP3630_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP3630_VDD_MPU_OPP1G_UV, .dep_vdd_volt = OMAP3630_VDD_CORE_OPP100_UV},
+};
+
+struct omap_vdd_dep_info omap36xx_vddmpu_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap36xx_vdd_mpu_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap36xx_vdd_mpu_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
};
/**
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba2..e36ca46 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -1,7 +1,7 @@
/*
* OMAP4 OPP table definitions.
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
* Nishanth Menon
* Kevin Hilman
* Thara Gopinath
@@ -19,8 +19,10 @@
* GNU General Public License for more details.
*/
#include <linux/module.h>
+#include <linux/opp.h>
#include <plat/cpu.h>
+#include <plat/common.h>
#include "control.h"
#include "omap_opp_data.h"
@@ -33,60 +35,276 @@
#define OMAP4430_VDD_MPU_OPP50_UV 1025000
#define OMAP4430_VDD_MPU_OPP100_UV 1200000
-#define OMAP4430_VDD_MPU_OPPTURBO_UV 1313000
-#define OMAP4430_VDD_MPU_OPPNITRO_UV 1375000
+#define OMAP4430_VDD_MPU_OPPTURBO_UV 1325000
+#define OMAP4430_VDD_MPU_OPPNITRO_UV 1388000
-struct omap_volt_data omap44xx_vdd_mpu_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16),
- VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23),
- VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+struct omap_volt_data omap443x_vdd_mpu_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP50_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP100_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPTURBO_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPNITRO_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27, OMAP_ABB_FAST_OPP),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
-#define OMAP4430_VDD_IVA_OPP50_UV 1013000
-#define OMAP4430_VDD_IVA_OPP100_UV 1188000
-#define OMAP4430_VDD_IVA_OPPTURBO_UV 1300000
+#define OMAP4430_VDD_IVA_OPP50_UV 950000
+#define OMAP4430_VDD_IVA_OPP100_UV 1114000
+#define OMAP4430_VDD_IVA_OPPTURBO_UV 1291000
-struct omap_volt_data omap44xx_vdd_iva_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16),
- VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+struct omap_volt_data omap443x_vdd_iva_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP50_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP100_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPPTURBO_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
-#define OMAP4430_VDD_CORE_OPP50_UV 1025000
-#define OMAP4430_VDD_CORE_OPP100_UV 1200000
+#define OMAP4430_VDD_CORE_OPP50_UV 962000
+#define OMAP4430_VDD_CORE_OPP100_UV 1127000
-struct omap_volt_data omap44xx_vdd_core_volt_data[] = {
- VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP50_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c),
- VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16),
- VOLT_DATA_DEFINE(0, 0, 0, 0),
+struct omap_volt_data omap443x_vdd_core_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP50_UV, 0, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP100_UV, 0, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
};
+/* Domain dependencies for OMAP4430 are as follows (OPP based):
+ *
+ * MPU IVA CORE
+ * 50 50 50+
+ * 50 100+ 100
+ * 100+ 50 100
+ * 100+ 100+ 100
+ */
-static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
+/* OMAP 4430 MPU Core VDD dependency table */
+static struct omap_vdd_dep_volt omap443x_vdd_mpu_core_dep_data[] = {
+ {.main_vdd_volt = OMAP4430_VDD_MPU_OPP50_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP50_UV},
+ {.main_vdd_volt = OMAP4430_VDD_MPU_OPP100_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4430_VDD_MPU_OPPTURBO_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4430_VDD_MPU_OPPNITRO_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP100_UV},
+};
+
+struct omap_vdd_dep_info omap443x_vddmpu_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap443x_vdd_mpu_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap443x_vdd_mpu_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
+};
+
+/* OMAP 4430 MPU IVA VDD dependency table */
+static struct omap_vdd_dep_volt omap443x_vdd_iva_core_dep_data[] = {
+ {.main_vdd_volt = OMAP4430_VDD_IVA_OPP50_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP50_UV},
+ {.main_vdd_volt = OMAP4430_VDD_IVA_OPP100_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4430_VDD_IVA_OPPTURBO_UV, .dep_vdd_volt = OMAP4430_VDD_CORE_OPP100_UV},
+};
+
+struct omap_vdd_dep_info omap443x_vddiva_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap443x_vdd_iva_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap443x_vdd_iva_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
+};
+
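+/*
+ * OPP_INITIALIZER() arguments below, in order: hwmod/device name, clock
+ * name, voltage-domain name, default-enabled flag, rate (Hz) and nominal
+ * voltage (uV). Field meaning is inferred from the entries; see the
+ * OPP_INITIALIZER definition for the authoritative layout.
+ */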
+static struct omap_opp_def __initdata omap443x_opp_def_list[] = {
/* MPU OPP1 - OPP50 */
- OPP_INITIALIZER("mpu", true, 300000000, OMAP4430_VDD_MPU_OPP50_UV),
+ OPP_INITIALIZER("mpu", "dpll_mpu_ck", "mpu", true, 300000000, OMAP4430_VDD_MPU_OPP50_UV),
/* MPU OPP2 - OPP100 */
- OPP_INITIALIZER("mpu", true, 600000000, OMAP4430_VDD_MPU_OPP100_UV),
+ OPP_INITIALIZER("mpu", "dpll_mpu_ck", "mpu", true, 600000000, OMAP4430_VDD_MPU_OPP100_UV),
/* MPU OPP3 - OPP-Turbo */
- OPP_INITIALIZER("mpu", true, 800000000, OMAP4430_VDD_MPU_OPPTURBO_UV),
+ OPP_INITIALIZER("mpu", "dpll_mpu_ck", "mpu", true, 800000000, OMAP4430_VDD_MPU_OPPTURBO_UV),
/* MPU OPP4 - OPP-SB */
- OPP_INITIALIZER("mpu", true, 1008000000, OMAP4430_VDD_MPU_OPPNITRO_UV),
+ OPP_INITIALIZER("mpu", "dpll_mpu_ck", "mpu", true, 1008000000, OMAP4430_VDD_MPU_OPPNITRO_UV),
/* L3 OPP1 - OPP50 */
- OPP_INITIALIZER("l3_main_1", true, 100000000, OMAP4430_VDD_CORE_OPP50_UV),
+ OPP_INITIALIZER("l3_main_1", "virt_l3_ck", "core", true, 100000000, OMAP4430_VDD_CORE_OPP50_UV),
/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
- OPP_INITIALIZER("l3_main_1", true, 200000000, OMAP4430_VDD_CORE_OPP100_UV),
+ OPP_INITIALIZER("l3_main_1", "virt_l3_ck", "core", true, 200000000, OMAP4430_VDD_CORE_OPP100_UV),
/* IVA OPP1 - OPP50 */
- OPP_INITIALIZER("iva", true, 133000000, OMAP4430_VDD_IVA_OPP50_UV),
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", true, 133000000, OMAP4430_VDD_IVA_OPP50_UV),
/* IVA OPP2 - OPP100 */
- OPP_INITIALIZER("iva", true, 266100000, OMAP4430_VDD_IVA_OPP100_UV),
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", true, 266100000, OMAP4430_VDD_IVA_OPP100_UV),
/* IVA OPP3 - OPP-Turbo */
- OPP_INITIALIZER("iva", false, 332000000, OMAP4430_VDD_IVA_OPPTURBO_UV),
- /* TODO: add DSP, aess, fdif, gpu */
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", false, 332000000, OMAP4430_VDD_IVA_OPPTURBO_UV),
+ /* SGX OPP1 - OPP50 */
+ OPP_INITIALIZER("gpu", "dpll_per_m7x2_ck", "core", true, 153600000, OMAP4430_VDD_CORE_OPP50_UV),
+ /* SGX OPP2 - OPP100 */
+ OPP_INITIALIZER("gpu", "dpll_per_m7x2_ck", "core", true, 307200000, OMAP4430_VDD_CORE_OPP100_UV),
+ /* FDIF OPP1 - OPP25 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 32000000, OMAP4430_VDD_CORE_OPP50_UV),
+ /* FDIF OPP2 - OPP50 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 64000000, OMAP4430_VDD_CORE_OPP50_UV),
+ /* FDIF OPP3 - OPP100 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 128000000, OMAP4430_VDD_CORE_OPP100_UV),
+ /* DSP OPP1 - OPP50 */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", true, 232750000, OMAP4430_VDD_IVA_OPP50_UV),
+ /* DSP OPP2 - OPP100 */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", true, 465500000, OMAP4430_VDD_IVA_OPP100_UV),
+ /* DSP OPP3 - OPPTB */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", false, 496000000, OMAP4430_VDD_IVA_OPPTURBO_UV),
+ /* HSI OPP1 - OPP50 */
+ OPP_INITIALIZER("hsi", "hsi_fck", "core", true, 96000000, OMAP4430_VDD_CORE_OPP50_UV),
+ /* HSI OPP2 - OPP100 */
+ OPP_INITIALIZER("hsi", "hsi_fck", "core", true, 96000000, OMAP4430_VDD_CORE_OPP100_UV),
+ /* ABE OPP1 - OPP50 */
+ OPP_INITIALIZER("aess", "abe_clk", "iva", true, 98304000, OMAP4430_VDD_IVA_OPP50_UV),
+ /* ABE OPP2 - OPP100 */
+ OPP_INITIALIZER("aess", "abe_clk", "iva", true, 196608000, OMAP4430_VDD_IVA_OPP100_UV),
};
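+
+/*
+ * OPPs registered with the enable flag set to false stay disabled until
+ * they are explicitly enabled at run time, e.g. via opp_enable() from a
+ * board file or a silicon-feature check such as omap4_mpu_opp_enable()
+ * below.
+ */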
+#define OMAP4460_VDD_MPU_OPP50_UV 1025000
+#define OMAP4460_VDD_MPU_OPP100_UV 1203000
+#define OMAP4460_VDD_MPU_OPPTURBO_UV 1317000
+#define OMAP4460_VDD_MPU_OPPNITRO_UV 1380000
+
+struct omap_volt_data omap446x_vdd_mpu_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP50_UV, 10000, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP100_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPTURBO_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPNITRO_UV, 0, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27, OMAP_ABB_FAST_OPP),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
+};
+
+#define OMAP4460_VDD_IVA_OPP50_UV 950000
+#define OMAP4460_VDD_IVA_OPP100_UV 1140000
+#define OMAP4460_VDD_IVA_OPPTURBO_UV 1291000
+#define OMAP4460_VDD_IVA_OPPNITRO_UV 1375000
+
+struct omap_volt_data omap446x_vdd_iva_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP50_UV, 13000, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP100_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPTURBO_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23, OMAP_ABB_NOMINAL_OPP),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPNITRO_UV, 0, OMAP44XX_CONTROL_FUSE_IVA_OPPNITRO, 0xfa, 0x23, OMAP_ABB_FAST_OPP),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
+};
+
+#define OMAP4460_VDD_CORE_OPP50_UV 962000
+#define OMAP4460_VDD_CORE_OPP100_UV 1127000
+#define OMAP4460_VDD_CORE_OPP100_OV_UV 1250000
+
+struct omap_volt_data omap446x_vdd_core_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP50_UV, 38000, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_UV, 13000, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_OV_UV, 13000, OMAP44XX_CONTROL_FUSE_CORE_OPP100OV, 0xf9, 0x16, OMAP_ABB_NONE),
+ VOLT_DATA_DEFINE(0, 0, 0, 0, 0, 0),
+};
+
+/* OMAP 4460 MPU Core VDD dependency table */
+static struct omap_vdd_dep_volt omap446x_vdd_mpu_core_dep_data[] = {
+ {.main_vdd_volt = OMAP4460_VDD_MPU_OPP50_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP50_UV},
+ {.main_vdd_volt = OMAP4460_VDD_MPU_OPP100_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4460_VDD_MPU_OPPTURBO_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4460_VDD_MPU_OPPNITRO_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP100_UV},
+};
+
+struct omap_vdd_dep_info omap446x_vddmpu_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap446x_vdd_mpu_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap446x_vdd_mpu_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
+};
+
+/* OMAP 4460 MPU IVA VDD dependency table */
+static struct omap_vdd_dep_volt omap446x_vdd_iva_core_dep_data[] = {
+ {.main_vdd_volt = OMAP4460_VDD_IVA_OPP50_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP50_UV},
+ {.main_vdd_volt = OMAP4460_VDD_IVA_OPP100_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP100_UV},
+ {.main_vdd_volt = OMAP4460_VDD_IVA_OPPTURBO_UV, .dep_vdd_volt = OMAP4460_VDD_CORE_OPP100_UV},
+};
+
+struct omap_vdd_dep_info omap446x_vddiva_dep_info[] = {
+ {
+ .name = "core",
+ .dep_table = omap446x_vdd_iva_core_dep_data,
+ .nr_dep_entries = ARRAY_SIZE(omap446x_vdd_iva_core_dep_data),
+ },
+ {.name = NULL, .dep_table = NULL, .nr_dep_entries = 0},
+};
+
+static struct omap_opp_def __initdata omap446x_opp_def_list[] = {
+ /* MPU OPP1 - OPP50 */
+ OPP_INITIALIZER("mpu", "virt_dpll_mpu_ck", "mpu", true, 350000000, OMAP4460_VDD_MPU_OPP50_UV),
+ /* MPU OPP2 - OPP100 */
+ OPP_INITIALIZER("mpu", "virt_dpll_mpu_ck", "mpu", true, 700000000, OMAP4460_VDD_MPU_OPP100_UV),
+ /* MPU OPP3 - OPP-Turbo */
+ OPP_INITIALIZER("mpu", "virt_dpll_mpu_ck", "mpu", true, 920000000, OMAP4460_VDD_MPU_OPPTURBO_UV),
+ /* MPU OPP4 - OPP-Nitro */
+ OPP_INITIALIZER("mpu", "virt_dpll_mpu_ck", "mpu", false, 1200000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
+ /* MPU OPP4 - OPP-Nitro SpeedBin */
+ OPP_INITIALIZER("mpu", "virt_dpll_mpu_ck", "mpu", false, 1500000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
+ /* L3 OPP1 - OPP50 */
+ OPP_INITIALIZER("l3_main_1", "virt_l3_ck", "core", true, 100000000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* L3 OPP2 - OPP100 */
+ OPP_INITIALIZER("l3_main_1", "virt_l3_ck", "core", true, 200000000, OMAP4460_VDD_CORE_OPP100_UV),
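+	/* L3 OPP2 (alternate) - OPP100 at the higher OPP100_OV core voltage */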
+ OPP_INITIALIZER("l3_main_1", "virt_l3_ck", "core", true, 200000000, OMAP4460_VDD_CORE_OPP100_OV_UV),
+ /* IVA OPP1 - OPP50 */
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", true, 133000000, OMAP4460_VDD_IVA_OPP50_UV),
+ /* IVA OPP2 - OPP100 */
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", true, 266100000, OMAP4460_VDD_IVA_OPP100_UV),
+	/*
+	 * IVA OPP3 - OPP-Turbo. Disabled by default: the reference schematics
+	 * recommend the Phoenix VCORE2 supply, which can deliver only 600 mA.
+	 * This OPP and the ones above it, even though the OMAP silicon is
+	 * capable of them, should be enabled from the board file, which knows
+	 * the actual power capability of the design.
+	 */
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", false, 332000000, OMAP4460_VDD_IVA_OPPTURBO_UV),
+ /* IVA OPP4 - OPP-Nitro */
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", false, 430000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
+ /* IVA OPP5 - OPP-Nitro SpeedBin*/
+ OPP_INITIALIZER("iva", "dpll_iva_m5x2_ck", "iva", false, 500000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
+
+ /* SGX OPP1 - OPP50 */
+ OPP_INITIALIZER("gpu", "dpll_per_m7x2_ck", "core", true, 153600000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* SGX OPP2 - OPP100 */
+ OPP_INITIALIZER("gpu", "dpll_per_m7x2_ck", "core", true, 307200000, OMAP4460_VDD_CORE_OPP100_UV),
+ /* SGX OPP3 - OPPOV */
+ OPP_INITIALIZER("gpu", "dpll_per_m7x2_ck", "core", false, 384000000, OMAP4460_VDD_CORE_OPP100_OV_UV),
+ /* FDIF OPP1 - OPP25 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 32000000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* FDIF OPP2 - OPP50 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 64000000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* FDIF OPP3 - OPP100 */
+ OPP_INITIALIZER("fdif", "fdif_fck", "core", true, 128000000, OMAP4460_VDD_CORE_OPP100_UV),
+ /* DSP OPP1 - OPP50 */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", true, 232750000, OMAP4460_VDD_IVA_OPP50_UV),
+ /* DSP OPP2 - OPP100 */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", true, 465500000, OMAP4460_VDD_IVA_OPP100_UV),
+ /* DSP OPP3 - OPPTB */
+ OPP_INITIALIZER("dsp", "dpll_iva_m4x2_ck", "iva", false, 496000000, OMAP4460_VDD_IVA_OPPTURBO_UV),
+ /* HSI OPP1 - OPP50 */
+ OPP_INITIALIZER("hsi", "hsi_fck", "core", true, 96000000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* HSI OPP2 - OPP100 */
+ OPP_INITIALIZER("hsi", "hsi_fck", "core", true, 96000000, OMAP4460_VDD_CORE_OPP100_UV),
+ /* ABE OPP1 - OPP50 */
+ OPP_INITIALIZER("aess", "abe_clk", "iva", true, 98304000, OMAP4460_VDD_IVA_OPP50_UV),
+ /* ABE OPP2 - OPP100 */
+ OPP_INITIALIZER("aess", "abe_clk", "iva", true, 196608000, OMAP4460_VDD_IVA_OPP100_UV),
+};
+
+/**
+ * omap4_mpu_opp_enable() - helper to enable the OPP
+ * @freq: frequency to enable
+ */
+static void __init omap4_mpu_opp_enable(unsigned long freq)
+{
+ struct device *mpu_dev;
+ int r;
+
+ mpu_dev = omap2_get_mpuss_device();
+ if (!mpu_dev) {
+ pr_err("%s: no mpu_dev, did not enable f=%ld\n", __func__,
+ freq);
+ return;
+ }
+
+ r = opp_enable(mpu_dev, freq);
+ if (r < 0)
+ dev_err(mpu_dev, "%s: opp_enable failed(%d) f=%ld\n", __func__,
+ r, freq);
+}
+
/**
* omap4_opp_init() - initialize omap4 opp table
*/
@@ -96,9 +314,19 @@
if (!cpu_is_omap44xx())
return r;
+ if (cpu_is_omap443x())
+ r = omap_init_opp_table(omap443x_opp_def_list,
+ ARRAY_SIZE(omap443x_opp_def_list));
+ else if (cpu_is_omap446x())
+ r = omap_init_opp_table(omap446x_opp_def_list,
+ ARRAY_SIZE(omap446x_opp_def_list));
- r = omap_init_opp_table(omap44xx_opp_def_list,
- ARRAY_SIZE(omap44xx_opp_def_list));
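+
+	/*
+	 * The Nitro (1.2 GHz) and Nitro-SB (1.5 GHz) MPU OPPs are registered
+	 * as disabled in the tables above; enable them here only on silicon
+	 * that reports support for those speeds.
+	 */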
+ if (!r) {
+ if (omap4_has_mpu_1_2ghz())
+ omap4_mpu_opp_enable(1200000000);
+ if (omap4_has_mpu_1_5ghz())
+ omap4_mpu_opp_enable(1500000000);
+ }
return r;
}
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index e01da45..5942d23 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -38,11 +38,27 @@
#include "prm2xxx_3xxx.h"
#include "pm.h"
+#define PM_DEBUG_MAX_SAVED_REGS 64
+#define PM_DEBUG_PRM_MIN 0x4A306000
+#define PM_DEBUG_PRM_MAX (0x4A307F00 + (PM_DEBUG_MAX_SAVED_REGS * 4) - 1)
+#define PM_DEBUG_CM1_MIN 0x4A004000
+#define PM_DEBUG_CM1_MAX (0x4A004F00 + (PM_DEBUG_MAX_SAVED_REGS * 4) - 1)
+#define PM_DEBUG_CM2_MIN 0x4A008000
+#define PM_DEBUG_CM2_MAX (0x4A009F00 + (PM_DEBUG_MAX_SAVED_REGS * 4) - 1)
+
int omap2_pm_debug;
u32 enable_off_mode;
u32 sleep_while_idle;
u32 wakeup_timer_seconds;
u32 wakeup_timer_milliseconds;
+u32 omap4_device_off_counter = 0;
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+static u32 saved_reg_num;
+static u32 saved_reg_num_used;
+static u32 saved_reg_addr;
+static u32 saved_reg_buff[2][PM_DEBUG_MAX_SAVED_REGS];
+#endif
#define DUMP_PRM_MOD_REG(mod, reg) \
regs[reg_count].name = #mod "." #reg; \
@@ -194,6 +210,8 @@
enum {
DEBUG_FILE_COUNTERS = 0,
DEBUG_FILE_TIMERS,
+ DEBUG_FILE_LAST_COUNTERS,
+ DEBUG_FILE_LAST_TIMERS,
};
struct pm_module_def {
@@ -367,7 +385,7 @@
/* Update timer for previous state */
t = sched_clock();
- pwrdm->state_timer[prev] += t - pwrdm->timer;
+ pwrdm->time.state[prev] += t - pwrdm->timer;
pwrdm->timer = t;
}
@@ -389,10 +407,53 @@
return 0;
}
+static int pwrdm_dbg_show_count_stats(struct powerdomain *pwrdm,
+ struct powerdomain_count_stats *stats, struct seq_file *s)
+{
+ int i;
+
+ seq_printf(s, "%s (%s)", pwrdm->name,
+ pwrdm_state_names[pwrdm->state]);
+
+ for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
+ seq_printf(s, ",%s:%d", pwrdm_state_names[i],
+ stats->state[i]);
+
+ seq_printf(s, ",RET-LOGIC-OFF:%d", stats->ret_logic_off);
+ for (i = 0; i < pwrdm->banks; i++)
+ seq_printf(s, ",RET-MEMBANK%d-OFF:%d", i + 1,
+ stats->ret_mem_off[i]);
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int pwrdm_dbg_show_time_stats(struct powerdomain *pwrdm,
+ struct powerdomain_time_stats *stats, struct seq_file *s)
+{
+ int i;
+ u64 total = 0;
+
+ seq_printf(s, "%s (%s)", pwrdm->name,
+ pwrdm_state_names[pwrdm->state]);
+
+ for (i = 0; i < 4; i++)
+ total += stats->state[i];
+
+ for (i = 0; i < 4; i++)
+ seq_printf(s, ",%s:%lld (%lld%%)", pwrdm_state_names[i],
+ stats->state[i],
+ total ? div64_u64(stats->state[i] * 100, total) : 0);
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user)
{
struct seq_file *s = (struct seq_file *)user;
- int i;
if (strcmp(pwrdm->name, "emu_pwrdm") == 0 ||
strcmp(pwrdm->name, "wkup_pwrdm") == 0 ||
@@ -403,18 +464,7 @@
printk(KERN_ERR "pwrdm state mismatch(%s) %d != %d\n",
pwrdm->name, pwrdm->state, pwrdm_read_pwrst(pwrdm));
- seq_printf(s, "%s (%s)", pwrdm->name,
- pwrdm_state_names[pwrdm->state]);
- for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
- seq_printf(s, ",%s:%d", pwrdm_state_names[i],
- pwrdm->state_counter[i]);
-
- seq_printf(s, ",RET-LOGIC-OFF:%d", pwrdm->ret_logic_off_counter);
- for (i = 0; i < pwrdm->banks; i++)
- seq_printf(s, ",RET-MEMBANK%d-OFF:%d", i + 1,
- pwrdm->ret_mem_off_counter[i]);
-
- seq_printf(s, "\n");
+ pwrdm_dbg_show_count_stats(pwrdm, &pwrdm->count, s);
return 0;
}
@@ -422,7 +472,6 @@
static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user)
{
struct seq_file *s = (struct seq_file *)user;
- int i;
if (strcmp(pwrdm->name, "emu_pwrdm") == 0 ||
strcmp(pwrdm->name, "wkup_pwrdm") == 0 ||
@@ -431,20 +480,58 @@
pwrdm_state_switch(pwrdm);
- seq_printf(s, "%s (%s)", pwrdm->name,
- pwrdm_state_names[pwrdm->state]);
+ pwrdm_dbg_show_time_stats(pwrdm, &pwrdm->time, s);
- for (i = 0; i < 4; i++)
- seq_printf(s, ",%s:%lld", pwrdm_state_names[i],
- pwrdm->state_timer[i]);
+ return 0;
+}
- seq_printf(s, "\n");
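+
+/*
+ * The "last_count"/"last_time" debugfs files report deltas since their
+ * previous read: the running statistics are snapshotted into
+ * pwrdm->last_count / pwrdm->last_time on each read.
+ */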
+static int pwrdm_dbg_show_last_counter(struct powerdomain *pwrdm, void *user)
+{
+ struct seq_file *s = (struct seq_file *)user;
+ struct powerdomain_count_stats stats;
+ int i;
+
+ if (strcmp(pwrdm->name, "emu_pwrdm") == 0 ||
+ strcmp(pwrdm->name, "wkup_pwrdm") == 0 ||
+ strncmp(pwrdm->name, "dpll", 4) == 0)
+ return 0;
+
+ stats = pwrdm->count;
+ for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
+ stats.state[i] -= pwrdm->last_count.state[i];
+ for (i = 0; i < PWRDM_MAX_MEM_BANKS; i++)
+ stats.ret_mem_off[i] -= pwrdm->last_count.ret_mem_off[i];
+ stats.ret_logic_off -= pwrdm->last_count.ret_logic_off;
+
+ pwrdm->last_count = pwrdm->count;
+
+ pwrdm_dbg_show_count_stats(pwrdm, &stats, s);
+
+ return 0;
+}
+
+static int pwrdm_dbg_show_last_timer(struct powerdomain *pwrdm, void *user)
+{
+ struct seq_file *s = (struct seq_file *)user;
+ struct powerdomain_time_stats stats;
+ int i;
+
+ stats = pwrdm->time;
+ for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
+ stats.state[i] -= pwrdm->last_time.state[i];
+
+ pwrdm->last_time = pwrdm->time;
+
+ pwrdm_dbg_show_time_stats(pwrdm, &stats, s);
+
return 0;
}
static int pm_dbg_show_counters(struct seq_file *s, void *unused)
{
pwrdm_for_each(pwrdm_dbg_show_counter, s);
+ if (cpu_is_omap44xx())
+ seq_printf(s, "DEVICE-OFF:%d\n", omap4_device_off_counter);
clkdm_for_each(clkdm_dbg_show_counter, s);
return 0;
@@ -456,6 +543,18 @@
return 0;
}
+static int pm_dbg_show_last_counters(struct seq_file *s, void *unused)
+{
+ pwrdm_for_each(pwrdm_dbg_show_last_counter, s);
+ return 0;
+}
+
+static int pm_dbg_show_last_timers(struct seq_file *s, void *unused)
+{
+ pwrdm_for_each(pwrdm_dbg_show_last_timer, s);
+ return 0;
+}
+
static int pm_dbg_open(struct inode *inode, struct file *file)
{
switch ((int)inode->i_private) {
@@ -463,9 +562,15 @@
return single_open(file, pm_dbg_show_counters,
&inode->i_private);
case DEBUG_FILE_TIMERS:
- default:
return single_open(file, pm_dbg_show_timers,
&inode->i_private);
+ case DEBUG_FILE_LAST_COUNTERS:
+ return single_open(file, pm_dbg_show_last_counters,
+ &inode->i_private);
+ case DEBUG_FILE_LAST_TIMERS:
+ default:
+ return single_open(file, pm_dbg_show_last_timers,
+ &inode->i_private);
};
}
@@ -488,7 +593,7 @@
.release = single_release,
};
-int pm_dbg_regset_init(int reg_set)
+int __init pm_dbg_regset_init(int reg_set)
{
char name[2];
@@ -539,6 +644,47 @@
DEFINE_SIMPLE_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get,
pwrdm_suspend_set, "%llu\n");
+#ifdef CONFIG_PM_ADVANCED_DEBUG
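+/*
+ * Advanced debug: snapshot a user-selected window of PRM/CM1/CM2 registers
+ * across suspend. The window is programmed through the "saved_reg_addr" and
+ * "saved_reg_num" debugfs entries, captured by omap4_pm_suspend_save_regs()
+ * on the suspend path, and dumped when "saved_reg_show" is read.
+ */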
+static bool is_addr_valid(void)
+{
+ int saved_reg_addr_max = 0;
+
+	/* Only for OMAP4 for the time being */
+ if (!cpu_is_omap44xx())
+ return false;
+
+ saved_reg_num = (saved_reg_num > PM_DEBUG_MAX_SAVED_REGS) ?
+ PM_DEBUG_MAX_SAVED_REGS : saved_reg_num;
+
+ saved_reg_addr_max = saved_reg_addr + (saved_reg_num * 4) - 1;
+
+ if (saved_reg_addr >= PM_DEBUG_PRM_MIN &&
+ saved_reg_addr_max <= PM_DEBUG_PRM_MAX)
+ return true;
+ if (saved_reg_addr >= PM_DEBUG_CM1_MIN &&
+ saved_reg_addr_max <= PM_DEBUG_CM1_MAX)
+ return true;
+ if (saved_reg_addr >= PM_DEBUG_CM2_MIN &&
+ saved_reg_addr_max <= PM_DEBUG_CM2_MAX)
+ return true;
+ return false;
+}
+
+void omap4_pm_suspend_save_regs(void)
+{
+ int i = 0;
+ if (!saved_reg_num || !is_addr_valid())
+ return;
+
+ saved_reg_num_used = saved_reg_num;
+
+ for (i = 0; i < saved_reg_num; i++) {
+ saved_reg_buff[1][i] = omap_readl(saved_reg_addr + (i*4));
+ saved_reg_buff[0][i] = saved_reg_addr + (i*4);
+ }
+ return;
+}
+#endif
+
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir)
{
int i;
@@ -548,7 +694,7 @@
t = sched_clock();
for (i = 0; i < 4; i++)
- pwrdm->state_timer[i] = 0;
+ pwrdm->time.state[i] = 0;
pwrdm->timer = t;
@@ -567,7 +713,19 @@
{
u32 *option = data;
+ if (option == &enable_off_mode) {
+ enable_off_mode = off_mode_enabled;
+ }
+
*val = *option;
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ if (option == &saved_reg_addr) {
+ int i;
+ for (i = 0; i < saved_reg_num_used; i++)
+ pr_info(" %x = %x\n", saved_reg_buff[0][i],
+ saved_reg_buff[1][i]);
+ }
+#endif
return 0;
}
@@ -579,7 +737,10 @@
if (option == &wakeup_timer_milliseconds && val >= 1000)
return -EINVAL;
- *option = val;
+ if (cpu_is_omap443x() && omap_type() == OMAP2_DEVICE_TYPE_GP)
+ *option = 0;
+ else
+ *option = val;
if (option == &enable_off_mode) {
if (val)
@@ -595,7 +756,7 @@
DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
-static int pm_dbg_init(void)
+static int __init pm_dbg_init(void)
{
int i;
struct dentry *d;
@@ -604,9 +765,11 @@
if (pm_dbg_init_done)
return 0;
- if (cpu_is_omap34xx())
+ if (cpu_is_omap34xx()) {
pm_dbg_reg_modules = omap3_pm_reg_modules;
- else {
+ } else if (cpu_is_omap44xx()) {
+ /* Allow pm_dbg_init on OMAP4. */
+ } else {
printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
return -ENODEV;
}
@@ -619,9 +782,16 @@
d, (void *)DEBUG_FILE_COUNTERS, &debug_fops);
(void) debugfs_create_file("time", S_IRUGO,
d, (void *)DEBUG_FILE_TIMERS, &debug_fops);
+ (void) debugfs_create_file("last_count", S_IRUGO,
+ d, (void *)DEBUG_FILE_LAST_COUNTERS, &debug_fops);
+ (void) debugfs_create_file("last_time", S_IRUGO,
+ d, (void *)DEBUG_FILE_LAST_TIMERS, &debug_fops);
pwrdm_for_each(pwrdms_setup, (void *)d);
+ if (cpu_is_omap44xx())
+		goto skip_reg_debugfs;
+
pm_dbg_dir = debugfs_create_dir("registers", d);
if (IS_ERR(pm_dbg_dir))
return PTR_ERR(pm_dbg_dir);
@@ -637,15 +807,29 @@
}
- (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
- &enable_off_mode, &pm_dbg_option_fops);
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
+
+skip_reg_debugfs:
+#ifdef CONFIG_OMAP_ALLOW_OSWR
+ (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
+ &enable_off_mode, &pm_dbg_option_fops);
+#endif
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ (void) debugfs_create_file("saved_reg_show",
+ S_IRUGO | S_IWUSR, d, &saved_reg_addr,
+ &pm_dbg_option_fops);
+ debugfs_create_u32("saved_reg_addr", S_IRUGO | S_IWUGO, d,
+ &saved_reg_addr);
+ debugfs_create_u32("saved_reg_num", S_IRUGO | S_IWUGO, d,
+ &saved_reg_num);
+#endif
pm_dbg_init_done = 1;
return 0;
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 49486f5..4464039 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -24,30 +24,82 @@
#include "clockdomain.h"
#include "pm.h"
+/**
+ * struct omap2_pm_lp_description - Describe low power behavior of the system
+ * @oscillator_startup_time: Time rounded up to uSec for the oscillator to
+ * provide a stable clock from power on.
+ * @oscillator_shutdown_time: Time rounded up to uSec for oscillator to safely
+ * switch off.
+ * @pmic_startup_time: Time rounded up to uSec for the PMIC to
+ * be ready for operation from a low power
+ * state. Note: this is not the same as voltage
+ * rampup time; instead, consider the PMIC to be
+ * in its lowest power state (say OFF): this is
+ * the time required for it to become ready for
+ * its DCDCs or LDOs to start operation.
+ * @pmic_shutdown_time: Time rounded up to uSec for the PMIC to
+ * go to low power after the LDOs are pulled to
+ * the appropriate state. Note: this is not the
+ * same as voltage rampdown time; instead,
+ * consider the PMIC to have switched its LDOs
+ * down: this is the time taken to reach its
+ * lowest power state (say sleep/OFF).
+ *
+ * With complex systems like OMAP, we need a generic description of system
+ * behavior beyond the normal description of device/peripheral operation,
+ * which, in conjunction with other parameters, describes and controls the
+ * low power operation of the device. This information tends to be specific
+ * to every board.
+ */
+struct omap2_pm_lp_description {
+ u32 oscillator_startup_time;
+ u32 oscillator_shutdown_time;
+ u32 pmic_startup_time;
+ u32 pmic_shutdown_time;
+};
+
+/*
+ * Set the times to the maximum by default: we want to err towards the
+ * worst case. The rest of the system can populate these with more
+ * optimal values.
+ */
+static struct omap2_pm_lp_description _pm_lp_desc = {
+ .oscillator_startup_time = ULONG_MAX,
+ .oscillator_shutdown_time = ULONG_MAX,
+ .pmic_startup_time = ULONG_MAX,
+ .pmic_shutdown_time = ULONG_MAX,
+};
+
static struct omap_device_pm_latency *pm_lats;
static struct device *mpu_dev;
static struct device *iva_dev;
static struct device *l3_dev;
static struct device *dsp_dev;
+static struct device *fdif_dev;
+
+bool omap_pm_is_ready_status;
struct device *omap2_get_mpuss_device(void)
{
WARN_ON_ONCE(!mpu_dev);
return mpu_dev;
}
+EXPORT_SYMBOL(omap2_get_mpuss_device);
struct device *omap2_get_iva_device(void)
{
WARN_ON_ONCE(!iva_dev);
return iva_dev;
}
+EXPORT_SYMBOL(omap2_get_iva_device);
struct device *omap2_get_l3_device(void)
{
WARN_ON_ONCE(!l3_dev);
return l3_dev;
}
+EXPORT_SYMBOL(omap2_get_l3_device);
struct device *omap4_get_dsp_device(void)
{
@@ -56,6 +108,89 @@
}
EXPORT_SYMBOL(omap4_get_dsp_device);
+struct device *omap4_get_fdif_device(void)
+{
+ WARN_ON_ONCE(!fdif_dev);
+ return fdif_dev;
+}
+EXPORT_SYMBOL(omap4_get_fdif_device);
+
+/**
+ * omap_pm_get_osc_lp_time() - retrieve the oscillator low power times
+ * @tstart: pointer to startup time in uSec
+ * @tshut: pointer to shutdown time in uSec
+ *
+ * if the pointers are invalid, returns error, else
+ * populates the tstart and tshut values with the currently
+ * stored values.
+ */
+int omap_pm_get_osc_lp_time(u32 *tstart, u32 *tshut)
+{
+ if (!tstart || !tshut)
+ return -EINVAL;
+
+ *tstart = _pm_lp_desc.oscillator_startup_time;
+ *tshut = _pm_lp_desc.oscillator_shutdown_time;
+
+ return 0;
+}
+
+/**
+ * omap_pm_get_pmic_lp_time() - retrieve the PMIC time
+ * @tstart: pointer to startup time in uSec
+ * @tshut: pointer to shutdown time in uSec
+ *
+ * if the pointers are invalid, returns error, else
+ * populates the tstart and tshut values with the currently
+ * stored values.
+ */
+int omap_pm_get_pmic_lp_time(u32 *tstart, u32 *tshut)
+{
+ if (!tstart || !tshut)
+ return -EINVAL;
+
+ *tstart = _pm_lp_desc.pmic_startup_time;
+ *tshut = _pm_lp_desc.pmic_shutdown_time;
+
+ return 0;
+}
+
+/**
+ * omap_pm_set_osc_lp_time() - setup the system oscillator time
+ * @tstart: startup time rounded up to uSec
+ * @tshut: shutdown time rounded up to uSec
+ *
+ * All boards do need an oscillator for the device to function.
+ * The startup and shutdown times of these oscillators vary. Populate
+ * from the board file to optimize the timing.
+ * This function is meant to be used at boot-time configuration.
+ *
+ * NOTE: This API is intended to be invoked from board file
+ */
+void __init omap_pm_set_osc_lp_time(u32 tstart, u32 tshut)
+{
+ _pm_lp_desc.oscillator_startup_time = tstart;
+ _pm_lp_desc.oscillator_shutdown_time = tshut;
+}
+
+/**
+ * omap_pm_set_pmic_lp_time() - setup the pmic low power time
+ * @tstart: startup time rounded up to uSec
+ * @tshut: shutdown time rounded up to uSec
+ *
+ * Store the time for the PMIC to enter its lowest supported state.
+ * In the case of multiple PMICs on a platform, choose the one
+ * that ends the sequence for an LP state such as OFF and starts
+ * the wakeup sequence from that state, e.g. the PMIC that
+ * controls the core domain.
+ * This function is meant to be used at boot-time configuration.
+ */
+void __init omap_pm_set_pmic_lp_time(u32 tstart, u32 tshut)
+{
+ _pm_lp_desc.pmic_startup_time = tstart;
+ _pm_lp_desc.pmic_shutdown_time = tshut;
+}
+
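+/*
+ * Example (illustrative numbers only; real values come from the board's
+ * oscillator and PMIC datasheets):
+ *
+ *	omap_pm_set_osc_lp_time(4000, 1);
+ *	omap_pm_set_pmic_lp_time(1000, 500);
+ *
+ * would declare a 4000 uS oscillator startup / 1 uS shutdown budget and a
+ * 1000 uS PMIC startup / 500 uS shutdown budget, retrievable later via
+ * omap_pm_get_osc_lp_time() and omap_pm_get_pmic_lp_time().
+ */
+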
/* static int _init_omap_device(struct omap_hwmod *oh, void *user) */
static int _init_omap_device(char *name, struct device **new_dev)
{
@@ -90,6 +225,7 @@
_init_omap_device("l3_main_1", &l3_dev);
_init_omap_device("dsp", &dsp_dev);
_init_omap_device("iva", &iva_dev);
+ _init_omap_device("fdif", &fdif_dev);
} else {
_init_omap_device("l3_main", &l3_dev);
}
@@ -106,8 +242,9 @@
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
u32 cur_state;
- int sleep_switch = 0;
+ int sleep_switch = -1;
int ret = 0;
+ int hwsup = 0;
if (pwrdm == NULL || IS_ERR(pwrdm))
return -EINVAL;
@@ -127,6 +264,7 @@
(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
sleep_switch = LOWPOWERSTATE_SWITCH;
} else {
+ hwsup = clkdm_is_idle(pwrdm->pwrdm_clkdms[0]);
clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
pwrdm_wait_transition(pwrdm);
sleep_switch = FORCEWAKEUP_SWITCH;
@@ -142,7 +280,7 @@
switch (sleep_switch) {
case FORCEWAKEUP_SWITCH:
- if (pwrdm->pwrdm_clkdms[0]->flags & CLKDM_CAN_ENABLE_AUTO)
+ if (hwsup)
clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
else
clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
@@ -160,6 +298,45 @@
return ret;
}
+static int __init boot_volt_scale(struct voltagedomain *voltdm,
+ unsigned long boot_v)
+{
+ struct omap_volt_data *vdata;
+ int ret = 0;
+
+ vdata = omap_voltage_get_voltdata(voltdm, boot_v);
+ if (IS_ERR_OR_NULL(vdata)) {
+ pr_err("%s:%s: Bad New voltage data for %ld\n",
+ __func__, voltdm->name, boot_v);
+ return PTR_ERR(vdata);
+ }
+ /*
+ * DO NOT DO abb prescale -
+ * case 1: OPP needs FBB, bootloader configured FBB
+ * - doing a prescale results in bypass -> system fail
+ * case 2: OPP needs FBB, bootloader does not configure FBB
+ * - FBB will be configured in postscale
+ * case 3: OPP needs bypass, bootloader configures FBB
+ * - bypass will be configured in postscale
+ * case 4: OPP needs bypass, bootloader configured in bypass
+ * - bypass programming in postscale skipped
+ */
+ ret = voltdm_scale(voltdm, vdata);
+ if (ret) {
+ pr_err("%s: Fail set voltage(v=%ld)on vdd%s\n",
+ __func__, boot_v, voltdm->name);
+ return ret;
+ }
+ if (voltdm->abb) {
+ ret = omap_ldo_abb_post_scale(voltdm, vdata);
+ if (ret) {
+ pr_err("%s: Fail abb postscale(v=%ld)vdd%s\n",
+ __func__, boot_v, voltdm->name);
+ }
+ }
+ return ret;
+}
+
/*
* This API is to be called during init to put the various voltage
* domains to the voltage as per the opp table. Typically we boot up
@@ -174,14 +351,15 @@
struct voltagedomain *voltdm;
struct clk *clk;
struct opp *opp;
- unsigned long freq, bootup_volt;
+ unsigned long freq_cur, freq_valid, bootup_volt;
+ int ret = -EINVAL;
if (!vdd_name || !clk_name || !dev) {
printk(KERN_ERR "%s: Invalid parameters!\n", __func__);
goto exit;
}
- voltdm = omap_voltage_domain_lookup(vdd_name);
+ voltdm = voltdm_lookup(vdd_name);
if (IS_ERR(voltdm)) {
printk(KERN_ERR "%s: Unable to get vdd pointer for vdd_%s\n",
__func__, vdd_name);
@@ -195,25 +373,78 @@
goto exit;
}
- freq = clk->rate;
- clk_put(clk);
+ freq_cur = clk->rate;
+ freq_valid = freq_cur;
- opp = opp_find_freq_ceil(dev, &freq);
+ rcu_read_lock();
+ opp = opp_find_freq_ceil(dev, &freq_valid);
if (IS_ERR(opp)) {
- printk(KERN_ERR "%s: unable to find boot up OPP for vdd_%s\n",
- __func__, vdd_name);
- goto exit;
+ opp = opp_find_freq_floor(dev, &freq_valid);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ pr_err("%s: no boot OPP match for %ld on vdd_%s\n",
+ __func__, freq_cur, vdd_name);
+ ret = -ENOENT;
+ goto exit_ck;
+ }
}
bootup_volt = opp_get_voltage(opp);
+ rcu_read_unlock();
if (!bootup_volt) {
printk(KERN_ERR "%s: unable to find voltage corresponding"
"to the bootup OPP for vdd_%s\n", __func__, vdd_name);
- goto exit;
+ ret = -ENOENT;
+ goto exit_ck;
}
- omap_voltage_scale_vdd(voltdm, bootup_volt);
- return 0;
+	/*
+	 * Frequency and voltage have to be sequenced: when moving from a
+	 * lower to a higher frequency, raise the voltage first, then the
+	 * frequency, and vice versa. We assume that the voltage at boot
+	 * is the required voltage for the frequency it was set for.
+	 * NOTE: we can check the frequency, but there are numerous ways
+	 * to set the voltage. Play it safe and just set the voltage.
+	 */
+
+ if (freq_cur < freq_valid) {
+ ret = boot_volt_scale(voltdm, bootup_volt);
+ if (ret) {
+ pr_err("%s: Fail set voltage-%s(f=%ld v=%ld)on vdd%s\n",
+ __func__, vdd_name, freq_valid,
+ bootup_volt, vdd_name);
+ goto exit_ck;
+ }
+ }
+
+ /* Set freq only if there is a difference in freq */
+ if (freq_valid != freq_cur) {
+ ret = clk_set_rate(clk, freq_valid);
+ if (ret) {
+ pr_err("%s: Fail set clk-%s(f=%ld v=%ld)on vdd%s\n",
+ __func__, clk_name, freq_valid,
+ bootup_volt, vdd_name);
+ goto exit_ck;
+ }
+ }
+
+ if (freq_cur >= freq_valid) {
+ ret = boot_volt_scale(voltdm, bootup_volt);
+ if (ret) {
+ pr_err("%s: Fail set voltage-%s(f=%ld v=%ld)on vdd%s\n",
+ __func__, clk_name, freq_valid,
+ bootup_volt, vdd_name);
+ goto exit_ck;
+ }
+ }
+
+ ret = 0;
+exit_ck:
+ clk_put(clk);
+
+ if (!ret)
+ return 0;
exit:
printk(KERN_ERR "%s: Unable to put vdd_%s to its init voltage\n\n",
@@ -226,7 +457,7 @@
if (!cpu_is_omap34xx())
return;
- omap2_set_init_voltage("mpu", "dpll1_ck", mpu_dev);
+ omap2_set_init_voltage("mpu_iva", "dpll1_ck", mpu_dev);
omap2_set_init_voltage("core", "l3_ick", l3_dev);
}
@@ -235,8 +466,12 @@
if (!cpu_is_omap44xx())
return;
- omap2_set_init_voltage("mpu", "dpll_mpu_ck", mpu_dev);
- omap2_set_init_voltage("core", "l3_div_ck", l3_dev);
+ if (cpu_is_omap446x()) {
+ omap2_set_init_voltage("mpu", "virt_dpll_mpu_ck", mpu_dev);
+ } else {
+ omap2_set_init_voltage("mpu", "dpll_mpu_ck", mpu_dev);
+ }
+ omap2_set_init_voltage("core", "virt_l3_ck", l3_dev);
omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", iva_dev);
}
@@ -251,9 +486,8 @@
static int __init omap2_common_pm_late_init(void)
{
- /* Init the OMAP TWL parameters */
- omap3_twl_init();
- omap4_twl_init();
+ /* Init the OMAP PMIC parameters */
+ omap_pmic_data_init();
/* Init the voltage layer */
omap_voltage_late_init();
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 45bcfce..c9da6b9 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -17,10 +17,34 @@
extern void *omap3_secure_ram_storage;
extern void omap3_pm_off_mode_enable(int);
-extern void omap_sram_idle(void);
+extern void omap_sram_idle(bool suspend);
extern int omap3_can_sleep(void);
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
extern int omap3_idle_init(void);
+extern int omap4_idle_init(void);
+extern void omap4_enter_sleep(unsigned int cpu, unsigned int power_state,
+ bool suspend);
+extern void omap4_trigger_ioctrl(void);
+extern u32 omap4_device_off_counter;
+
+#ifdef CONFIG_PM
+extern void omap4_device_set_state_off(u8 enable);
+extern bool omap4_device_prev_state_off(void);
+extern bool omap4_device_next_state_off(void);
+extern void omap4_device_clear_prev_off_state(void);
+#else
+static inline void omap4_device_set_state_off(u8 enable)
+{
+}
+static inline bool omap4_device_prev_state_off(void)
+{
+ return false;
+}
+static inline bool omap4_device_next_state_off(void)
+{
+ return false;
+}
+static inline void omap4_device_clear_prev_off_state(void)
+{
+}
+#endif
#if defined(CONFIG_PM_OPP)
extern int omap3_opp_init(void);
@@ -36,6 +60,15 @@
}
#endif
+#ifdef CONFIG_PM
+int omap4_pm_cold_reset(char *reason);
+#else
+static inline int omap4_pm_cold_reset(char *reason)
+{
+	return -EINVAL;
+}
+#endif
+
/*
* cpuidle mach specific parameters
*
@@ -68,15 +101,18 @@
extern void omap2_pm_dump(int mode, int resume, unsigned int us);
extern void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds);
extern int omap2_pm_debug;
-extern u32 enable_off_mode;
extern u32 sleep_while_idle;
#else
#define omap2_pm_dump(mode, resume, us) do {} while (0);
#define omap2_pm_wakeup_on_timer(seconds, milliseconds) do {} while (0);
#define omap2_pm_debug 0
-#define enable_off_mode 0
#define sleep_while_idle 0
#endif
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+extern void omap4_pm_suspend_save_regs(void);
+#else
+static inline void omap4_pm_suspend_save_regs(void) { }
+#endif
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
@@ -125,19 +161,132 @@
static inline void omap_enable_smartreflex_on_init(void) {}
#endif
-#ifdef CONFIG_TWL4030_CORE
-extern int omap3_twl_init(void);
-extern int omap4_twl_init(void);
-extern int omap3_twl_set_sr_bit(bool enable);
+/**
+ * struct omap_pmic_map - Describe the OMAP PMIC data for OMAP
+ * @name: name of the voltage domain
+ * @pmic_data: pmic data associated with it
+ * @omap_chip: the OMAP chips this data maps to, initialized with OMAP_CHIP_INIT
+ * @special_action: callback for any specific action to take for that map
+ *
+ * Since we support multiple PMICs each potentially functioning on multiple
+ * OMAP devices, we describe the parameters in a map allowing us to reuse the
+ * data as necessary.
+ */
+struct omap_pmic_map {
+ char *name;
+ struct omap_voltdm_pmic *pmic_data;
+ struct omap_chip_id omap_chip;
+ int (*special_action)(struct voltagedomain *);
+};
+
+/**
+ * struct omap_pmic_description - Describe low power behavior of the PMIC
+ * @pmic_lp_tshut: Time rounded up to uSec for the PMIC to
+ * go to low power after the LDOs are pulled to
+ * the appropriate state. Note: this is not the
+ * same as voltage rampdown time; instead,
+ * consider the PMIC to have switched its LDOs
+ * down: this is the time taken to reach its
+ * lowest power state (say sleep/OFF).
+ * @pmic_lp_tstart: Time rounded up to uSec for the PMIC to
+ * be ready for operation from a low power
+ * state. Note: this is not the same as voltage
+ * rampup time; instead, consider the PMIC to be
+ * in its lowest power state (say OFF): this is
+ * the time required for it to become ready for
+ * its DCDCs or LDOs to start operation.
+ */
+struct omap_pmic_description {
+ u32 pmic_lp_tshut;
+ u32 pmic_lp_tstart;
+};
+
+#ifdef CONFIG_PM
+extern int omap_pmic_register_data(struct omap_pmic_map *map,
+ struct omap_pmic_description *desc);
#else
-static inline int omap3_twl_init(void)
+static inline int omap_pmic_register_data(struct omap_pmic_map *map,
+ struct omap_pmic_description *desc)
{
return -EINVAL;
}
-static inline int omap4_twl_init(void)
+#endif
+extern void omap_pmic_data_init(void);
+
+extern int omap_pmic_update(struct omap_pmic_map *tmp_map, char *name,
+ u32 old_chip_id, u32 new_chip_id);
+
+#ifdef CONFIG_TWL4030_CORE
+extern int omap_twl_init(void);
+extern int omap3_twl_set_sr_bit(bool enable);
+extern int omap_twl_pmic_update(char *name, u32 old_chip_id, u32 new_chip_id);
+#else
+static inline int omap_twl_init(void)
+{
+ return -EINVAL;
+}
+static inline int omap_twl_pmic_update(char *name, u32 old_chip_id,
+ u32 new_chip_id)
{
return -EINVAL;
}
#endif
+#ifdef CONFIG_OMAP_TPS6236X
+extern int omap_tps6236x_board_setup(bool use_62361, int gpio_vsel0,
+ int gpio_vsel1, int pull0, int pull1);
+extern int omap_tps6236x_init(void);
+
+extern int omap_tps6236x_update(char *name, u32 old_chip_id, u32 new_chip_id);
+#else
+static inline int omap_tps6236x_board_setup(bool use_62361, int gpio_vsel0,
+ int gpio_vsel1, int pull0, int pull1)
+{
+ return -EINVAL;
+}
+static inline int omap_tps6236x_init(void)
+{
+ return -EINVAL;
+}
+static inline int omap_tps6236x_update(char *name, u32 old_chip_id,
+ u32 new_chip_id)
+{
+ return -EINVAL;
+}
+#endif
+
+extern int omap4_ldo_trim_configure(void);
+
+#ifdef CONFIG_PM
+extern bool omap_pm_is_ready_status;
+/**
+ * omap_pm_is_ready() - tells if the OMAP PM framework has finished its init
+ *
+ * In a few cases, to sequence operations properly, we'd like to know whether
+ * OMAP's PM framework has completed all of its expected initialization.
+ */
+static inline bool omap_pm_is_ready(void)
+{
+ return omap_pm_is_ready_status;
+}
+extern int omap_pm_get_osc_lp_time(u32 *tstart, u32 *tshut);
+extern int omap_pm_get_pmic_lp_time(u32 *tstart, u32 *tshut);
+extern void omap_pm_set_osc_lp_time(u32 tstart, u32 tshut);
+extern void omap_pm_set_pmic_lp_time(u32 tstart, u32 tshut);
+#else
+static inline bool omap_pm_is_ready(void)
+{
+ return false;
+}
+static inline int omap_pm_get_osc_lp_time(u32 *tstart, u32 *tshut)
+{
+ return -EINVAL;
+}
+static inline int omap_pm_get_pmic_lp_time(u32 *tstart, u32 *tshut)
+{
+ return -EINVAL;
+}
+static inline void omap_pm_set_osc_lp_time(u32 tstart, u32 tshut) { }
+static inline void omap_pm_set_pmic_lp_time(u32 tstart, u32 tshut) { }
+#endif
#endif
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index df3ded6..faa8463 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -132,26 +132,12 @@
if (omap_irq_pending())
goto no_sleep;
- /* Block console output in case it is on one of the OMAP UARTs */
- if (!is_suspending())
- if (!console_trylock())
- goto no_sleep;
-
- omap_uart_prepare_idle(0);
- omap_uart_prepare_idle(1);
- omap_uart_prepare_idle(2);
-
/* Jump to SRAM suspend code */
omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_POWER));
- omap_uart_resume_idle(2);
- omap_uart_resume_idle(1);
- omap_uart_resume_idle(0);
-
- if (!is_suspending())
- console_unlock();
+ omap_uart_resume_idle();
no_sleep:
if (omap2_pm_debug) {
@@ -162,7 +148,7 @@
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
omap2_pm_dump(0, 1, tmp);
}
- omap2_gpio_resume_after_idle();
+ omap2_gpio_resume_after_idle(0);
clk_enable(osc_ck);
@@ -267,8 +253,6 @@
{
if (omap2_fclks_active())
return 0;
- if (!omap_uart_can_sleep())
- return 0;
if (osc_ck->usecount > 1)
return 0;
if (omap_dma_running())
@@ -319,7 +303,6 @@
mir1 = omap_readl(0x480fe0a4);
omap_writel(1 << 5, 0x480fe0ac);
- omap_uart_prepare_suspend();
omap2_enter_full_retention();
omap_writel(mir1, 0x480fe0a4);
@@ -518,6 +501,8 @@
suspend_set_ops(&omap_pm_ops);
pm_idle = omap2_pm_idle;
+ omap_pm_is_ready_status = true;
+
return 0;
}
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c155c9d..cb1e8d8 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -91,16 +91,6 @@
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;
-static inline void omap3_per_save_context(void)
-{
- omap_gpio_save_context();
-}
-
-static inline void omap3_per_restore_context(void)
-{
- omap_gpio_restore_context();
-}
-
static void omap3_enable_io_chain(void)
{
int timeout = 0;
@@ -146,7 +136,7 @@
/* Save the Interrupt controller context */
omap_intc_save_context();
/* Save the GPMC context */
- omap3_gpmc_save_context();
+ omap_gpmc_save_context();
/* Save the system control module context, padconf already save above*/
omap3_control_save_context();
omap_dma_global_context_save();
@@ -157,7 +147,7 @@
/* Restore the control module context, padconf restored by h/w */
omap3_control_restore_context();
/* Restore the GPMC context */
- omap3_gpmc_restore_context();
+ omap_gpmc_restore_context();
/* Restore the interrupt controller context */
omap_intc_restore_context();
omap_dma_global_context_restore();
@@ -216,6 +206,8 @@
wkst = omap2_prm_read_mod_reg(module, wkst_off);
wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
+
+ c += omap_uart_resume_idle();
if (wkst) {
iclk = omap2_cm_read_mod_reg(module, iclk_off);
fclk = omap2_cm_read_mod_reg(module, fclk_off);
@@ -336,7 +328,7 @@
set_cr(control_reg_value);
}
-void omap_sram_idle(void)
+void omap_sram_idle(bool suspend)
{
/* Variable to tell what needs to be saved and restored
* in omap_sram_idle*/
@@ -375,7 +367,6 @@
printk(KERN_ERR "Invalid mpu state in sram_idle\n");
return;
}
- pwrdm_pre_transition();
/* NEON control */
if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
@@ -391,27 +382,15 @@
omap3_enable_io_chain();
}
- /* Block console output in case it is on one of the OMAP UARTs */
- if (!is_suspending())
- if (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)
- if (!console_trylock())
- goto console_still_active;
-
+ pwrdm_pre_transition();
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
- omap_uart_prepare_idle(2);
- omap_uart_prepare_idle(3);
- omap2_gpio_prepare_for_idle(per_going_off);
- if (per_next_state == PWRDM_POWER_OFF)
- omap3_per_save_context();
+ omap2_gpio_prepare_for_idle(per_going_off, suspend);
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
- omap_uart_prepare_idle(0);
- omap_uart_prepare_idle(1);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -458,8 +437,6 @@
omap3_sram_restore_context();
omap2_sms_restore_context();
}
- omap_uart_resume_idle(0);
- omap_uart_resume_idle(1);
if (core_next_state == PWRDM_POWER_OFF)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
@@ -467,20 +444,14 @@
}
omap3_intc_resume_idle();
+ pwrdm_post_transition();
+
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
- omap2_gpio_resume_after_idle();
- if (per_prev_state == PWRDM_POWER_OFF)
- omap3_per_restore_context();
- omap_uart_resume_idle(2);
- omap_uart_resume_idle(3);
+ omap2_gpio_resume_after_idle(per_going_off);
}
- if (!is_suspending())
- console_unlock();
-
-console_still_active:
/* Disable IO-PAD and IO-CHAIN wakeup */
if (omap3_has_io_wakeup() &&
(per_next_state < PWRDM_POWER_ON ||
@@ -490,8 +461,6 @@
omap3_disable_io_chain();
}
- pwrdm_post_transition();
-
clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}
@@ -499,8 +468,6 @@
{
if (!sleep_while_idle)
return 0;
- if (!omap_uart_can_sleep())
- return 0;
return 1;
}
@@ -518,7 +485,7 @@
trace_power_start(POWER_CSTATE, 1, smp_processor_id());
trace_cpu_idle(1, smp_processor_id());
- omap_sram_idle();
+ omap_sram_idle(false);
trace_power_end(smp_processor_id());
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
@@ -549,10 +516,9 @@
goto restore;
}
- omap_uart_prepare_suspend();
omap3_intc_suspend();
- omap_sram_idle();
+ omap_sram_idle(true);
restore:
/* Restore next_pwrsts */
@@ -596,14 +562,12 @@
{
disable_hlt();
suspend_state = state;
- omap_uart_enable_irqs(0);
return 0;
}
static void omap3_pm_end(void)
{
suspend_state = PM_SUSPEND_ON;
- omap_uart_enable_irqs(1);
enable_hlt();
return;
}
@@ -956,6 +920,7 @@
}
omap3_save_scratchpad_contents();
+ omap_pm_is_ready_status = true;
err1:
return ret;
err2:
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 59a870b..b9e2bf1 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -1,8 +1,9 @@
/*
* OMAP4 Power Management Routines
*
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
* Rajendra Nayak <rnayak@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,28 +13,713 @@
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+
+#include <asm/hardware/gic.h>
+#include <mach/omap4-common.h>
+#include <plat/omap_hsi.h>
+#include <plat/common.h>
+#include <plat/temperature_sensor.h>
+#include <plat/usb.h>
+#include <plat/prcm.h>
+#include <plat/omap-pm.h>
+#include <plat/gpmc.h>
+#include <plat/dma.h>
+
+#include <mach/omap_fiq_debugger.h>
#include "powerdomain.h"
-#include <mach/omap4-common.h>
+#include "clockdomain.h"
+#include "pm.h"
+#include "prm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prminst44xx.h"
+#include "clock.h"
+#include "cm2_44xx.h"
+#include "cm1_44xx.h"
+#include "cm44xx.h"
+#include "cm-regbits-44xx.h"
+#include "cminst44xx.h"
+#include "scrm44xx.h"
+#include "prcm-debug.h"
+
+#include "smartreflex.h"
+#include "dvfs.h"
+#include "voltage.h"
+#include "vc.h"
+#include "control.h"
struct power_state {
struct powerdomain *pwrdm;
u32 next_state;
#ifdef CONFIG_SUSPEND
u32 saved_state;
+ u32 saved_logic_state;
#endif
struct list_head node;
};
static LIST_HEAD(pwrst_list);
+static struct powerdomain *mpu_pwrdm, *cpu0_pwrdm;
+static struct powerdomain *core_pwrdm, *per_pwrdm;
+
+static struct voltagedomain *mpu_voltdm, *iva_voltdm, *core_voltdm;
+
+static struct clockdomain *tesla_clkdm;
+static struct powerdomain *tesla_pwrdm;
+
+static struct clockdomain *emif_clkdm, *mpuss_clkdm;
+
+/* Yet unnamed erratum which requires AUTO RET to be disabled for the IVA PD */
+#define OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX BIT(1)
+
+/* Dynamic dependency cannot be enabled on 443x due to erratum i688 */
+#define OMAP4_PM_ERRATUM_MPU_EMIF_NO_DYNDEP_i688 BIT(3)
+/*
+ * Dynamic dependency cannot be enabled due to erratum i688 on devices
+ * beyond 443x.
+ * NOTE: this is NOT YET a confirmed erratum for 446x, but is provided here
+ * in anticipation. If a fix is found at a later date, the code using this
+ * can be removed.
+ * The workaround: enable the MPU->EMIF static dependency before WFI and
+ * disable it while coming out of WFI. This works around system hangs and
+ * lockups seen when only the MPU->EMIF dynamic dependency is set, while
+ * still allowing the dynamic dependency to be used in all active use cases
+ * for the corresponding power savings.
+ * TODO: once this is available as a final erratum, update with the proper
+ * fix.
+ */
+#define OMAP4_PM_ERRATUM_MPU_EMIF_NO_DYNDEP_IDLE_iXXX BIT(4)
+
+static u8 pm44xx_errata;
+#define is_pm44xx_erratum(erratum) (pm44xx_errata & OMAP4_PM_ERRATUM_##erratum)
+
+#define MAX_IOPAD_LATCH_TIME 1000
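+
+/*
+ * Latch the current I/O pad wakeup configuration: make sure GLOBAL_WUEN is
+ * set, then pulse WUCLK_CTRL (enable, wait for the status bit, disable,
+ * wait again) so the I/O wakeup logic picks up the new pad settings.
+ */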
+void omap4_trigger_ioctrl(void)
+{
+ int i = 0;
+
+ /* Enable GLOBAL_WUEN */
+ if (!omap4_cminst_read_inst_reg_bits(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET,
+ OMAP4430_GLOBAL_WUEN_MASK))
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK,
+ OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET);
+
+ /* Trigger WUCLKIN enable */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, OMAP4430_WUCLK_CTRL_MASK,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET);
+ omap_test_timeout((((omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET) &
+ OMAP4430_WUCLK_STATUS_MASK) >>
+ OMAP4430_WUCLK_STATUS_SHIFT) == 1),
+ MAX_IOPAD_LATCH_TIME, i);
+ if (i == MAX_IOPAD_LATCH_TIME)
+ pr_err("%s: Max IO latch time reached for WUCLKIN enable\n",
+ __func__);
+
+ /* Trigger WUCLKIN disable */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET);
+
+ /* Ensure this is cleared */
+ omap_test_timeout((((omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET) &
+ OMAP4430_WUCLK_STATUS_MASK) >>
+ OMAP4430_WUCLK_STATUS_SHIFT) == 0),
+ MAX_IOPAD_LATCH_TIME, i);
+ if (i == MAX_IOPAD_LATCH_TIME)
+ pr_err("%s: Max IO latch time reached for WUCLKIN disable\n",
+ __func__);
+ return;
+}
#ifdef CONFIG_SUSPEND
+/*
+ * This is a common low power function, called from both the suspend and
+ * cpuidle paths.
+ */
+
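+/*
+ * Rough sequence (summary of the code below): clear previous-state
+ * bookkeeping, bail out if DVFS is in progress, prepare GPIOs, apply the
+ * MPU<->EMIF static-dependency workaround where needed, disable SmartReflex
+ * and enable voltage auto-retention for the domains going down, save
+ * GPMC/DMA/SAR context when the whole device is heading to OFF, enter low
+ * power via omap4_enter_lowpower(), then undo everything in reverse order
+ * on wakeup.
+ */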
+void omap4_enter_sleep(unsigned int cpu, unsigned int power_state, bool suspend)
+{
+ int cpu0_next_state = PWRDM_POWER_ON;
+ int per_next_state = PWRDM_POWER_ON;
+ int core_next_state = PWRDM_POWER_ON;
+ int mpu_next_state = PWRDM_POWER_ON;
+ int ret;
+ int staticdep_wa_applied = 0;
+
+ pwrdm_clear_all_prev_pwrst(cpu0_pwrdm);
+ pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
+ pwrdm_clear_all_prev_pwrst(core_pwrdm);
+ pwrdm_clear_all_prev_pwrst(per_pwrdm);
+ omap4_device_clear_prev_off_state();
+
+ /*
+ * Just return if we detect a scenario where we conflict
+ * with DVFS
+ */
+ if (omap_dvfs_is_any_dev_scaling())
+ return;
+
+ cpu0_next_state = pwrdm_read_next_pwrst(cpu0_pwrdm);
+ per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
+ core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
+ mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
+
+ ret = omap2_gpio_prepare_for_idle(omap4_device_next_state_off(), suspend);
+ if (ret)
+ goto abort_gpio;
+
+ if (is_pm44xx_erratum(MPU_EMIF_NO_DYNDEP_IDLE_iXXX) &&
+ mpu_next_state <= PWRDM_POWER_INACTIVE) {
+ /* Configures MEMIF clockdomain in SW_WKUP */
+ if (clkdm_wakeup(emif_clkdm)) {
+ pr_err("%s: Failed to force wakeup of %s\n",
+ __func__, emif_clkdm->name);
+ } else {
+ /* Enable MPU-EMIF Static Dependency around WFI */
+ if (clkdm_add_wkdep(mpuss_clkdm, emif_clkdm))
+ pr_err("%s: Failed to Add wkdep %s->%s\n",
+ __func__, mpuss_clkdm->name,
+ emif_clkdm->name);
+ else
+ staticdep_wa_applied = 1;
+
+ /* Configures MEMIF clockdomain back to HW_AUTO */
+ clkdm_allow_idle(emif_clkdm);
+ }
+ }
+ if (mpu_next_state < PWRDM_POWER_INACTIVE) {
+ if (omap_sr_disable_reset_volt(mpu_voltdm))
+ goto abort_device_off;
+
+ omap_vc_set_auto_trans(mpu_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION);
+ }
+
+ if (core_next_state < PWRDM_POWER_ON) {
+ /*
+ * Note: IVA can hit RET outside of cpuidle and hence this is
+		 * not the optimal place to enable IVA AUTO RET. But since
+		 * enabling AUTO RET requires SR to be disabled, it is done
+		 * here for now. Needs a relook to see if this can be
+		 * optimized.
+ */
+ if (omap_sr_disable_reset_volt(iva_voltdm))
+ goto abort_device_off;
+ if (omap_sr_disable_reset_volt(core_voltdm))
+ goto abort_device_off;
+ omap_vc_set_auto_trans(core_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION);
+ if (!is_pm44xx_erratum(IVA_AUTO_RET_iXXX)) {
+ omap_vc_set_auto_trans(iva_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION);
+ }
+
+ omap_temp_sensor_prepare_idle();
+ }
+
+ if (omap4_device_next_state_off()) {
+ omap_gpmc_save_context();
+ omap_dma_global_context_save();
+ }
+
+ if (suspend && cpu_is_omap44xx())
+ omap4_pm_suspend_save_regs();
+
+ if (omap4_device_next_state_off()) {
+ /* Save the device context to SAR RAM */
+ if (omap4_sar_save())
+ goto abort_device_off;
+ omap4_sar_overwrite();
+ omap4_cm_prepare_off();
+ omap4_dpll_prepare_off();
+
+ /* Extend Non-EMIF I/O isolation */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_ISOOVR_EXTEND_MASK,
+ OMAP4430_ISOOVR_EXTEND_MASK, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET);
+ }
+
+ omap4_enter_lowpower(cpu, power_state);
+
+ if (omap4_device_prev_state_off()) {
+ /* Reconfigure the trim settings as well */
+ omap4_ldo_trim_configure();
+ omap4_dpll_resume_off();
+ omap4_cm_resume_off();
+#ifdef CONFIG_PM_DEBUG
+ omap4_device_off_counter++;
+#endif
+ }
+
+abort_device_off:
+ if (core_next_state < PWRDM_POWER_ON) {
+ /* See note above */
+ omap_vc_set_auto_trans(core_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+ if (!is_pm44xx_erratum(IVA_AUTO_RET_iXXX)) {
+ omap_vc_set_auto_trans(iva_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+ }
+
+ omap_temp_sensor_resume_idle();
+ omap_sr_enable(iva_voltdm,
+ omap_voltage_get_curr_vdata(iva_voltdm));
+ omap_sr_enable(core_voltdm,
+ omap_voltage_get_curr_vdata(core_voltdm));
+ }
+
+ if (omap4_device_prev_state_off()) {
+ omap_dma_global_context_restore();
+ omap_gpmc_restore_context();
+ }
+
+ omap2_gpio_resume_after_idle(omap4_device_next_state_off());
+
+ if (omap4_device_next_state_off()) {
+ /* Disable the extension of Non-EMIF I/O isolation */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_ISOOVR_EXTEND_MASK,
+ 0, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET);
+ }
+
+ if (mpu_next_state < PWRDM_POWER_INACTIVE) {
+ omap_vc_set_auto_trans(mpu_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+ omap_sr_enable(mpu_voltdm,
+ omap_voltage_get_curr_vdata(mpu_voltdm));
+ }
+
+ /*
+ * NOTE: is_pm44xx_erratum is not strictly required, but retained for
+	 * code context readability.
+ */
+ if (is_pm44xx_erratum(MPU_EMIF_NO_DYNDEP_IDLE_iXXX) &&
+ staticdep_wa_applied) {
+ /* Configures MEMIF clockdomain in SW_WKUP */
+ if (clkdm_wakeup(emif_clkdm))
+ pr_err("%s: Failed to force wakeup of %s\n",
+ __func__, emif_clkdm->name);
+ /* Disable MPU-EMIF Static Dependency on WFI exit */
+ else if (clkdm_del_wkdep(mpuss_clkdm, emif_clkdm))
+ pr_err("%s: Failed to remove wkdep %s->%s\n",
+ __func__, mpuss_clkdm->name,
+ emif_clkdm->name);
+ /* Configures MEMIF clockdomain back to HW_AUTO */
+ clkdm_allow_idle(emif_clkdm);
+ }
+
+abort_gpio:
+ return;
+}
+
+#ifdef CONFIG_PM_DEBUG
+#define GPIO_BANKS 6
+#define MODULEMODE_DISABLED 0x0
+#define MODULEMODE_AUTO 0x1
+
+static void _print_wakeirq(int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (irq == OMAP44XX_IRQ_LOCALTIMER)
+ pr_info("Resume caused by IRQ %d, localtimer\n", irq);
+ else if (!desc || !desc->action || !desc->action->name)
+ pr_info("Resume caused by IRQ %d\n", irq);
+ else
+ pr_info("Resume caused by IRQ %d, %s\n", irq,
+ desc->action->name);
+}
+
+static void _print_gpio_wakeirq(int irq)
+{
+ int bank = irq - OMAP44XX_IRQ_GPIO1;
+ int bit;
+ int gpioirq;
+ int restoremod = 0;
+ int timeout = 10;
+ u32 wken, irqst, gpio;
+ u32 clkctrl;
+ long unsigned int wkirqs;
+ void *gpio_base[GPIO_BANKS] = {
+ OMAP2_L4_IO_ADDRESS(0x4a310000),
+ OMAP2_L4_IO_ADDRESS(0x48055000),
+ OMAP2_L4_IO_ADDRESS(0x48057000),
+ OMAP2_L4_IO_ADDRESS(0x48059000),
+ OMAP2_L4_IO_ADDRESS(0x4805b000),
+ OMAP2_L4_IO_ADDRESS(0x4805d000),
+ };
+ void *gpio_clkctrl[GPIO_BANKS] = {
+ OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ };
+
+ /*
+ * GPIO1 is in CD_WKUP.
+ * GPIO2-6 are in CD_l4_PER.
+ *
+ * Both of these clock domains are static dependencies of
+ * the MPUSS clock domain (CD_CORTEXA9) and are guaranteed
+ * to be already enabled (_CLKSTCTRL.CLKTRCTRL = HW_AUTO).
+ *
+ * Ensure the GPIO module is enabled (_CLKCTRL.MODULEMODE =
+ * h/w managed). If not, will set it back to disabled when
+ * done.
+ */
+
+ clkctrl = __raw_readl(gpio_clkctrl[bank]);
+
+ if ((clkctrl & OMAP4430_MODULEMODE_MASK) !=
+ MODULEMODE_AUTO << OMAP4430_MODULEMODE_SHIFT) {
+ restoremod = 1;
+ __raw_writel((clkctrl & ~(OMAP4430_MODULEMODE_MASK)) |
+ MODULEMODE_AUTO << OMAP4430_MODULEMODE_SHIFT,
+ gpio_clkctrl[bank]);
+
+ while ((__raw_readl(gpio_clkctrl[bank]) &
+ OMAP4430_MODULEMODE_MASK) !=
+ MODULEMODE_AUTO << OMAP4430_MODULEMODE_SHIFT &&
+ --timeout)
+ udelay(5);
+
+ if (!timeout)
+ goto punt;
+ }
+
+ wken = __raw_readl(gpio_base[bank] + OMAP4_GPIO_IRQWAKEN0);
+ irqst = __raw_readl(gpio_base[bank] + OMAP4_GPIO_IRQSTATUS0);
+ wkirqs = irqst & wken;
+
+ if (!wkirqs)
+ wkirqs = irqst;
+
+ if (!wkirqs)
+ goto punt;
+
+ for_each_set_bit(bit, &wkirqs, 32) {
+ gpio = bit + bank * 32;
+ gpioirq = gpio_to_irq(gpio);
+
+ if (gpioirq < 0)
+ pr_info("Resume caused by GPIO %d\n", (int)gpio);
+ else
+ _print_wakeirq(gpioirq);
+ }
+
+ goto out;
+
+punt:
+ pr_info("Resume caused by IRQ %d, unknown GPIO%d interrupt\n", irq,
+ bank + 1);
+
+out:
+ if (restoremod)
+ __raw_writel(clkctrl, gpio_clkctrl[bank]);
+}
+
+#define CONTROL_PADCONF_WAKEUPEVENT_0 0x4a1001d8
+#define CONTROL_WKUP_PADCONF_WAKEUPEVENT_0 0x4a31E07C
+
+static void _print_prcm_wakeirq(int irq)
+{
+ int i, bit;
+ int iopad_wake_found = 0;
+ u32 prcm_irqs1, prcm_irqs2;
+ long unsigned int wkup_pad_event;
+
+ prcm_irqs1 = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+ prcm_irqs1 &= omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+ prcm_irqs2 = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+ prcm_irqs2 &= omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+
+ if (prcm_irqs1 & OMAP4430_IO_ST_MASK) {
+ for (i = 0; i <= 6; i++) {
+ long unsigned int wkevt =
+ omap_readl(CONTROL_PADCONF_WAKEUPEVENT_0 + i*4);
+
+ for_each_set_bit(bit, &wkevt, 32) {
+ pr_info("Resume caused by I/O pad: CONTROL_PADCONF_WAKEUPEVENT_%d[%d]\n",
+ i, bit);
+ iopad_wake_found = 1;
+ }
+ }
+ wkup_pad_event = omap_readl(CONTROL_WKUP_PADCONF_WAKEUPEVENT_0);
+ for_each_set_bit(bit, &wkup_pad_event, 25) {
+ pr_info("Resume caused by wakeup I/O pad: CONTROL_WKUP_PADCONF_WAKEUPEVENT_0[%d]\n", bit);
+ iopad_wake_found = 1;
+ }
+ }
+
+ if (prcm_irqs1 & ~OMAP4430_IO_ST_MASK || !iopad_wake_found ||
+ prcm_irqs2)
+ pr_info("Resume caused by IRQ %d, prcm: 0x%x 0x%x\n", irq,
+ prcm_irqs1, prcm_irqs2);
+}
+
+static void omap4_print_wakeirq(void)
+{
+ int irq;
+
+ irq = gic_cpu_read(GIC_CPU_HIGHPRI) & 0x3ff;
+
+ if ((irq == 1022) || (irq == 1023)) {
+ pr_info("GIC returns spurious interrupt for resume IRQ\n");
+ return;
+ }
+
+ if (irq >= OMAP44XX_IRQ_GPIO1 &&
+ irq <= OMAP44XX_IRQ_GPIO1 + GPIO_BANKS - 1)
+ _print_gpio_wakeirq(irq);
+ else if (irq == OMAP44XX_IRQ_PRCM)
+ _print_prcm_wakeirq(irq);
+ else
+ _print_wakeirq(irq);
+}
+#else
+static void omap4_print_wakeirq(void)
+{
+}
+#endif
+
+/**
+ * get_achievable_state() - Provide achievable state
+ * @available_states: what states are available
+ * @req_min_state: what state is the minimum we'd like to hit
+ * @is_parent_pd: is this a parent power domain?
+ *
+ * Power domains have varied capabilities. When attempting a low power
+ * state such as OFF/RET, a specific min requested state may not be
+ * supported on the power domain, in which case:
+ * a) if this power domain is a parent power domain, we do not intend
+ * for it to go to a lower power state (because we are not targeting it),
+ * so the next higher power state which is supported is returned.
+ * b) However, for all children power domains, we first try to match
+ * with a lower power domain state before attempting a higher state.
+ * This is because a combination of system power states where the
+ * parent PD's state is not in line with expectation can result in
+ * system instabilities.
+ */
+static inline u8 get_achievable_state(u8 available_states, u8 req_min_state,
+ bool is_parent_pd)
+{
+ u8 max_mask = 0xFF << req_min_state;
+ u8 min_mask = ~max_mask;
+
+ /* First see if we have an accurate match */
+ if (available_states & BIT(req_min_state))
+ return req_min_state;
+
+ /* See if a lower power state is possible on this child domain */
+ if (!is_parent_pd && available_states & min_mask)
+ return __ffs(available_states & min_mask);
+
+ if (available_states & max_mask)
+ return __ffs(available_states & max_mask);
+
+ return PWRDM_POWER_ON;
+}
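For context, the selection above works purely on the PWRDM_POWER_* bit positions (0 = OFF, 1 = RET, 2 = INACTIVE, 3 = ON, per powerdomain.h). The following standalone sketch is not part of the patch; it simply mirrors get_achievable_state() so it can be compiled in user space, and only illustrates how parent and child domains diverge when the requested state is unsupported.

#include <stdio.h>
#include <stdint.h>

#define PWRDM_POWER_OFF		0x0
#define PWRDM_POWER_RET		0x1
#define PWRDM_POWER_INACTIVE	0x2
#define PWRDM_POWER_ON		0x3

/* User-space mirror of get_achievable_state(), for illustration only. */
static uint8_t achievable(uint8_t available, uint8_t req_min, int is_parent)
{
	uint8_t max_mask = 0xFF << req_min;
	uint8_t min_mask = ~max_mask;

	if (available & (1 << req_min))			/* exact match */
		return req_min;
	if (!is_parent && (available & min_mask))	/* child: try lower first */
		return __builtin_ctz(available & min_mask);
	if (available & max_mask)			/* fall back to a higher state */
		return __builtin_ctz(available & max_mask);
	return PWRDM_POWER_ON;
}

int main(void)
{
	/* A domain that supports only OFF and ON, asked for RET: */
	uint8_t avail = (1 << PWRDM_POWER_OFF) | (1 << PWRDM_POWER_ON);

	/* Parent domains never go below the request, so ON (3) is chosen. */
	printf("parent: %u\n", (unsigned)achievable(avail, PWRDM_POWER_RET, 1));
	/* Child domains first try a lower state, so OFF (0) is chosen. */
	printf("child:  %u\n", (unsigned)achievable(avail, PWRDM_POWER_RET, 0));
	return 0;
}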
+
+/**
+ * omap4_configure_pwdm_suspend() - Program powerdomain on suspend
+ * @is_off_mode: is this an OFF mode transition?
+ *
+ * Program all powerdomains to the required power state: this logic
+ * takes the requested mode (OFF/RET) and translates it to logic and
+ * power states. It then walks the power domains and programs each
+ * domain to the requested state; if the requested state is not
+ * available, the nearest achievable state is chosen instead.
+ */
+static void omap4_configure_pwdm_suspend(bool is_off_mode)
+{
+ struct power_state *pwrst;
+ u32 state;
+ u32 logic_state, als;
+
+#ifdef CONFIG_OMAP_ALLOW_OSWR
+ if (is_off_mode) {
+ state = PWRDM_POWER_OFF;
+ logic_state = PWRDM_POWER_OFF;
+ } else {
+ state = PWRDM_POWER_RET;
+ logic_state = PWRDM_POWER_OFF;
+ }
+#else
+ state = PWRDM_POWER_RET;
+ logic_state = PWRDM_POWER_RET;
+#endif
+
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ bool parent_power_domain = false;
+
+ pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
+ pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
+
+ if ((!strcmp(pwrst->pwrdm->name, "cpu0_pwrdm")) ||
+ (!strcmp(pwrst->pwrdm->name, "cpu1_pwrdm")))
+ continue;
+ if (!strcmp(pwrst->pwrdm->name, "core_pwrdm") ||
+ !strcmp(pwrst->pwrdm->name, "mpu_pwrdm") ||
+ !strcmp(pwrst->pwrdm->name, "iva_pwrdm"))
+ parent_power_domain = true;
+ /*
+ * Write only to registers which are writable! Don't touch
+ * read-only/reserved registers. If pwrdm->pwrsts_logic_ret or
+ * pwrdm->pwrsts are 0, consider those power domains containing
+ * readonly/reserved registers which cannot be controlled by
+ * software.
+ */
+ if (pwrst->pwrdm->pwrsts_logic_ret) {
+ als =
+ get_achievable_state(pwrst->pwrdm->pwrsts_logic_ret,
+ logic_state, parent_power_domain);
+ if (als < pwrst->saved_logic_state)
+ pwrdm_set_logic_retst(pwrst->pwrdm, als);
+ }
+ if (pwrst->pwrdm->pwrsts) {
+ pwrst->next_state =
+ get_achievable_state(pwrst->pwrdm->pwrsts, state,
+ parent_power_domain);
+ if (pwrst->next_state < pwrst->saved_state)
+ omap_set_pwrdm_state(pwrst->pwrdm,
+ pwrst->next_state);
+ else
+ pwrst->next_state = pwrst->saved_state;
+ }
+ }
+}
+
+/**
+ * omap4_restore_pwdms_after_suspend() - Restore powerdomains after suspend
+ *
+ * Re-program all powerdomains to saved power domain states.
+ *
+ * returns 0 if all power domains hit targeted power state, -1 if any domain
+ * failed to hit targeted power state (status related to the actual restore
+ * is not returned).
+ */
+static int omap4_restore_pwdms_after_suspend(void)
+{
+ struct power_state *pwrst;
+ int cstate, pstate, ret = 0;
+
+ /* Restore next powerdomain state */
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ cstate = pwrdm_read_pwrst(pwrst->pwrdm);
+ pstate = pwrdm_read_prev_pwrst(pwrst->pwrdm);
+ if (pstate > pwrst->next_state) {
+ pr_info("Powerdomain (%s) didn't enter "
+ "target state %d Vs achieved state %d. "
+ "current state %d\n",
+ pwrst->pwrdm->name, pwrst->next_state,
+ pstate, cstate);
+ ret = -1;
+ }
+
+ /* If state already ON due to h/w dep, don't do anything */
+ if (cstate == PWRDM_POWER_ON)
+ continue;
+
+ /* If we have already achieved saved state, nothing to do */
+ if (cstate == pwrst->saved_state)
+ continue;
+
+ /* mpuss code takes care of this */
+ if ((!strcmp(pwrst->pwrdm->name, "cpu0_pwrdm")) ||
+ (!strcmp(pwrst->pwrdm->name, "cpu1_pwrdm")))
+ continue;
+
+ /*
+ * Skip pd program if saved state higher than current state
+ * Since we would have already returned if the state
+ * was ON, if the current state is yet another low power
+ * state, the PRCM specification clearly states that
+ * transition from a lower LP state to a higher LP state
+ * is forbidden.
+ */
+ if (pwrst->saved_state > cstate)
+ continue;
+
+ if (pwrst->pwrdm->pwrsts)
+ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
+
+ if (pwrst->pwrdm->pwrsts_logic_ret)
+ pwrdm_set_logic_retst(pwrst->pwrdm,
+ pwrst->saved_logic_state);
+ }
+
+ return ret;
+}
+
static int omap4_pm_suspend(void)
{
- do_wfi();
+ int ret = 0;
+
+ /*
+ * If any device was in the middle of a scale operation
+ * then abort, as we cannot predict which part of the scale
+ * operation we interrupted.
+ */
+ if (omap_dvfs_is_any_dev_scaling()) {
+ pr_err("%s: oops.. middle of scale op.. aborting suspend\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ /* Wakeup timer from suspend */
+ if (wakeup_timer_seconds || wakeup_timer_milliseconds)
+ omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
+ wakeup_timer_milliseconds);
+
+ omap4_configure_pwdm_suspend(off_mode_enabled);
+
+ /* Enable Device OFF */
+ if (off_mode_enabled)
+ omap4_device_set_state_off(1);
+
+ /*
+ * For MPUSS to hit power domain retention (CSWR or OSWR),
+ * the CPU0 and CPU1 power domains need to be in OFF or DORMANT
+ * state. For MPUSS to reach off-mode, the CPU0 and CPU1 power
+ * domains should be in OFF state.
+ * Only the master CPU follows the suspend path. All other CPUs follow
+ * the cpu-hotplug path in system wide suspend. On OMAP4, CPU power
+ * domain CSWR is not supported by hardware.
+ * More details can be found in OMAP4430 TRM section 4.3.4.2.
+ */
+ omap4_enter_sleep(0, PWRDM_POWER_OFF, true);
+ omap4_print_wakeirq();
+ prcmdebug_dump(PRCMDEBUG_LASTSLEEP);
+
+ /* Disable Device OFF state*/
+ if (off_mode_enabled)
+ omap4_device_set_state_off(0);
+
+ ret = omap4_restore_pwdms_after_suspend();
+
+ if (ret)
+ pr_err("Could not enter target state in pm_suspend\n");
+ else
+ pr_err("Successfully put all powerdomains to target state\n");
+
return 0;
}
@@ -71,8 +757,55 @@
.enter = omap4_pm_enter,
.valid = suspend_valid_only_mem,
};
+#else
+void omap4_enter_sleep(unsigned int cpu, unsigned int power_state) { return; }
#endif /* CONFIG_SUSPEND */
+/**
+ * omap4_pm_cold_reset() - Cold reset OMAP4
+ * @reason: why am I resetting.
+ *
+ * As per the TRM, it is recommended that we set all the power domains to
+ * ON state before we trigger cold reset.
+ */
+int omap4_pm_cold_reset(char *reason)
+{
+ struct power_state *pwrst;
+
+ /* Switch ON all pwrst registers */
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ if (pwrst->pwrdm->pwrsts_logic_ret)
+ pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_ON);
+ if (pwrst->pwrdm->pwrsts)
+ omap_set_pwrdm_state(pwrst->pwrdm, PWRDM_POWER_ON);
+ }
+
+ WARN(1, "Arch Cold reset has been triggered due to %s\n", reason);
+ omap4_prm_global_cold_sw_reset(); /* never returns */
+
+ /* If we reached here - something bad went on.. */
+ BUG();
+
+ /* make the compiler happy */
+ return -EINTR;
+}
+
+/*
+ * Enable hardware supervised mode for all clockdomains if it's
+ * supported. Initiate sleep transition for other clockdomains, if
+ * they are not used
+ */
+static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
+{
+ if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
+ clkdm_allow_idle(clkdm);
+ else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
+ atomic_read(&clkdm->usecount) == 0)
+ clkdm_sleep(clkdm);
+ return 0;
+}
+
+
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
struct power_state *pwrst;
@@ -83,11 +816,397 @@
pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
if (!pwrst)
return -ENOMEM;
+
pwrst->pwrdm = pwrdm;
- pwrst->next_state = PWRDM_POWER_ON;
+ if ((!strcmp(pwrdm->name, "mpu_pwrdm")) ||
+ (!strcmp(pwrdm->name, "core_pwrdm")) ||
+ (!strcmp(pwrdm->name, "cpu0_pwrdm")) ||
+ (!strcmp(pwrdm->name, "cpu1_pwrdm")))
+ pwrst->next_state = PWRDM_POWER_ON;
+ else
+ pwrst->next_state = PWRDM_POWER_RET;
list_add(&pwrst->node, &pwrst_list);
- return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state);
+ return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+}
+
+static int __init _voltdm_sum_time(struct voltagedomain *voltdm, void *user)
+{
+ struct omap_voltdm_pmic *pmic;
+ u32 *max_time = (u32 *)user;
+
+ if (!voltdm || !max_time) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ pmic = voltdm->pmic;
+ if (pmic) {
+ *max_time += pmic->on_volt / pmic->slew_rate;
+ *max_time += pmic->switch_on_time;
+ }
+
+ return 0;
+}
+
+static u32 __init _usec_to_val_scrm(unsigned long rate, u32 usec,
+ u32 shift, u32 mask)
+{
+ u32 val;
+
+ /* limit to max value */
+ val = ((mask >> shift) * 1000000) / rate;
+ if (usec > val)
+ usec = val;
+
+ /* convert the time in usec to cycles */
+ val = DIV_ROUND_UP(rate * usec, 1000000);
+ return (val << shift) & mask;
+
+}
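As a quick, standalone illustration (not part of the patch) of the conversion above: the helper first clamps the requested time to the largest value the register field can hold at the given clock rate, then rounds the cycle count up and shifts it into place. The 12-bit field used below is a hypothetical example, not a real SCRM register layout.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors _usec_to_val_scrm() so it can be run in user space. */
static unsigned int usec_to_val(unsigned long rate, unsigned int usec,
				unsigned int shift, unsigned int mask)
{
	unsigned int max_usec = ((mask >> shift) * 1000000) / rate;

	if (usec > max_usec)
		usec = max_usec;	/* clamp to what the field can represent */
	return (unsigned int)(DIV_ROUND_UP(rate * usec, 1000000) << shift) & mask;
}

int main(void)
{
	/* 1000 us on a 32768 Hz clock into a hypothetical 12-bit field:
	 * DIV_ROUND_UP(32768 * 1000, 1000000) = 33 cycles, i.e. 0x21. */
	printf("0x%x\n", usec_to_val(32768, 1000, 0, 0xfff));
	return 0;
}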
+
+static void __init syscontrol_setup_regs(void)
+{
+ u32 v;
+
+ /* Disable LPDDR VREF manual control and enable Auto control */
+ v = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_3);
+ v &= ~(OMAP4_LPDDR21_VREF_EN_CA_MASK | OMAP4_LPDDR21_VREF_EN_DQ_MASK);
+ v |= OMAP4_LPDDR21_VREF_AUTO_EN_CA_MASK | OMAP4_LPDDR21_VREF_AUTO_EN_DQ_MASK;
+ omap4_ctrl_pad_writel(v, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_3);
+
+ v = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_3);
+ v &= ~(OMAP4_LPDDR21_VREF_EN_CA_MASK | OMAP4_LPDDR21_VREF_EN_DQ_MASK);
+ v |= OMAP4_LPDDR21_VREF_AUTO_EN_CA_MASK | OMAP4_LPDDR21_VREF_AUTO_EN_DQ_MASK;
+ omap4_ctrl_pad_writel(v, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_3);
+
+ /*
+ * Workaround for CK differential IO PADn, PADp values due to bug in
+ * EMIF CMD phy.
+ */
+ v = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_2);
+ v &= ~OMAP4_LPDDR2IO1_GR10_WD_MASK;
+ omap4_ctrl_pad_writel(v, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO1_2);
+ v = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_2);
+ v &= ~OMAP4_LPDDR2IO2_GR10_WD_MASK;
+ omap4_ctrl_pad_writel(v, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_LPDDR2IO2_2);
+}
+
+static void __init prcm_setup_regs(void)
+{
+ struct clk *clk32k = clk_get(NULL, "sys_32k_ck");
+ unsigned long rate32k = 0;
+ u32 val, tshut, tstart;
+ u32 reset_delay_time = 0;
+
+ if (clk32k) {
+ rate32k = clk_get_rate(clk32k);
+ clk_put(clk32k);
+ } else {
+ pr_err("%s: no 32k clk!!!\n", __func__);
+ dump_stack();
+ }
+
+ /* Enable IO_ST interrupt */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_IO_ST_MASK, OMAP4430_IO_ST_MASK,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+
+ /*
+ * Errata ID: i608 Impacted OMAP4430 ES 1.0,2.0,2.1,2.2
+ * On OMAP4, Retention-Till-Access Memory feature is not working
+ * reliably and hardware recommondation is keep it disabled by
+ * default
+ */
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_DISABLE_RTA_EXPORT_MASK,
+ 0x1 << OMAP4430_DISABLE_RTA_EXPORT_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_SRAM_WKUP_SETUP_OFFSET);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_DISABLE_RTA_EXPORT_MASK,
+ 0x1 << OMAP4430_DISABLE_RTA_EXPORT_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_CORE_SETUP_OFFSET);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_DISABLE_RTA_EXPORT_MASK,
+ 0x1 << OMAP4430_DISABLE_RTA_EXPORT_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_MPU_SETUP_OFFSET);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_DISABLE_RTA_EXPORT_MASK,
+ 0x1 << OMAP4430_DISABLE_RTA_EXPORT_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_IVA_SETUP_OFFSET);
+
+ /* Allow SRAM LDO to enter RET during low power state*/
+ if (cpu_is_omap446x()) {
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_RETMODE_ENABLE_MASK,
+ 0x1 << OMAP4430_RETMODE_ENABLE_SHIFT, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_CORE_CTRL_OFFSET);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_RETMODE_ENABLE_MASK,
+ 0x1 << OMAP4430_RETMODE_ENABLE_SHIFT, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_MPU_CTRL_OFFSET);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_RETMODE_ENABLE_MASK,
+ 0x1 << OMAP4430_RETMODE_ENABLE_SHIFT, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_LDO_SRAM_IVA_CTRL_OFFSET);
+ }
+ /* Toggle CLKREQ in RET and OFF states */
+ omap4_prminst_write_inst_reg(0x2, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_CLKREQCTRL_OFFSET);
+
+ if (!rate32k)
+ goto no_32k;
+
+ /* Setup max clksetup time for oscillator */
+ omap_pm_get_osc_lp_time(&tstart, &tshut);
+ val = _usec_to_val_scrm(rate32k, tstart, OMAP4_SETUPTIME_SHIFT,
+ OMAP4_SETUPTIME_MASK);
+ val |= _usec_to_val_scrm(rate32k, tshut, OMAP4_DOWNTIME_SHIFT,
+ OMAP4_DOWNTIME_MASK);
+ omap4_prminst_write_inst_reg(val, OMAP4430_SCRM_PARTITION, 0x0,
+ OMAP4_SCRM_CLKSETUPTIME_OFFSET);
+
+ /*
+ * Setup OMAP WARMRESET time:
+ * we use the sum of each voltage domain setup times to handle
+ * the worst case condition where the device resets from OFF mode.
+ * hence we leave PRM_VOLTSETUP_WARMRESET alone as this is
+ * already part of RSTTIME1 we program in.
+ * in addition, to handle oscillator switch off and switch back on
+ * (in case WDT triggered while CLKREQ goes low), we also
+ * add in the additional latencies.
+ */
+ if (!voltdm_for_each(_voltdm_sum_time, (void *)&reset_delay_time)) {
+ reset_delay_time += tstart + tshut;
+ val = _usec_to_val_scrm(rate32k, reset_delay_time,
+ OMAP4430_RSTTIME1_SHIFT, OMAP4430_RSTTIME1_MASK);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_RSTTIME1_MASK, val,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_RSTTIME_OFFSET);
+ }
+
+ /* Setup max PMIC startup time */
+ omap_pm_get_pmic_lp_time(&tstart, &tshut);
+ val = _usec_to_val_scrm(rate32k, tstart, OMAP4_WAKEUPTIME_SHIFT,
+ OMAP4_WAKEUPTIME_MASK);
+ val |= _usec_to_val_scrm(rate32k, tshut, OMAP4_SLEEPTIME_SHIFT,
+ OMAP4_SLEEPTIME_MASK);
+ omap4_prminst_write_inst_reg(val, OMAP4430_SCRM_PARTITION, 0x0,
+ OMAP4_SCRM_PMICSETUPTIME_OFFSET);
+
+no_32k:
+ /*
+ * De-assert PWRREQ signal in Device OFF state
+ * 0x3: PWRREQ is de-asserted if all voltage domains are in
+ * OFF state. Conversely, PWRREQ is asserted upon any
+ * voltage domain entering or staying in ON or SLEEP or
+ * RET state.
+ */
+ omap4_prminst_write_inst_reg(0x3, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_PWRREQCTRL_OFFSET);
+
+}
+
+
+/* omap_pm_clear_dsp_wake_up - SW WA for hardcoded wakeup dependency
+* from HSI to DSP
+*
+* Due to a HW bug, the same SWakeup signal is used for both MPU and DSP.
+* Thus SWakeup will unexpectedly wake up the DSP domain even if nothing runs
+* on the DSP. Since the MPU is faster to process SWakeup, it acknowledges the
+* SWakeup to HSI before the DSP has completed its domain transition. This
+* leaves the DSP power domain in INTRANSITION state forever, and prevents the
+* DEVICE-OFF mode.
+*
+* The workaround, applied when a SWakeup is asserted from HSI to the MPU
+* (and DSP), consists of:
+* - force a DSP SW wakeup
+* - wait for the DSP module to be fully ON
+* - configure the DSP CLK CTRL to HW_AUTO
+* - wait for the DSP module to be OFF
+*
+* Note: we detect that a SWakeup is asserted to the MPU by checking whether an
+* interrupt is received while the HSI module is ON.
+*
+* Bug ref is HSI-C1BUG00106 : dsp swakeup generated by HSI same as mpu swakeup
+*/
+static void omap_pm_clear_dsp_wake_up(void)
+{
+ int ret;
+ int timeout = 10;
+
+ if (!tesla_pwrdm || !tesla_clkdm) {
+ WARN_ONCE(1, "%s: unable to use tesla workaround\n", __func__);
+ return;
+ }
+
+ ret = pwrdm_read_pwrst(tesla_pwrdm);
+ /*
+ * If current Tesla power state is in RET/OFF and not in transition,
+ * then not hit by errata.
+ */
+ if (ret <= PWRDM_POWER_RET) {
+ if (!(omap4_prminst_read_inst_reg(tesla_pwrdm->prcm_partition,
+ tesla_pwrdm->prcm_offs, OMAP4_PM_PWSTST)
+ & OMAP_INTRANSITION_MASK))
+ return;
+ }
+
+ if (clkdm_wakeup(tesla_clkdm))
+ pr_err("%s: Failed to force wakeup of %s\n", __func__,
+ tesla_clkdm->name);
+
+ /* This takes less than a few microseconds, hence in context */
+ pwrdm_wait_transition(tesla_pwrdm);
+
+ /*
+ * Check current power state of Tesla after transition, to make sure
+ * that Tesla is indeed turned ON.
+ */
+ ret = pwrdm_read_pwrst(tesla_pwrdm);
+ do {
+ pwrdm_wait_transition(tesla_pwrdm);
+ ret = pwrdm_read_pwrst(tesla_pwrdm);
+ } while ((ret < PWRDM_POWER_INACTIVE) && --timeout);
+
+ if (!timeout)
+ pr_err("%s: Tesla failed to transition to ON state!\n",
+ __func__);
+
+ timeout = 10;
+ clkdm_allow_idle(tesla_clkdm);
+
+ /* Ensure Tesla power state in OFF state */
+ ret = pwrdm_read_pwrst(tesla_pwrdm);
+ do {
+ pwrdm_wait_transition(tesla_pwrdm);
+ ret = pwrdm_read_pwrst(tesla_pwrdm);
+ } while ((ret >= PWRDM_POWER_INACTIVE) && --timeout);
+
+ if (!timeout)
+ pr_err("%s: Tesla failed to transition to OFF state\n",
+ __func__);
+
+}
+
+static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+{
+ u32 irqenable_mpu, irqstatus_mpu;
+
+ irqenable_mpu = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+ irqstatus_mpu = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+
+ /* Check if an IO_ST interrupt */
+ if (irqstatus_mpu & OMAP4430_IO_ST_MASK) {
+ /* Check if HSI caused the IO wakeup */
+ if (omap_hsi_is_io_wakeup_from_hsi()) {
+ omap_pm_clear_dsp_wake_up();
+ omap_hsi_wakeup(0);
+ }
+ omap_uart_resume_idle();
+ usbhs_wakeup();
+ omap_debug_uart_resume_idle();
+ omap4_trigger_ioctrl();
+ }
+
+ /* Clear the interrupt */
+ irqstatus_mpu &= irqenable_mpu;
+ omap4_prm_write_inst_reg(irqstatus_mpu, OMAP4430_PRM_OCP_SOCKET_INST,
+ OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * omap_default_idle() - implement a default idle for !CONFIG_CPUIDLE
+ *
+ * Implements OMAP4 memory, IO ordering requirements which can't be addressed
+ * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
+ * by secondary CPU with CONFIG_CPUIDLE.
+ */
+static void omap_default_idle(void)
+{
+ local_irq_disable();
+ local_fiq_disable();
+
+ omap_do_wfi();
+
+ local_fiq_enable();
+ local_irq_enable();
+}
+
+/**
+ * omap4_device_set_state_off() - setup device off state
+ * @enable: set to off or not.
+ *
+ * When Device OFF is enabled, the device is allowed to transition
+ * to off mode as soon as all power domains in the MPU, IVA
+ * and CORE voltage domains are in OFF or OSWR (open switch retention) state.
+ */
+void omap4_device_set_state_off(u8 enable)
+{
+#ifdef CONFIG_OMAP_ALLOW_OSWR
+ if (enable)
+ omap4_prminst_write_inst_reg(0x1 <<
+ OMAP4430_DEVICE_OFF_ENABLE_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET);
+ else
+#endif
+ omap4_prminst_write_inst_reg(0x0 <<
+ OMAP4430_DEVICE_OFF_ENABLE_SHIFT,
+ OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET);
+}
+
+/**
+ * omap4_device_prev_state_off:
+ * Returns true if the device hit OFF mode.
+ * This API checks whether OMAP is waking up from device OFF mode.
+ * There is no dedicated status bit for SW to read whether the last state
+ * entered was device OFF. To work around this, the CORE PD RFF context state
+ * is used, which is lost only when the device OFF state is hit.
+ */
+bool omap4_device_prev_state_off(void)
+{
+ u32 reg;
+
+ reg = omap4_prminst_read_inst_reg(core_pwrdm->prcm_partition,
+ core_pwrdm->prcm_offs,
+ OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET)
+ & OMAP4430_LOSTCONTEXT_RFF_MASK;
+
+ return reg ? true : false;
+}
+
+void omap4_device_clear_prev_off_state(void)
+{
+ omap4_prminst_write_inst_reg(OMAP4430_LOSTCONTEXT_RFF_MASK |
+ OMAP4430_LOSTCONTEXT_DFF_MASK,
+ core_pwrdm->prcm_partition,
+ core_pwrdm->prcm_offs,
+ OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET);
+}
+
+/**
+ * omap4_device_next_state_off:
+ * Returns true if the device's next state is OFF.
+ * This API checks whether OMAP is programmed for device OFF.
+ */
+bool omap4_device_next_state_off(void)
+{
+ return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET)
+ & OMAP4430_DEVICE_OFF_ENABLE_MASK ? true : false;
+}
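To make the intended use of the three helpers above concrete, here is a minimal resume-side sketch (not part of the patch). The restore calls mirror the sequence already used in the sleep-exit path earlier in this patch; exactly where omap4_device_clear_prev_off_state() is invoked in the real resume path is an assumption of this sketch.

	/* Sketch only: detect a wakeup from device OFF and rebuild context. */
	if (omap4_device_prev_state_off()) {
		/* RFF context was lost, so the device really hit OFF. */
		omap_dma_global_context_restore();
		omap_gpmc_restore_context();
		/* Assumed to be cleared once the wakeup is fully handled. */
		omap4_device_clear_prev_off_state();
	}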
+
+static void __init omap4_pm_setup_errata(void)
+{
+ /*
+ * Current understanding is that the following erratum impacts
+ * all OMAP4 silicon
+ */
+ if (cpu_is_omap44xx())
+ pm44xx_errata |= OMAP4_PM_ERRATUM_IVA_AUTO_RET_iXXX;
+ /* Dynamic Dependency errata for all silicon !=443x */
+ if (cpu_is_omap443x())
+ pm44xx_errata |= OMAP4_PM_ERRATUM_MPU_EMIF_NO_DYNDEP_i688;
+ else
+ pm44xx_errata |= OMAP4_PM_ERRATUM_MPU_EMIF_NO_DYNDEP_IDLE_iXXX;
}
/**
@@ -98,23 +1217,173 @@
*/
static int __init omap4_pm_init(void)
{
- int ret;
+ int ret = 0;
+ struct clockdomain *l3_1_clkdm;
+ struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per, *l4_cfg;
if (!cpu_is_omap44xx())
return -ENODEV;
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+ return -ENODEV;
+ }
+
pr_err("Power Management for TI OMAP4.\n");
+ /* setup the erratas */
+ omap4_pm_setup_errata();
+
+ prcm_setup_regs();
+ syscontrol_setup_regs();
+
+ ret = request_irq(OMAP44XX_IRQ_PRCM,
+ (irq_handler_t)prcm_interrupt_handler,
+ IRQF_NO_SUSPEND | IRQF_DISABLED, "prcm", NULL);
+ if (ret) {
+ printk(KERN_ERR "request_irq failed to register for 0x%x\n",
+ OMAP44XX_IRQ_PRCM);
+ goto err2;
+ }
ret = pwrdm_for_each(pwrdms_setup, NULL);
if (ret) {
pr_err("Failed to setup powerdomains\n");
goto err2;
}
+ /*
+ * On 4430:
+ * The dynamic dependency between MPUSS -> MEMIF and
+ * MPUSS -> L3_* and DUCATI -> L3_* doesn't work as expected.
+ * The hardware recommendation is to keep the above as static dependencies.
+ * Without this the system locks up or randomly crashes.
+ *
+ * On 4460:
+ * The dynamic dependency between MPUSS -> MEMIF doesn't work
+ * as expected if MPUSS OSWR is enabled in idle.
+ * The dynamic dependency between MPUSS -> L4 PER & CFG
+ * doesn't work as expected. The hardware recommendation is
+ * to keep the above dependencies. Without this the system locks up
+ * or randomly crashes.
+ */
+ mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
+ emif_clkdm = clkdm_lookup("l3_emif_clkdm");
+ l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
+ l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
+ ducati_clkdm = clkdm_lookup("ducati_clkdm");
+ l4_per = clkdm_lookup("l4_per_clkdm");
+ l4_cfg = clkdm_lookup("l4_cfg_clkdm");
+ if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
+ (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per) || (!l4_cfg))
+ goto err2;
+
+ /* With the i688 erratum, the MPU->EMIF static dependency can never be disabled. */
+ if (is_pm44xx_erratum(MPU_EMIF_NO_DYNDEP_i688))
+ ret |= clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
+
+ if (cpu_is_omap443x()) {
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_cfg);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l4_per);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l4_cfg);
+ if (ret) {
+ pr_err("Failed to add MPUSS -> L3/EMIF, DUCATI -> L3"
+ " and MPUSS -> L4* wakeup dependency\n");
+ goto err2;
+ }
+ pr_info("OMAP4 PM: Static dependency added between"
+ " MPUSS <-> EMIF, MPUSS <-> L4_PER/CFG"
+ " MPUSS <-> L3_MAIN_1.\n");
+ pr_info("OMAP4 PM: Static dependency added between"
+ " DUCATI <-> L4_PER/CFG and DUCATI <-> L3.\n");
+ } else if (cpu_is_omap446x()) {
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_cfg);
+
+ /* There appears to be a problem between the MPUSS and L3_1 */
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
+
+ /* There appears to be a problem between the Ducati and L3/L4 */
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l4_per);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l4_cfg);
+
+ if (ret) {
+ pr_err("Failed to add MPUSS and DUCATI -> "
+ "L4* and L3_1 wakeup dependency\n");
+ goto err2;
+ }
+ pr_info("OMAP4 PM: Static dependency added between"
+ " MPUSS and DUCATI <-> L4_PER/CFG and L3_1.\n");
+ }
+
+ (void) clkdm_for_each(clkdms_setup, NULL);
+
+ /* Get handles for VDD's for enabling/disabling SR */
+ mpu_voltdm = voltdm_lookup("mpu");
+ if (!mpu_voltdm) {
+ pr_err("%s: Failed to get voltdm for VDD MPU\n", __func__);
+ goto err2;
+ }
+ omap_vc_set_auto_trans(mpu_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+
+ iva_voltdm = voltdm_lookup("iva");
+ if (!iva_voltdm) {
+ pr_err("%s: Failed to get voltdm for VDD IVA\n", __func__);
+ goto err2;
+ }
+ omap_vc_set_auto_trans(iva_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+
+ core_voltdm = voltdm_lookup("core");
+ if (!core_voltdm) {
+ pr_err("%s: Failed to get voltdm for VDD CORE\n", __func__);
+ goto err2;
+ }
+ omap_vc_set_auto_trans(core_voltdm,
+ OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
+
+ ret = omap4_mpuss_init();
+ if (ret) {
+ pr_err("Failed to initialise OMAP4 MPUSS\n");
+ goto err2;
+ }
+
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */
+ mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
+ cpu0_pwrdm = pwrdm_lookup("cpu0_pwrdm");
+ core_pwrdm = pwrdm_lookup("core_pwrdm");
+ per_pwrdm = pwrdm_lookup("l4per_pwrdm");
+ tesla_pwrdm = pwrdm_lookup("tesla_pwrdm");
+ if (!tesla_pwrdm)
+ pr_err("%s: Failed to lookup tesla_pwrdm\n", __func__);
+
+ tesla_clkdm = clkdm_lookup("tesla_clkdm");
+ if (!tesla_clkdm)
+ pr_err("%s: Failed to lookup tesla_clkdm\n", __func__);
+
+ /* Enable wakeup for PRCM IRQ for system wide suspend */
+ enable_irq_wake(OMAP44XX_IRQ_PRCM);
+
+ /* Overwrite the default arch_idle() */
+ pm_idle = omap_default_idle;
+
+ omap4_idle_init();
+
+ omap_pm_is_ready_status = true;
+ /* let the other CPU know as well */
+ smp_wmb();
+
err2:
return ret;
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 9af0847..5aff165 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -19,6 +19,8 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/slab.h>
+
#include <trace/events/power.h>
#include "cm2xxx_3xxx.h"
@@ -76,7 +78,7 @@
*/
static int _pwrdm_register(struct powerdomain *pwrdm)
{
- int i;
+ struct voltagedomain *voltdm;
if (!pwrdm || !pwrdm->name)
return -EINVAL;
@@ -94,19 +96,44 @@
if (_pwrdm_lookup(pwrdm->name))
return -EEXIST;
+ voltdm = voltdm_lookup(pwrdm->voltdm.name);
+ if (!voltdm) {
+ pr_err("powerdomain: %s: voltagedomain %s does not exist\n",
+ pwrdm->name, pwrdm->voltdm.name);
+ return -EINVAL;
+ }
+ pwrdm->voltdm.ptr = voltdm;
+ INIT_LIST_HEAD(&pwrdm->voltdm_node);
+ voltdm_add_pwrdm(voltdm, pwrdm);
+
list_add(&pwrdm->node, &pwrdm_list);
- /* Initialize the powerdomain's state counter */
- for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
- pwrdm->state_counter[i] = 0;
+ /*
+ * Program all powerdomain target states as ON; this is to
+ * prevent domains from hitting low power states (if the bootloader
+ * has target states set to something other than ON) and potentially
+ * even losing context while PM is not fully initialized.
+ * The PM late init code can then program the desired target
+ * state for all the power domains.
+ */
+ pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_ON);
- pwrdm->ret_logic_off_counter = 0;
- for (i = 0; i < pwrdm->banks; i++)
- pwrdm->ret_mem_off_counter[i] = 0;
+ /* Initialize the powerdomain's state counter */
+ memset(&pwrdm->count, 0, sizeof(pwrdm->count));
+ memset(&pwrdm->last_count, 0, sizeof(pwrdm->last_count));
+ memset(&pwrdm->time, 0, sizeof(pwrdm->time));
+ memset(&pwrdm->last_time, 0, sizeof(pwrdm->last_time));
pwrdm_wait_transition(pwrdm);
pwrdm->state = pwrdm_read_pwrst(pwrdm);
- pwrdm->state_counter[pwrdm->state] = 1;
+ pwrdm->count.state[pwrdm->state] = 1;
+
+ /* Initialize priority ordered list for wakeup latency constraint */
+ spin_lock_init(&pwrdm->wakeuplat_lock);
+ plist_head_init(&pwrdm->wakeuplat_dev_list);
+
+ /* wakeuplat_mutex protects wakeuplat_dev_list add and del ops */
+ mutex_init(&pwrdm->wakeuplat_mutex);
pr_debug("powerdomain: registered %s\n", pwrdm->name);
@@ -119,16 +146,18 @@
u8 prev_logic_pwrst, prev_mem_pwrst;
prev_logic_pwrst = pwrdm_read_prev_logic_pwrst(pwrdm);
+
+ /* Fake logic off counter */
if ((pwrdm->pwrsts_logic_ret == PWRSTS_OFF_RET) &&
- (prev_logic_pwrst == PWRDM_POWER_OFF))
- pwrdm->ret_logic_off_counter++;
+ (pwrdm_read_logic_retst(pwrdm) == PWRDM_POWER_OFF))
+ pwrdm->count.ret_logic_off++;
for (i = 0; i < pwrdm->banks; i++) {
prev_mem_pwrst = pwrdm_read_prev_mem_pwrst(pwrdm, i);
if ((pwrdm->pwrsts_mem_ret[i] == PWRSTS_OFF_RET) &&
(prev_mem_pwrst == PWRDM_POWER_OFF))
- pwrdm->ret_mem_off_counter[i]++;
+ pwrdm->count.ret_mem_off[i]++;
}
}
@@ -149,7 +178,7 @@
case PWRDM_STATE_PREV:
prev = pwrdm_read_prev_pwrst(pwrdm);
if (pwrdm->state != prev)
- pwrdm->state_counter[prev]++;
+ pwrdm->count.state[prev]++;
if (prev == PWRDM_POWER_RET)
_update_logic_membank_counters(pwrdm);
/*
@@ -169,7 +198,7 @@
}
if (state != prev)
- pwrdm->state_counter[state]++;
+ pwrdm->count.state[state]++;
pm_dbg_update_time(pwrdm, prev);
@@ -196,7 +225,7 @@
/**
* pwrdm_init - set up the powerdomain layer
* @pwrdm_list: array of struct powerdomain pointers to register
- * @custom_funcs: func pointers for arch specific implementations
+ * @custom_funcs: func pointers for arch specific implementations
*
* Loop through the array of powerdomains @pwrdm_list, registering all
* that are available on the current CPU. If pwrdm_list is supplied
@@ -208,6 +237,7 @@
{
struct powerdomain **p = NULL;
+
if (!custom_funcs)
WARN(1, "powerdomain: No custom pwrdm functions registered\n");
else
@@ -383,6 +413,18 @@
}
/**
+ * pwrdm_get_voltdm - return a ptr to the voltdm that this pwrdm resides in
+ * @pwrdm: struct powerdomain *
+ *
+ * Return a pointer to the struct voltageomain that the specified powerdomain
+ * @pwrdm exists in.
+ */
+struct voltagedomain *pwrdm_get_voltdm(struct powerdomain *pwrdm)
+{
+ return pwrdm->voltdm.ptr;
+}
+
+/**
* pwrdm_get_mem_bank_count - get number of memory banks in this powerdomain
* @pwrdm: struct powerdomain *
*
@@ -935,25 +977,31 @@
* @pwrdm: struct powerdomain * to wait for
*
* Context loss count is the sum of powerdomain off-mode counter, the
- * logic off counter and the per-bank memory off counter. Returns 0
+ * logic off counter and the per-bank memory off counter. Returns negative
* (and WARNs) upon error, otherwise, returns the context loss count.
*/
-u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
+int pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
{
int i, count;
if (!pwrdm) {
WARN(1, "powerdomain: %s: pwrdm is null\n", __func__);
- return 0;
+ return -ENODEV;
}
- count = pwrdm->state_counter[PWRDM_POWER_OFF];
- count += pwrdm->ret_logic_off_counter;
+ count = pwrdm->count.state[PWRDM_POWER_OFF];
+ count += pwrdm->count.ret_logic_off;
for (i = 0; i < pwrdm->banks; i++)
- count += pwrdm->ret_mem_off_counter[i];
+ count += pwrdm->count.ret_mem_off[i];
- pr_debug("powerdomain: %s: context loss count = %u\n",
+ /*
+ * Context loss count has to be a non-negative value. Clear the sign
+ * bit to get a value range from 0 to INT_MAX.
+ */
+ count &= INT_MAX;
+
+ pr_debug("powerdomain: %s: context loss count = %d\n",
pwrdm->name, count);
return count;
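Since the return type changes from u32 to int here, a typical consumer now has to treat negative values as "assume context was lost". A minimal sketch of that pattern (not part of the patch; struct foo_dev, foo->pwrdm, foo->ctx_loss_cnt and foo_restore_registers() are hypothetical driver pieces):

	static void foo_resume(struct foo_dev *foo)
	{
		int cnt = pwrdm_get_context_loss_count(foo->pwrdm);

		/* On error or when the count has moved, registers are stale. */
		if (cnt < 0 || cnt != foo->ctx_loss_cnt) {
			foo_restore_registers(foo);
			foo->ctx_loss_cnt = cnt;
		}
	}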
@@ -999,3 +1047,163 @@
return 0;
}
+
+
+/**
+ * pwrdm_wakeuplat_set_constraint - Set powerdomain wakeup latency constraint
+ * @pwrdm: struct powerdomain * to which requesting device belongs to
+ * @dev: struct device * of requesting device
+ * @t: wakeup latency constraint in microseconds
+ *
+ * Adds a new entry to the powerdomain's wakeup latency constraint list.
+ * If the requesting device already exists in the list, the old value is
+ * overwritten. Checks whether the current power state is still adequate.
+ * Returns -EINVAL if the powerdomain or device pointer is NULL,
+ * or -ENOMEM if the allocation fails, or -ERANGE if the constraint can't be met,
+ * or returns 0 upon success.
+ */
+int pwrdm_wakeuplat_set_constraint (struct powerdomain *pwrdm,
+ struct device *dev, unsigned long t)
+{
+ struct wakeuplat_dev_list *user;
+ int found = 0, ret = 0;
+
+ if (!pwrdm || !dev) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pwrdm->wakeuplat_mutex);
+
+ plist_for_each_entry(user, &pwrdm->wakeuplat_dev_list, node) {
+ if (user->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+
+ /* Add new entry to the list or update existing request */
+ if (found && user->constraint_us == t) {
+ goto exit_set;
+ } else if (!found) {
+ user = kzalloc(sizeof(struct wakeuplat_dev_list), GFP_KERNEL);
+ if (!user) {
+ pr_err("OMAP PM: FATAL ERROR: kzalloc failed\n");
+ ret = -ENOMEM;
+ goto exit_set;
+ }
+ user->dev = dev;
+ } else {
+ plist_del(&user->node, &pwrdm->wakeuplat_dev_list);
+ }
+
+ plist_node_init(&user->node, t);
+ spin_lock(&pwrdm->wakeuplat_lock);
+ plist_add(&user->node, &pwrdm->wakeuplat_dev_list);
+ spin_unlock(&pwrdm->wakeuplat_lock);
+ user->node.prio = user->constraint_us = t;
+
+ ret = pwrdm_wakeuplat_update_pwrst(pwrdm);
+
+exit_set:
+ mutex_unlock(&pwrdm->wakeuplat_mutex);
+
+ return ret;
+}
+
+/**
+ * pwrdm_wakeuplat_release_constraint - Release powerdomain wkuplat constraint
+ * @pwrdm: struct powerdomain * to which requesting device belongs to
+ * @dev: struct device * of requesting device
+ *
+ * Removes device's entry from powerdomain's wakeup latency constraint list.
+ * Checks whether current power state is still adequate.
+ * Returns -EINVAL if the powerdomain or device pointer is NULL or
+ * no such entry exists in the list, or returns 0 upon success.
+ */
+int pwrdm_wakeuplat_release_constraint (struct powerdomain *pwrdm,
+ struct device *dev)
+{
+ struct wakeuplat_dev_list *user;
+ int found = 0, ret = 0;
+
+ if (!pwrdm || !dev) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pwrdm->wakeuplat_mutex);
+
+ plist_for_each_entry(user, &pwrdm->wakeuplat_dev_list, node) {
+ if (user->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("OMAP PM: Error: no prior constraint to release\n");
+ ret = -EINVAL;
+ goto exit_rls;
+ }
+
+ spin_lock(&pwrdm->wakeuplat_lock);
+ plist_del(&user->node, &pwrdm->wakeuplat_dev_list);
+ spin_unlock(&pwrdm->wakeuplat_lock);
+ kfree(user);
+
+ ret = pwrdm_wakeuplat_update_pwrst(pwrdm);
+
+exit_rls:
+ mutex_unlock(&pwrdm->wakeuplat_mutex);
+
+ return ret;
+}
+
+/**
+ * pwrdm_wakeuplat_update_pwrst - Update power domain power state if needed
+ * @pwrdm: struct powerdomain * to which requesting device belongs to
+ *
+ * Finds the minimum latency value from all entries in the list and
+ * the power domain power state meeting the constraint. Programs the
+ * new state if it is different from the next power state.
+ * Returns -EINVAL if the powerdomain or device pointer is NULL or
+ * no such entry exists in the list, or -ERANGE if constraint can't be met,
+ * or returns 0 upon success.
+ */
+int pwrdm_wakeuplat_update_pwrst(struct powerdomain *pwrdm)
+{
+ struct plist_node *node;
+ int ret = 0, new_state;
+ unsigned long min_latency = -1;
+
+ if (!plist_head_empty(&pwrdm->wakeuplat_dev_list)) {
+ node = plist_last(&pwrdm->wakeuplat_dev_list);
+ min_latency = node->prio;
+ }
+
+ /* Find power state with wakeup latency < minimum constraint. */
+ for (new_state = 0x0; new_state < PWRDM_MAX_PWRSTS; new_state++) {
+ if (min_latency == -1 ||
+ pwrdm->wakeup_lat[new_state] < min_latency)
+ break;
+ }
+
+ /* No power state wakeup latency met the constraint. Keep power domain ON. */
+ if (new_state == PWRDM_MAX_PWRSTS) {
+ new_state = PWRDM_FUNC_PWRST_ON;
+ ret = -ERANGE;
+ }
+
+ if (pwrdm_read_next_pwrst(pwrdm) != new_state) {
+ if (cpu_is_omap44xx() || cpu_is_omap34xx())
+ omap_set_pwrdm_state(pwrdm, new_state);
+ }
+
+ pr_debug("OMAP PM: %s pwrst: curr= %d, prev= %d next= %d "
+ "wkuplat_min= %lu, state= %d\n", pwrdm->name,
+ pwrdm_read_pwrst(pwrdm), pwrdm_read_prev_pwrst(pwrdm),
+ pwrdm_read_next_pwrst(pwrdm), min_latency, new_state);
+
+ return ret;
+}
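A minimal usage sketch of the constraint API added above (not part of the patch): the powerdomain name "l4per_pwrdm" and the 300 us bound are arbitrary examples, and 'dev' is assumed to be the caller's struct device.

	struct powerdomain *pd = pwrdm_lookup("l4per_pwrdm");

	/* Keep the domain in a state that can wake up within 300 us. */
	if (pd && pwrdm_wakeuplat_set_constraint(pd, dev, 300))
		dev_warn(dev, "300 us wakeup latency cannot be guaranteed\n");

	/* ... latency-sensitive activity ... */

	if (pd)
		pwrdm_wakeuplat_release_constraint(pd, dev);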
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index d23d979..59aef2b 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -19,11 +19,16 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/plist.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <plat/cpu.h>
+#include "voltage.h"
+
/* Powerdomain basic power states */
#define PWRDM_POWER_OFF 0x0
#define PWRDM_POWER_RET 0x1
@@ -43,6 +48,18 @@
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
#define PWRSTS_OFF_RET_ON (PWRSTS_OFF_RET | PWRSTS_ON)
+#define PWRSTS_RET_INA_ON ((1 << PWRDM_POWER_RET) | \
+ (1 << PWRDM_POWER_INACTIVE) | \
+ (1 << PWRDM_POWER_ON))
+
+#define PWRSTS_OFF_INA_ON ((1 << PWRDM_POWER_OFF) | \
+ (1 << PWRDM_POWER_INACTIVE) | \
+ (1 << PWRDM_POWER_ON))
+
+#define PWRSTS_OFF_RET_INA_ON ((1 << PWRDM_POWER_OFF) | \
+ (1 << PWRDM_POWER_RET) | \
+ (1 << PWRDM_POWER_INACTIVE) | \
+ (1 << PWRDM_POWER_ON))
/* Powerdomain flags */
#define PWRDM_HAS_HDWR_SAR (1 << 0) /* hardware save-and-restore support */
@@ -72,12 +89,33 @@
/* XXX A completely arbitrary number. What is reasonable here? */
#define PWRDM_TRANSITION_BAILOUT 100000
+/* Powerdomain functional power states */
+#define PWRDM_FUNC_PWRST_OFF 0x0
+#define PWRDM_FUNC_PWRST_OSWR 0x1
+#define PWRDM_FUNC_PWRST_CSWR 0x2
+#define PWRDM_FUNC_PWRST_ON 0x3
+
+#define PWRDM_MAX_FUNC_PWRSTS 4
+
+#define UNSUP_STATE -1
+
struct clockdomain;
struct powerdomain;
+struct powerdomain_count_stats {
+ unsigned state[PWRDM_MAX_PWRSTS];
+ unsigned ret_logic_off;
+ unsigned ret_mem_off[PWRDM_MAX_MEM_BANKS];
+};
+
+struct powerdomain_time_stats {
+ s64 state[PWRDM_MAX_PWRSTS];
+};
+
/**
* struct powerdomain - OMAP powerdomain
* @name: Powerdomain name
+ * @voltdm: voltagedomain containing this powerdomain
* @omap_chip: represents the OMAP chip types containing this pwrdm
* @prcm_offs: the address offset from CM_BASE/PRM_BASE
* @prcm_partition: (OMAP4 only) the PRCM partition ID containing @prcm_offs
@@ -89,15 +127,22 @@
* @pwrsts_mem_on: Possible memory bank pwrstates when pwrdm in ON
* @pwrdm_clkdms: Clockdomains in this powerdomain
* @node: list_head linking all powerdomains
+ * @voltdm_node: list_head linking all powerdomains in a voltagedomain
* @state:
* @state_counter:
* @timer:
* @state_timer:
- *
- * @prcm_partition possible values are defined in mach-omap2/prcm44xx.h.
+ * @wakeup_lat: Wakeup latencies for possible powerdomain power states
+ * @wakeuplat_lock: spinlock for plist
+ * @wakeuplat_dev_list: plist_head linking all devices placing constraint
+ * @wakeuplat_mutex: mutex protecting @wakeuplat_dev_list add/del operations
+ *
+ * @prcm_partition possible values are defined in mach-omap2/prcm44xx.h.
*/
struct powerdomain {
const char *name;
+ union {
+ const char *name;
+ struct voltagedomain *ptr;
+ } voltdm;
const struct omap_chip_id omap_chip;
const s16 prcm_offs;
const u8 pwrsts;
@@ -109,15 +154,27 @@
const u8 prcm_partition;
struct clockdomain *pwrdm_clkdms[PWRDM_MAX_CLKDMS];
struct list_head node;
+ struct list_head voltdm_node;
int state;
- unsigned state_counter[PWRDM_MAX_PWRSTS];
- unsigned ret_logic_off_counter;
- unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS];
+
+ struct powerdomain_count_stats count;
+ struct powerdomain_count_stats last_count;
#ifdef CONFIG_PM_DEBUG
s64 timer;
- s64 state_timer[PWRDM_MAX_PWRSTS];
+ struct powerdomain_time_stats time;
+ struct powerdomain_time_stats last_time;
#endif
+ const u32 wakeup_lat[PWRDM_MAX_FUNC_PWRSTS];
+ spinlock_t wakeuplat_lock;
+ struct plist_head wakeuplat_dev_list;
+ struct mutex wakeuplat_mutex;
+};
+
+struct wakeuplat_dev_list {
+ struct device *dev;
+ unsigned long constraint_us;
+ struct plist_node node;
};
/**
@@ -176,6 +233,7 @@
int pwrdm_for_each_clkdm(struct powerdomain *pwrdm,
int (*fn)(struct powerdomain *pwrdm,
struct clockdomain *clkdm));
+struct voltagedomain *pwrdm_get_voltdm(struct powerdomain *pwrdm);
int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm);
@@ -207,7 +265,7 @@
int pwrdm_pre_transition(void);
int pwrdm_post_transition(void);
int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
-u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
+int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
extern void omap2xxx_powerdomains_init(void);
@@ -226,5 +284,10 @@
extern struct powerdomain wkup_omap2_pwrdm;
extern struct powerdomain gfx_omap2_pwrdm;
+int pwrdm_wakeuplat_set_constraint(struct powerdomain *pwrdm,
+ struct device *dev, unsigned long t);
+int pwrdm_wakeuplat_release_constraint(struct powerdomain *pwrdm,
+ struct device *dev);
+int pwrdm_wakeuplat_update_pwrst(struct powerdomain *pwrdm);
#endif
diff --git a/arch/arm/mach-omap2/powerdomain44xx.c b/arch/arm/mach-omap2/powerdomain44xx.c
index a7880af..c0aab2a 100644
--- a/arch/arm/mach-omap2/powerdomain44xx.c
+++ b/arch/arm/mach-omap2/powerdomain44xx.c
@@ -19,9 +19,13 @@
#include "powerdomain.h"
#include <plat/prcm.h>
#include "prm2xxx_3xxx.h"
+#include "cminst44xx.h"
#include "prm44xx.h"
+#include "prcm44xx.h"
#include "prminst44xx.h"
#include "prm-regbits-44xx.h"
+#include "cm-regbits-44xx.h"
+#include "cm2_44xx.h"
static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
{
@@ -207,6 +211,41 @@
return 0;
}
+static int omap4_pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm)
+{
+ /*
+ * FIXME: This should be fixed right way by moving it into HWMOD
+ * or clock framework since sar control is moved to module level
+ */
+ omap4_cminst_rmw_inst_reg_bits(OMAP4430_SAR_MODE_MASK,
+ 1 << OMAP4430_SAR_MODE_SHIFT, OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET);
+ omap4_cminst_rmw_inst_reg_bits(OMAP4430_SAR_MODE_MASK,
+ 1 << OMAP4430_SAR_MODE_SHIFT, OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET);
+ return 0;
+}
+
+static int omap4_pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm)
+{
+ /*
+ * FIXME: This should be fixed right way by moving it into HWMOD
+ * or clock framework since sar control is moved to module level
+ */
+ omap4_cminst_rmw_inst_reg_bits(OMAP4430_SAR_MODE_MASK,
+ 0 << OMAP4430_SAR_MODE_SHIFT, OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET);
+ omap4_cminst_rmw_inst_reg_bits(OMAP4430_SAR_MODE_MASK,
+ 0 << OMAP4430_SAR_MODE_SHIFT, OMAP4430_CM2_PARTITION,
+ OMAP4430_CM2_L3INIT_INST,
+ OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET);
+
+ return 0;
+}
+
struct pwrdm_ops omap4_pwrdm_operations = {
.pwrdm_set_next_pwrst = omap4_pwrdm_set_next_pwrst,
.pwrdm_read_next_pwrst = omap4_pwrdm_read_next_pwrst,
@@ -222,4 +261,6 @@
.pwrdm_set_mem_onst = omap4_pwrdm_set_mem_onst,
.pwrdm_set_mem_retst = omap4_pwrdm_set_mem_retst,
.pwrdm_wait_transition = omap4_pwrdm_wait_transition,
+ .pwrdm_enable_hdwr_sar = omap4_pwrdm_enable_hdwr_sar,
+ .pwrdm_disable_hdwr_sar = omap4_pwrdm_disable_hdwr_sar,
};
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
index 4210c33..2242c8e 100644
--- a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
@@ -70,6 +70,7 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
};
struct powerdomain wkup_omap2_pwrdm = {
@@ -77,4 +78,5 @@
.prcm_offs = WKUP_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
.pwrsts = PWRSTS_ON,
+ .voltdm = { .name = "wakeup" },
};
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_data.c
index cc389fb..274f64c 100644
--- a/arch/arm/mach-omap2/powerdomains2xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains2xxx_data.c
@@ -38,6 +38,7 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON,
},
+ .voltdm = { .name = "core" },
};
static struct powerdomain mpu_24xx_pwrdm = {
@@ -53,6 +54,7 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON,
},
+ .voltdm = { .name = "core" },
};
static struct powerdomain core_24xx_pwrdm = {
@@ -71,6 +73,7 @@
[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */
},
+ .voltdm = { .name = "core" },
};
@@ -95,6 +98,7 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
};
#endif /* CONFIG_SOC_OMAP2430 */
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index 469a920..b91224b 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -52,6 +52,13 @@
[2] = PWRSTS_OFF_ON,
[3] = PWRSTS_ON,
},
+ .voltdm = { .name = "mpu_iva" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1100,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 350,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain mpu_3xxx_pwrdm = {
@@ -68,6 +75,13 @@
.pwrsts_mem_on = {
[0] = PWRSTS_OFF_ON,
},
+ .voltdm = { .name = "mpu_iva" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 95,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 45,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/*
@@ -98,6 +112,13 @@
[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 100,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 60,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain core_3xxx_es3_1_pwrdm = {
@@ -121,6 +142,13 @@
[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 100,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 60,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain dss_pwrdm = {
@@ -136,6 +164,13 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 70,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 20,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/*
@@ -157,6 +192,13 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain cam_pwrdm = {
@@ -172,6 +214,13 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 850,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 35,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain per_pwrdm = {
@@ -187,12 +236,20 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 200,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 110,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain emu_pwrdm = {
.name = "emu_pwrdm",
.prcm_offs = OMAP3430_EMU_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+ .voltdm = { .name = "core" },
};
static struct powerdomain neon_pwrdm = {
@@ -201,6 +258,13 @@
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_RET,
+ .voltdm = { .name = "mpu_iva" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 200,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 35,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain usbhost_pwrdm = {
@@ -223,36 +287,48 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* MEMONSTATE */
},
+ .voltdm = { .name = "core" },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 800,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 150,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
static struct powerdomain dpll1_pwrdm = {
.name = "dpll1_pwrdm",
.prcm_offs = MPU_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+ .voltdm = { .name = "mpu_iva" },
};
static struct powerdomain dpll2_pwrdm = {
.name = "dpll2_pwrdm",
.prcm_offs = OMAP3430_IVA2_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+ .voltdm = { .name = "mpu_iva" },
};
static struct powerdomain dpll3_pwrdm = {
.name = "dpll3_pwrdm",
.prcm_offs = PLL_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+ .voltdm = { .name = "core" },
};
static struct powerdomain dpll4_pwrdm = {
.name = "dpll4_pwrdm",
.prcm_offs = PLL_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+ .voltdm = { .name = "core" },
};
static struct powerdomain dpll5_pwrdm = {
.name = "dpll5_pwrdm",
.prcm_offs = PLL_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+ .voltdm = { .name = "core" },
};
/* As powerdomains are added or removed above, this list must also be changed */
diff --git a/arch/arm/mach-omap2/powerdomains44xx_data.c b/arch/arm/mach-omap2/powerdomains44xx_data.c
index c4222c7..d645d68 100644
--- a/arch/arm/mach-omap2/powerdomains44xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains44xx_data.c
@@ -1,7 +1,7 @@
/*
* OMAP4 Power domains framework
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2011 Nokia Corporation
*
* Abhijit Pagare (abhijitpagare@ti.com)
@@ -33,10 +33,11 @@
/* core_44xx_pwrdm: CORE power domain */
static struct powerdomain core_44xx_pwrdm = {
.name = "core_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_CORE_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 5,
.pwrsts_mem_ret = {
@@ -54,15 +55,22 @@
[4] = PWRSTS_ON, /* ducati_unicache */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* gfx_44xx_pwrdm: 3D accelerator power domain */
static struct powerdomain gfx_44xx_pwrdm = {
.name = "gfx_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_GFX_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_INA_ON,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF, /* gfx_mem */
@@ -71,16 +79,22 @@
[0] = PWRSTS_ON, /* gfx_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* abe_44xx_pwrdm: Audio back end power domain */
static struct powerdomain abe_44xx_pwrdm = {
.name = "abe_pwrdm",
+ .voltdm = { .name = "iva" },
.prcm_offs = OMAP4430_PRM_ABE_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.banks = 2,
.pwrsts_mem_ret = {
[0] = PWRSTS_RET, /* aessmem */
@@ -91,16 +105,22 @@
[1] = PWRSTS_ON, /* periphmem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* dss_44xx_pwrdm: Display subsystem power domain */
static struct powerdomain dss_44xx_pwrdm = {
.name = "dss_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_DSS_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF, /* dss_mem */
@@ -109,15 +129,22 @@
[0] = PWRSTS_ON, /* dss_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* tesla_44xx_pwrdm: Tesla processor power domain */
static struct powerdomain tesla_44xx_pwrdm = {
.name = "tesla_pwrdm",
+ .voltdm = { .name = "iva" },
.prcm_offs = OMAP4430_PRM_TESLA_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 3,
.pwrsts_mem_ret = {
@@ -131,14 +158,21 @@
[2] = PWRSTS_ON, /* tesla_l2 */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* wkup_44xx_pwrdm: Wake-up power domain */
static struct powerdomain wkup_44xx_pwrdm = {
.name = "wkup_pwrdm",
+ .voltdm = { .name = "wakeup" },
.prcm_offs = OMAP4430_PRM_WKUP_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
.pwrsts = PWRSTS_ON,
.banks = 1,
.pwrsts_mem_ret = {
@@ -152,10 +186,11 @@
/* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
static struct powerdomain cpu0_44xx_pwrdm = {
.name = "cpu0_pwrdm",
+ .voltdm = { .name = "mpu" },
.prcm_offs = OMAP4430_PRCM_MPU_CPU0_INST,
.prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 1,
.pwrsts_mem_ret = {
@@ -164,15 +199,22 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* cpu0_l1 */
},
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
static struct powerdomain cpu1_44xx_pwrdm = {
.name = "cpu1_pwrdm",
+ .voltdm = { .name = "mpu" },
.prcm_offs = OMAP4430_PRCM_MPU_CPU1_INST,
.prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 1,
.pwrsts_mem_ret = {
@@ -181,15 +223,21 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* cpu1_l1 */
},
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* emu_44xx_pwrdm: Emulation power domain */
static struct powerdomain emu_44xx_pwrdm = {
.name = "emu_pwrdm",
+ .voltdm = { .name = "wakeup" },
.prcm_offs = OMAP4430_PRM_EMU_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF, /* emu_bank */
@@ -200,12 +248,13 @@
};
/* mpu_44xx_pwrdm: Modena processor and the Neon coprocessor power domain */
-static struct powerdomain mpu_44xx_pwrdm = {
+static struct powerdomain mpu_443x_pwrdm = {
.name = "mpu_pwrdm",
+ .voltdm = { .name = "mpu" },
.prcm_offs = OMAP4430_PRM_MPU_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP443X),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 3,
.pwrsts_mem_ret = {
@@ -218,16 +267,47 @@
[1] = PWRSTS_ON, /* mpu_l2 */
[2] = PWRSTS_ON, /* mpu_ram */
},
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
+};
+
+static struct powerdomain mpu_446x_pwrdm = {
+ .name = "mpu_pwrdm",
+ .voltdm = { .name = "mpu" },
+ .prcm_offs = OMAP4430_PRM_MPU_INST,
+ .prcm_partition = OMAP4430_PRM_PARTITION,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP446X),
+ .pwrsts = PWRSTS_RET_INA_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .banks = 2,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* mpu_l2 */
+ [1] = PWRSTS_RET, /* mpu_ram */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* mpu_l2 */
+ [1] = PWRSTS_ON, /* mpu_ram */
+ },
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* ivahd_44xx_pwrdm: IVA-HD power domain */
static struct powerdomain ivahd_44xx_pwrdm = {
.name = "ivahd_pwrdm",
+ .voltdm = { .name = "iva" },
.prcm_offs = OMAP4430_PRM_IVAHD_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_RET_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_RET_INA_ON,
.banks = 4,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF, /* hwa_mem */
@@ -242,15 +322,22 @@
[3] = PWRSTS_ON, /* tcm2_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* cam_44xx_pwrdm: Camera subsystem power domain */
static struct powerdomain cam_44xx_pwrdm = {
.name = "cam_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_CAM_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_INA_ON,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF, /* cam_mem */
@@ -259,15 +346,22 @@
[0] = PWRSTS_ON, /* cam_mem */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_CSWR] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* l3init_44xx_pwrdm: L3 initiators peripherals power domain */
static struct powerdomain l3init_44xx_pwrdm = {
.name = "l3init_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_L3INIT_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 1,
.pwrsts_mem_ret = {
@@ -276,16 +370,23 @@
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* l3init_bank1 */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE | PWRDM_HAS_HDWR_SAR,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = 1000,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/* l4per_44xx_pwrdm: Target peripherals power domain */
static struct powerdomain l4per_44xx_pwrdm = {
.name = "l4per_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_L4PER_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_RET_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_RET_INA_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
.banks = 2,
.pwrsts_mem_ret = {
@@ -297,6 +398,12 @@
[1] = PWRSTS_ON, /* retained_bank */
},
.flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .wakeup_lat = {
+ [PWRDM_FUNC_PWRST_OFF] = UNSUP_STATE,
+ [PWRDM_FUNC_PWRST_OSWR] = 600,
+ [PWRDM_FUNC_PWRST_CSWR] = 300,
+ [PWRDM_FUNC_PWRST_ON] = 0,
+ },
};
/*
@@ -305,19 +412,21 @@
*/
static struct powerdomain always_on_core_44xx_pwrdm = {
.name = "always_on_core_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_ALWAYS_ON_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
.pwrsts = PWRSTS_ON,
};
/* cefuse_44xx_pwrdm: Customer efuse controller power domain */
static struct powerdomain cefuse_44xx_pwrdm = {
.name = "cefuse_pwrdm",
+ .voltdm = { .name = "core" },
.prcm_offs = OMAP4430_PRM_CEFUSE_INST,
.prcm_partition = OMAP4430_PRM_PARTITION,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRSTS_OFF_ON,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP44XX),
+ .pwrsts = PWRSTS_OFF_INA_ON,
};
/*
@@ -339,7 +448,8 @@
&cpu0_44xx_pwrdm,
&cpu1_44xx_pwrdm,
&emu_44xx_pwrdm,
- &mpu_44xx_pwrdm,
+ &mpu_443x_pwrdm,
+ &mpu_446x_pwrdm,
&ivahd_44xx_pwrdm,
&cam_44xx_pwrdm,
&l3init_44xx_pwrdm,
diff --git a/arch/arm/mach-omap2/prcm-debug.c b/arch/arm/mach-omap2/prcm-debug.c
new file mode 100644
index 0000000..bbc67431
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm-debug.c
@@ -0,0 +1,1688 @@
+/*
+ * OMAP4 PRCM Debugging
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "prcm-debug.h"
+#include "prm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prminst44xx.h"
+#include "cm44xx.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cm-regbits-44xx.h"
+#include "cminst44xx.h"
+#include "prcm_mpu44xx.h"
+#include "powerdomain.h"
+
+/* DPLLs */
+
+struct d_dpll_info {
+ char *name;
+ void *idlestreg;
+ struct d_dpll_derived *derived[];
+};
+
+struct d_dpll_derived {
+ char *name;
+ void *gatereg;
+ u32 gatemask;
+};
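+
+/*
+ * Each DPLL is described by the register holding its idle status plus a
+ * NULL-terminated list of derived (M2..M7 divider) output clocks; a derived
+ * clock is reported as enabled when the bits in gatemask are set in gatereg.
+ */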
+
+static struct d_dpll_derived derived_dpll_per_m2 = {
+ .name = "DPLL_PER_M2",
+ .gatereg = OMAP4430_CM_DIV_M2_DPLL_PER,
+ .gatemask = 0xa00,
+};
+
+static struct d_dpll_derived derived_dpll_per_m3 = {
+ .name = "DPLL_PER_M3",
+ .gatereg = OMAP4430_CM_DIV_M3_DPLL_PER,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_per_m4 = {
+ .name = "DPLL_PER_M4",
+ .gatereg = OMAP4430_CM_DIV_M4_DPLL_PER,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_per_m5 = {
+ .name = "DPLL_PER_M5",
+ .gatereg = OMAP4430_CM_DIV_M5_DPLL_PER,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_per_m6 = {
+ .name = "DPLL_PER_M6",
+ .gatereg = OMAP4430_CM_DIV_M6_DPLL_PER,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_per_m7 = {
+ .name = "DPLL_PER_M7",
+ .gatereg = OMAP4430_CM_DIV_M7_DPLL_PER,
+ .gatemask = 0x200,
+};
+
+
+static struct d_dpll_info dpll_per = {
+ .name = "DPLL_PER",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_PER,
+ .derived = {&derived_dpll_per_m2, &derived_dpll_per_m3,
+ &derived_dpll_per_m4, &derived_dpll_per_m5,
+ &derived_dpll_per_m6, &derived_dpll_per_m7,
+ NULL},
+};
+
+static struct d_dpll_derived derived_dpll_core_m2 = {
+ .name = "DPLL_CORE_M2",
+ .gatereg = OMAP4430_CM_DIV_M2_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_core_m3 = {
+ .name = "DPLL_CORE_M3",
+ .gatereg = OMAP4430_CM_DIV_M3_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_core_m4 = {
+ .name = "DPLL_CORE_M4",
+ .gatereg = OMAP4430_CM_DIV_M4_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_core_m5 = {
+ .name = "DPLL_CORE_M5",
+ .gatereg = OMAP4430_CM_DIV_M5_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_core_m6 = {
+ .name = "DPLL_CORE_M6",
+ .gatereg = OMAP4430_CM_DIV_M6_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_derived derived_dpll_core_m7 = {
+ .name = "DPLL_CORE_M7",
+ .gatereg = OMAP4430_CM_DIV_M7_DPLL_CORE,
+ .gatemask = 0x200,
+};
+
+static struct d_dpll_info dpll_core = {
+ .name = "DPLL_CORE",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_CORE,
+ .derived = {&derived_dpll_core_m2, &derived_dpll_core_m3,
+ &derived_dpll_core_m4, &derived_dpll_core_m5,
+ &derived_dpll_core_m6, &derived_dpll_core_m7,
+ NULL},
+};
+
+static struct d_dpll_info dpll_abe = {
+ .name = "DPLL_ABE",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_ABE,
+ .derived = {/* &derived_dpll_abe_m2, &derived_dpll_abe_m3,
+ &derived_dpll_abe_m4, &derived_dpll_abe_m5,
+ &derived_dpll_abe_m6, &derived_dpll_abe_m7,
+ */ NULL},
+};
+
+static struct d_dpll_info dpll_mpu = {
+ .name = "DPLL_MPU",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_MPU,
+ .derived = {/* &derived_dpll_mpu_m2, */ NULL},
+};
+
+static struct d_dpll_info dpll_iva = {
+ .name = "DPLL_IVA",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_IVA,
+ .derived = {/* &derived_dpll_iva_m4, &derived_dpll_iva_m5, */ NULL},
+};
+
+static struct d_dpll_info dpll_usb = {
+ .name = "DPLL_USB",
+ .idlestreg = OMAP4430_CM_IDLEST_DPLL_USB,
+ .derived = {/* &derived_dpll_usb_m2, */ NULL},
+};
+
+
+/* Other internal generators */
+
+struct d_intgen_info {
+ char *name;
+ void *gatereg;
+ u32 gatemask;
+};
+
+static struct d_intgen_info intgen_cm1_abe = {
+ .name = "CM1_ABE",
+ .gatereg = OMAP4430_CM_CLKSEL_ABE,
+ .gatemask = 0x500,
+};
+
+
+
+/* Modules */
+
+#define MOD_MASTER (1 << 0)
+#define MOD_SLAVE (1 << 1)
+#define MOD_MODE (1 << 2)
+
+struct d_mod_info {
+ char *name;
+ void *clkctrl;
+ int flags;
+ int optclk;
+};
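+
+/*
+ * A module is described by its CM_*_CLKCTRL register.  The MOD_* flags select
+ * which CLKCTRL fields are meaningful for it (module mode, master standby
+ * status, slave idle status), and optclk is a mask of the optional functional
+ * clock enable bits within the same register.
+ */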
+
+static struct d_mod_info mod_debug = {
+ .name = "DEBUG",
+ .clkctrl = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_bandgap = {
+ .name = "BANDGAP",
+ .clkctrl = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ .flags = 0,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_gpio1 = {
+ .name = "GPIO1",
+ .clkctrl = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_keyboard = {
+ .name = "KEYBOARD",
+ .clkctrl = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sar_ram = {
+ .name = "SAR_RAM",
+ .clkctrl = OMAP4430_CM_WKUP_SARRAM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_32ktimer = {
+ .name = "32KTIMER",
+ .clkctrl = OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_gptimer1 = {
+ .name = "GPTIMER1",
+ .clkctrl = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_wdtimer2 = {
+ .name = "WDTIMER2",
+ .clkctrl = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l4_wkup = {
+ .name = "L4_WKUP",
+ .clkctrl = OMAP4430_CM_WKUP_L4WKUP_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sr_core = {
+ .name = "SR_CORE",
+ .clkctrl = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sr_iva = {
+ .name = "SR_IVA",
+ .clkctrl = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sr_mpu = {
+ .name = "SR_MPU",
+ .clkctrl = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_fdif = {
+ .name = "FACE DETECT",
+ .clkctrl = OMAP4430_CM_CAM_FDIF_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_iss = {
+ .name = "ISS",
+ .clkctrl = OMAP4430_CM_CAM_ISS_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_spinlock = {
+ .name = "SPINLOCK",
+ .clkctrl = OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l4_cfg_interconnect = {
+ .name = "L4_CFG interconnect",
+ .clkctrl = OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_mailbox = {
+ .name = "MAILBOX",
+ .clkctrl = OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sar_rom = {
+ .name = "SAR_ROM",
+ .clkctrl = OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_dmm = {
+ .name = "DMM",
+ .clkctrl = OMAP4430_CM_MEMIF_DMM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_emif_1 = {
+ .name = "EMIF_1",
+ .clkctrl = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_emif_2 = {
+ .name = "EMIF_2",
+ .clkctrl = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_emif_fw = {
+ .name = "EMIF_FW",
+ .clkctrl = OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_dll = {
+ .name = "DLL",
+ .clkctrl = OMAP4430_CM_MEMIF_DLL_CLKCTRL,
+ .flags = 0,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_cortexm3 = {
+ .name = "CORTEXM3",
+ .clkctrl = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_gpmc = {
+ .name = "GPMC",
+ .clkctrl = OMAP4430_CM_L3_2_GPMC_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l3_2_interconnect = {
+ .name = "L3_2 interconnect",
+ .clkctrl = OMAP4430_CM_L3_2_L3_2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_ocmc_ram = {
+ .name = "OCMC_RAM",
+ .clkctrl = OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l3_3_interconnect = {
+ .name = "L3_3 interconnect",
+ .clkctrl = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l3_instr_interconnect = {
+ .name = "L3_INSTR interconnect",
+ .clkctrl = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_wp1 = {
+ .name = "WP1",
+ .clkctrl = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_l3_1_interconnect = {
+ .name = "L3_1 interconnect",
+ .clkctrl = OMAP4430_CM_L3_1_L3_1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_c2c = {
+ .name = "C2C",
+ .clkctrl = OMAP4430_CM_D2D_SAD2D_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_c2c_fw = {
+ .name = "C2C_FW",
+ .clkctrl = OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+
+static struct d_mod_info mod_sdma = {
+ .name = "sDMA",
+ .clkctrl = OMAP4430_CM_SDMA_SDMA_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_dss = {
+ .name = "DSS",
+ .clkctrl = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0xf00,
+};
+
+static struct d_mod_info mod_sgx = {
+ .name = "SGX",
+ .clkctrl = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_hsi = {
+ .name = "HSI",
+ .clkctrl = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_hsmmc1 = {
+ .name = "HSMMC1",
+ .clkctrl = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_hsmmc2 = {
+ .name = "HSMMC2",
+ .clkctrl = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_usbphy = {
+ .name = "USBPHY",
+ .clkctrl = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_fsusb = {
+ .name = "FSUSB",
+ .clkctrl = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_hsusbhost = {
+ .name = "HSUSBHOST",
+ .clkctrl = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0xff00,
+};
+
+static struct d_mod_info mod_hsusbotg = {
+ .name = "HSUSBOTG",
+ .clkctrl = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x100,
+};
+
+static struct d_mod_info mod_usbtll = {
+ .name = "USBTLL",
+ .clkctrl = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x300,
+};
+
+static struct d_mod_info mod_gptimer10 = {
+ .name = "GPTIMER10",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_gptimer11 = {
+ .name = "GPTIMER11",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer2 = {
+ .name = "GPTIMER2",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer3 = {
+ .name = "GPTIMER3",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer4 = {
+ .name = "GPTIMER4",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer9 = {
+ .name = "GPTIMER9",
+ .clkctrl = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_elm = {
+ .name = "ELM",
+ .clkctrl = OMAP4430_CM_L4PER_ELM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gpio2 = {
+ .name = "GPIO2",
+ .clkctrl = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+static struct d_mod_info mod_gpio3 = {
+ .name = "GPIO3",
+ .clkctrl = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+static struct d_mod_info mod_gpio4 = {
+ .name = "GPIO4",
+ .clkctrl = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+static struct d_mod_info mod_gpio5 = {
+ .name = "GPIO5",
+ .clkctrl = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+static struct d_mod_info mod_gpio6 = {
+ .name = "GPIO6",
+ .clkctrl = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x100,
+};
+static struct d_mod_info mod_hdq = {
+ .name = "HDQ",
+ .clkctrl = OMAP4430_CM_L4PER_HDQ1W_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_i2c1 = {
+ .name = "I2C1",
+ .clkctrl = OMAP4430_CM_L4PER_I2C1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_i2c2 = {
+ .name = "I2C2",
+ .clkctrl = OMAP4430_CM_L4PER_I2C2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_i2c3 = {
+ .name = "I2C3",
+ .clkctrl = OMAP4430_CM_L4PER_I2C3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_i2c4 = {
+ .name = "I2C4",
+ .clkctrl = OMAP4430_CM_L4PER_I2C4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_l4_per_interconnect = {
+ .name = "L4_PER interconnect",
+ .clkctrl = OMAP4430_CM_L4PER_L4PER_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcbsp4 = {
+ .name = "MCBSP4",
+ .clkctrl = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcspi1 = {
+ .name = "MCSPI1",
+ .clkctrl = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcspi2 = {
+ .name = "MCSPI2",
+ .clkctrl = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcspi3 = {
+ .name = "MCSPI3",
+ .clkctrl = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcspi4 = {
+ .name = "MCSPI4",
+ .clkctrl = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_hsmmc3 = {
+ .name = "HSMMC3",
+ .clkctrl = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_hsmmc4 = {
+ .name = "HSMMC4",
+ .clkctrl = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_hsmmc5 = {
+ .name = "HSMMC5",
+ .clkctrl = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_slimbus2 = {
+ .name = "SLIMBUS2",
+ .clkctrl = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x700,
+};
+static struct d_mod_info mod_uart1 = {
+ .name = "UART1",
+ .clkctrl = OMAP4430_CM_L4PER_UART1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_uart2 = {
+ .name = "UART2",
+ .clkctrl = OMAP4430_CM_L4PER_UART2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_uart3 = {
+ .name = "UART3",
+ .clkctrl = OMAP4430_CM_L4PER_UART3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_uart4 = {
+ .name = "UART4",
+ .clkctrl = OMAP4430_CM_L4PER_UART4_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_audio_engine = {
+ .name = "AUDIO ENGINE",
+ .clkctrl = OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_dmic = {
+ .name = "DMIC",
+ .clkctrl = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_l4_abe_interconnect = {
+ .name = "L4_ABE interconnect",
+ .clkctrl = OMAP4430_CM1_ABE_L4ABE_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcasp1 = {
+ .name = "MCASP1",
+ .clkctrl = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcbsp1 = {
+ .name = "MCBSP1",
+ .clkctrl = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcbsp2 = {
+ .name = "MCBSP2",
+ .clkctrl = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcbsp3 = {
+ .name = "MCBSP3",
+ .clkctrl = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_mcpdm = {
+ .name = "MCPDM",
+ .clkctrl = OMAP4430_CM1_ABE_PDM_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_slimbus1 = {
+ .name = "SLIMBUS1",
+ .clkctrl = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0xf00,
+};
+static struct d_mod_info mod_gptimer5 = {
+ .name = "GPTIMER5",
+ .clkctrl = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer6 = {
+ .name = "GPTIMER6",
+ .clkctrl = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer7 = {
+ .name = "GPTIMER7",
+ .clkctrl = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_gptimer8 = {
+ .name = "GPTIMER8",
+ .clkctrl = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_wdtimer3 = {
+ .name = "WDTIMER3",
+ .clkctrl = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_ivahd = {
+ .name = "IVAHD",
+ .clkctrl = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+static struct d_mod_info mod_sl2 = {
+ .name = "SL2",
+ .clkctrl = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_dsp = {
+ .name = "DSP",
+ .clkctrl = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_cortexa9 = {
+ .name = "CORTEXA9",
+ .clkctrl = OMAP4430_CM_MPU_MPU_CLKCTRL,
+ .flags = MOD_MODE | MOD_MASTER | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+/* L4SEC modules not in TRM, below based on Linux code */
+
+static struct d_mod_info mod_aes1 = {
+ .name = "AES1",
+ .clkctrl = OMAP4430_CM_L4SEC_AES1_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_aes2 = {
+ .name = "AES2",
+ .clkctrl = OMAP4430_CM_L4SEC_AES2_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_des3des = {
+ .name = "DES3DES",
+ .clkctrl = OMAP4430_CM_L4SEC_DES3DES_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_pkaeip29 = {
+ .name = "PKAEIP29",
+ .clkctrl = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_rng = {
+ .name = "RNG",
+ .clkctrl = OMAP4430_CM_L4SEC_RNG_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_sha2md51 = {
+ .name = "SHA2MD51",
+ .clkctrl = OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+static struct d_mod_info mod_cryptodma = {
+ .name = "CRYPTODMA",
+ .clkctrl = OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL,
+ .flags = MOD_MODE | MOD_SLAVE,
+ .optclk = 0x0,
+};
+
+/* Clock domains */
+
+struct d_clkd_info {
+ char *name;
+ const u8 prcm_partition;
+ const s16 cm_inst;
+ const u16 clkdm_offs;
+ int activity;
+ struct d_dpll_info *dplls[20];
+ struct d_intgen_info *intgens[20];
+ struct d_mod_info *mods[];
+};
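+
+/*
+ * A clock domain is located by its CM partition/instance and CM_CLKSTCTRL
+ * offset; activity is a mask of the CLKACTIVITY status bits in that register.
+ * The dplls, intgens and mods arrays are NULL-terminated; a cm_inst of -1
+ * marks a "fake" domain (pure clock generator) with no CLKSTCTRL register.
+ */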
+
+static struct d_clkd_info cd_emu = {
+ .name = "CD_EMU",
+ .prcm_partition = OMAP4430_PRM_PARTITION,
+ .cm_inst = OMAP4430_PRM_EMU_CM_INST,
+ .clkdm_offs = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
+ .activity = 0x300,
+ .mods = {&mod_debug, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: CM1_EMU
+};
+
+static struct d_clkd_info cd_wkup = {
+ .name = "CD_WKUP",
+ .prcm_partition = OMAP4430_PRM_PARTITION,
+ .cm_inst = OMAP4430_PRM_WKUP_CM_INST,
+ .clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
+ .activity = 0x1b00,
+ .mods = {&mod_bandgap, &mod_gpio1, &mod_keyboard, &mod_sar_ram,
+ &mod_32ktimer, &mod_gptimer1, &mod_wdtimer2,
+ &mod_l4_wkup, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: SYSCTRL_PADCONF_WKUP, SYSCTRL_GENERAL_WKUP, PRM,
+ // SCRM
+};
+
+static struct d_clkd_info cd_l4_alwon_core = {
+ .name = "CD_L4_ALWON_CORE",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_ALWAYS_ON_INST,
+ .clkdm_offs = OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS,
+ .activity = 0xf00,
+ .mods = {&mod_sr_core, &mod_sr_iva, &mod_sr_mpu, NULL},
+ .dplls = {&dpll_per, &dpll_core, &dpll_abe, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: CM1, CORTEXM3_WKUPGEN, SDMA_WKUPGEN, SPINNER
+};
+
+static struct d_clkd_info cd_cam = {
+ .name = "CD_CAM",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CAM_INST,
+ .clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
+ .activity = 0x700,
+ .mods = {&mod_fdif, &mod_iss, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_l4_cfg = {
+ .name = "CD_L4_CFG",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_L4CFG_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_spinlock, &mod_l4_cfg_interconnect, &mod_mailbox,
+ &mod_sar_rom, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: SYSCTRL_PADCONF_CORE, SYSCTRL_GENERAL_CORE
+};
+
+static struct d_clkd_info cd_emif = {
+ .name = "CD_EMIF",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_MEMIF_CDOFFS,
+ .activity = 0x700,
+ .mods = {&mod_dmm, &mod_emif_1, &mod_emif_2, &mod_emif_fw,
+ &mod_dll, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: DDRPHY
+};
+
+static struct d_clkd_info cd_cortexm3 = {
+ .name = "CD_CORTEXM3",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_DUCATI_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_cortexm3, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_l3_2 = {
+ .name = "CD_L3_2",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_L3_2_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_gpmc, &mod_l3_2_interconnect, &mod_ocmc_ram, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_l3_instr = {
+ .name = "CD_L3_INSTR",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_L3INSTR_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_l3_3_interconnect, &mod_l3_instr_interconnect,
+ &mod_wp1, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_l3_1 = {
+ .name = "CD_L3_1",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_L3_1_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_l3_1_interconnect, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_c2c = {
+ .name = "CD_C2C",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_D2D_CDOFFS,
+ .activity = 0x700,
+ .mods = {&mod_c2c, &mod_c2c_fw, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_dma = {
+ .name = "CD_DMA",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_SDMA_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_sdma, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_dss = {
+ .name = "CD_DSS",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_DSS_INST,
+ .clkdm_offs = OMAP4430_CM2_DSS_DSS_CDOFFS,
+ .activity = 0xf00,
+ .mods = {&mod_dss, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_sgx = {
+ .name = "CD_SGX",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_GFX_INST,
+ .clkdm_offs = OMAP4430_CM2_GFX_GFX_CDOFFS,
+ .activity = 0x300,
+ .mods = {&mod_sgx, NULL},
+};
+
+static struct d_clkd_info cd_l3_init = {
+ .name = "CD_L3_INIT",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_L3INIT_INST,
+ .clkdm_offs = OMAP4430_CM2_L3INIT_L3INIT_CDOFFS,
+ .activity = 0x3ef7f300,
+ .mods = {&mod_hsi, &mod_hsmmc1, &mod_hsmmc2, &mod_usbphy,
+ &mod_fsusb, &mod_hsusbhost, &mod_hsusbotg, &mod_usbtll,
+ NULL},
+ .dplls = {&dpll_usb, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: CM1_USB
+};
+
+static struct d_clkd_info cd_l4_per = {
+ .name = "CD_L4_PER",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_L4PER_INST,
+ .clkdm_offs = OMAP4430_CM2_L4PER_L4PER_CDOFFS,
+ .activity = 0x24fff00,
+ .mods = {&mod_gptimer10, &mod_gptimer11, &mod_gptimer2,
+ &mod_gptimer3, &mod_gptimer4, &mod_gptimer9, &mod_elm,
+ &mod_gpio2, &mod_gpio3, &mod_gpio4, &mod_gpio5, &mod_gpio6,
+ &mod_hdq, &mod_i2c1, &mod_i2c2, &mod_i2c3, &mod_i2c4,
+ &mod_l4_per_interconnect, &mod_mcbsp4, &mod_mcspi1,
+ &mod_mcspi2, &mod_mcspi3, &mod_mcspi4, &mod_hsmmc3,
+ &mod_hsmmc4, &mod_hsmmc5, &mod_slimbus2, &mod_uart1,
+ &mod_uart2, &mod_uart3, &mod_uart4, NULL},
+ // TBD: Linux refs: I2C5
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_abe = {
+ .name = "CD_ABE",
+ .prcm_partition = OMAP4430_CM1_PARTITION,
+ .cm_inst = OMAP4430_CM1_ABE_INST,
+ .clkdm_offs = OMAP4430_CM1_ABE_ABE_CDOFFS,
+ .activity = 0x3400,
+ .mods = {&mod_audio_engine, &mod_dmic, &mod_l4_abe_interconnect,
+ &mod_mcasp1, &mod_mcbsp1, &mod_mcbsp2, &mod_mcbsp3,
+ &mod_mcpdm, &mod_slimbus1, &mod_gptimer5, &mod_gptimer6,
+ &mod_gptimer7, &mod_gptimer8, &mod_wdtimer3, NULL},
+ .intgens = {&intgen_cm1_abe, NULL},
+};
+
+static struct d_clkd_info cd_ivahd = {
+ .name = "CD_IVAHD",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_IVAHD_INST,
+ .clkdm_offs = OMAP4430_CM2_IVAHD_IVAHD_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_ivahd, &mod_sl2, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_dsp = {
+ .name = "CD_DSP",
+ .prcm_partition = OMAP4430_CM1_PARTITION,
+ .cm_inst = OMAP4430_CM1_TESLA_INST,
+ .clkdm_offs = OMAP4430_CM1_TESLA_TESLA_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_dsp, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_pd_alwon_mpu_fake = {
+ .name = "N/A (clock generator)",
+ .prcm_partition = -1,
+ .cm_inst = -1,
+ .clkdm_offs = -1,
+ .activity = 0x0,
+ .mods = {NULL},
+ .dplls = {&dpll_mpu, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: CORTEXA9_MPU_INTC
+};
+
+static struct d_clkd_info cd_pd_alwon_dsp_fake = {
+ .name = "N/A (clock generator)",
+ .prcm_partition = -1,
+ .cm_inst = -1,
+ .clkdm_offs = -1,
+ .activity = 0x0,
+ .mods = {NULL},
+ .dplls = {&dpll_iva, NULL},
+ .intgens = {NULL},
+ // TBD: TRM mentions: DSP_WKUPGEN
+};
+
+static struct d_clkd_info cd_cortexa9 = {
+ .name = "CD_CORTEXA9",
+ .prcm_partition = OMAP4430_CM1_PARTITION,
+ .cm_inst = OMAP4430_CM1_MPU_INST,
+ .clkdm_offs = OMAP4430_CM1_MPU_MPU_CDOFFS,
+ .activity = 0x100,
+ .mods = {&mod_cortexa9, NULL},
+ .intgens = {NULL},
+};
+
+static struct d_clkd_info cd_l4sec = {
+ .name = "CD_L4SEC",
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_L4PER_INST,
+ .clkdm_offs = OMAP4430_CM2_L4PER_L4SEC_CDOFFS,
+ .activity = 0x300,
+ .mods = {&mod_aes1, &mod_aes2, &mod_des3des, &mod_pkaeip29, &mod_rng,
+ &mod_sha2md51, &mod_cryptodma, NULL},
+ .intgens = {NULL},
+};
+
+#if 0 /* Don't appear to be valid */
+/* CD_MPU0 and MPU1 not in TRM, below based on Linux code. */
+
+static struct d_clkd_info cd_mpu0 = {
+ .name = "CD_MPU0",
+ .prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
+ .cm_inst = OMAP4430_PRCM_MPU_CPU0_INST,
+ .clkdm_offs = OMAP4430_PRCM_MPU_CPU0_CPU0_CDOFFS,
+ .activity = 0x0,
+ .mods = {NULL},
+ .intgens = {NULL}, // TBD: No docs
+};
+
+static struct d_clkd_info cd_mpu1 = {
+ .name = "CD_MPU1",
+ .prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
+ .cm_inst = OMAP4430_PRCM_MPU_CPU1_INST,
+ .clkdm_offs = OMAP4430_PRCM_MPU_CPU1_CPU1_CDOFFS,
+ .activity = 0x0,
+ .mods = {NULL},
+ .intgens = {NULL}, // TBD: No docs
+};
+#endif
+
+/* Power domains */
+
+struct d_pwrd_info {
+ char *name;
+ long prminst;
+ int pwrst;
+ struct d_clkd_info *cds[];
+};
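+
+/*
+ * A power domain is located by its PRM instance and PM_*_PWRSTST register
+ * offset (-1 for domains without a power state status register); cds is a
+ * NULL-terminated list of the clock domains it contains.
+ */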
+
+static struct d_pwrd_info pd_emu = {
+ .name = "PD_EMU",
+ .prminst = OMAP4430_PRM_EMU_INST,
+ .pwrst = OMAP4_PM_EMU_PWRSTST_OFFSET,
+ .cds = {&cd_emu, NULL},
+};
+
+static struct d_pwrd_info pd_wkup = {
+ .name = "PD_WKUP",
+ .prminst = OMAP4430_PRM_WKUP_INST,
+ .pwrst = -1,
+ .cds = {&cd_wkup, NULL},
+};
+
+static struct d_pwrd_info pd_alwon_core = {
+ .name = "PD_ALWON_CORE",
+ .prminst = OMAP4430_PRM_ALWAYS_ON_INST,
+ .pwrst = -1,
+ .cds = {&cd_l4_alwon_core, NULL},
+};
+
+static struct d_pwrd_info pd_cam = {
+ .name = "PD_CAM",
+ .prminst = OMAP4430_PRM_CAM_INST,
+ .pwrst = OMAP4_PM_CAM_PWRSTST_OFFSET,
+ .cds = {&cd_cam, NULL},
+};
+
+static struct d_pwrd_info pd_core = {
+ .name = "PD_CORE",
+ .prminst = OMAP4430_PRM_CORE_INST,
+ .pwrst = OMAP4_PM_CORE_PWRSTST_OFFSET,
+ .cds = {&cd_l4_cfg, &cd_emif, &cd_cortexm3, &cd_l3_2, &cd_l3_instr,
+ &cd_l3_1, &cd_c2c, &cd_dma, NULL},
+ // TBD: TRM mentions: CM2
+};
+
+static struct d_pwrd_info pd_dss = {
+ .name = "PD_DSS",
+ .prminst = OMAP4430_PRM_DSS_INST,
+ .pwrst = OMAP4_PM_DSS_PWRSTST_OFFSET,
+ .cds = {&cd_dss, NULL},
+};
+
+static struct d_pwrd_info pd_sgx = {
+ .name = "PD_SGX",
+ .prminst = OMAP4430_PRM_GFX_INST,
+ .pwrst = OMAP4_PM_GFX_PWRSTST_OFFSET,
+ .cds = {&cd_sgx, NULL},
+};
+
+static struct d_pwrd_info pd_l3_init = {
+ .name = "PD_L3_INIT",
+ .prminst = OMAP4430_PRM_L3INIT_INST,
+ .pwrst = OMAP4_PM_L3INIT_PWRSTST_OFFSET,
+ .cds = {&cd_l3_init, NULL},
+};
+
+static struct d_pwrd_info pd_l4_per = {
+ .name = "PD_L4_PER",
+ .prminst = OMAP4430_PRM_L4PER_INST,
+ .pwrst = OMAP4_PM_L4PER_PWRSTST_OFFSET,
+ .cds = {&cd_l4_per, &cd_l4sec, NULL},
+};
+
+static struct d_pwrd_info pd_std_efuse = {
+ .name = "PD_STD_EFUSE",
+ .prminst = -1,
+ .pwrst = -1,
+ .cds = {NULL},
+};
+
+static struct d_pwrd_info pd_alwon_dsp = {
+ .name = "PD_ALWON_DSP",
+ .prminst = -1,
+ .pwrst = -1,
+ .cds = {&cd_pd_alwon_dsp_fake, NULL},
+};
+
+static struct d_pwrd_info pd_audio = {
+ .name = "PD_AUDIO",
+ .prminst = OMAP4430_PRM_ABE_INST,
+ .pwrst = OMAP4_PM_ABE_PWRSTST_OFFSET,
+ .cds = {&cd_abe, NULL},
+};
+
+static struct d_pwrd_info pd_ivahd = {
+ .name = "PD_IVAHD",
+ .prminst = OMAP4430_PRM_IVAHD_INST,
+ .pwrst = OMAP4_PM_IVAHD_PWRSTST_OFFSET,
+ .cds = {&cd_ivahd, NULL},
+};
+
+static struct d_pwrd_info pd_dsp = {
+ .name = "PD_DSP",
+ .prminst = OMAP4430_PRM_TESLA_INST,
+ .pwrst = OMAP4_PM_TESLA_PWRSTST_OFFSET,
+ .cds = {&cd_dsp, NULL},
+};
+
+static struct d_pwrd_info pd_alwon_mpu = {
+ .name = "PD_ALWON_MPU",
+ .prminst = -1,
+ .pwrst = -1,
+ .cds = {&cd_pd_alwon_mpu_fake, NULL},
+};
+
+static struct d_pwrd_info pd_mpu = {
+ .name = "PD_MPU",
+ .prminst = OMAP4430_PRM_MPU_INST,
+ .pwrst = OMAP4_PM_MPU_PWRSTST_OFFSET,
+ .cds = {&cd_cortexa9, NULL},
+};
+
+#if 0 /* Do not seem to be valid */
+/* CPU0 and CPU1 power domains not in TRM, below based on Linux code */
+
+static struct d_pwrd_info pd_cpu0 = {
+ .name = "PD_CPU0",
+ .prminst = OMAP4430_PRCM_MPU_CPU0_INST,
+ .pwrst = OMAP4_PM_CPU0_PWRSTST_OFFSET,
+ .cds = {&cd_mpu0, NULL},
+};
+
+static struct d_pwrd_info pd_cpu1 = {
+ .name = "PD_CPU1",
+ .prminst = OMAP4430_PRCM_MPU_CPU1_INST,
+ .pwrst = OMAP4_PM_CPU1_PWRSTST_OFFSET,
+ .cds = {&cd_mpu1, NULL},
+};
+#endif
+
+/* Voltage domains to power domains */
+
+static struct d_pwrd_info *ldo_wakeup_pds[] =
+{&pd_emu, &pd_wkup, NULL};
+
+static struct d_pwrd_info *vdd_core_pds[] =
+{&pd_alwon_core, &pd_cam, &pd_core, &pd_dss, &pd_sgx, &pd_l3_init, &pd_l4_per,
+ &pd_std_efuse, NULL};
+
+static struct d_pwrd_info *vdd_iva_pds[] =
+{&pd_alwon_dsp, &pd_audio, &pd_ivahd, &pd_dsp, NULL};
+
+static struct d_pwrd_info *vdd_mpu_pds[] =
+{&pd_alwon_mpu, &pd_mpu, /* &pd_cpu0, &pd_cpu1, */ NULL};
+
+/* Voltage domains */
+
+#define N_VDDS 4
+
+struct d_vdd_info {
+ char *name;
+ int auto_ctrl_shift;
+ int auto_ctrl_mask;
+ struct d_pwrd_info **pds;
+};
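+
+/*
+ * auto_ctrl_shift/auto_ctrl_mask locate the voltage domain's AUTO_CTRL field
+ * in PRM_VOLTCTRL (-1 for the wakeup LDO, which has none), and pds is the
+ * list of power domains supplied by that rail.
+ */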
+
+static struct d_vdd_info d_vdd[N_VDDS] = {
+ {
+ .name = "LDO_WAKEUP",
+ .auto_ctrl_shift = -1,
+ .auto_ctrl_mask = -1,
+ .pds = ldo_wakeup_pds,
+ },
+ {
+ .name = "VDD_CORE_L",
+ .auto_ctrl_shift = OMAP4430_AUTO_CTRL_VDD_CORE_L_SHIFT,
+ .auto_ctrl_mask = OMAP4430_AUTO_CTRL_VDD_CORE_L_MASK,
+ .pds = vdd_core_pds,
+ },
+ {
+ .name = "VDD_IVA_L",
+ .auto_ctrl_shift = OMAP4430_AUTO_CTRL_VDD_IVA_L_SHIFT,
+ .auto_ctrl_mask = OMAP4430_AUTO_CTRL_VDD_IVA_L_MASK,
+ .pds = vdd_iva_pds,
+ },
+ {
+ .name = "VDD_MPU_L",
+ .auto_ctrl_shift = OMAP4430_AUTO_CTRL_VDD_MPU_L_SHIFT,
+ .auto_ctrl_mask = OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK,
+ .pds = vdd_mpu_pds,
+ },
+};
+
+
+/* Display strings */
+
+static char *vddauto_s[] = {"disabled", "SLEEP", "RET", "reserved"};
+
+static char *pwrstate_s[] = {"OFF", "RET", "INACTIVE", "ON"};
+
+static char *logic_s[] = {"OFF", "ON"};
+
+static char *cmtrctrl_s[] = {"NOSLEEP", "SW_SLEEP", "SW_WKUP", "HW_AUTO"};
+
+static char *modmode_s[] = {"DISABLED", "AUTO", "ENABLED", "3"};
+
+static char *modstbyst_s[] = {"ON", "STBY"};
+
+static char *modidlest_s[] = {"ON", "TRANSITION", "IDLE", "DISABLED"};
+
+#if 0
+#define DEP_S_MAX 19
+
+static char *dep_s[DEP_S_MAX] = {"MPU_M3", "DSP", "IVAHD", "ABE", "MEMIF",
+ "L3_1", "L3_2", "L3INIT", "DSS", "ISS",
+ "GFX", "SDMA", "L4CFG", "L4PER", "L4SEC",
+ "L4WKUP", "ALWON_CORE", "STD_EFUSE",
+ "D2D"};
+#endif
+
+#define d_pr(sf, fmt, args...) \
+ { \
+ if (sf) \
+ seq_printf(sf, fmt , ## args); \
+ else \
+ pr_info(fmt , ## args); \
+ }
+
+#define d_pr_ctd(sf, fmt, args...) \
+ { \
+ if (sf) \
+ seq_printf(sf, fmt , ## args); \
+ else \
+ pr_cont(fmt , ## args); \
+ }
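+
+/*
+ * d_pr() starts an output line and d_pr_ctd() continues it; both print to the
+ * seq_file when one is supplied (debugfs reads) and fall back to the kernel
+ * log via pr_info()/pr_cont() when called with sf == NULL, as
+ * prcmdebug_dump() does.
+ */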
+
+static void prcmdebug_dump_dpll(struct seq_file *sf,
+ struct d_dpll_info *dpll,
+ int flags)
+{
+ u32 idlest = __raw_readl(dpll->idlestreg);
+ u32 st_bypass = idlest & OMAP4430_ST_MN_BYPASS_MASK;
+ u32 st_dpll_clk = idlest & OMAP4430_ST_DPLL_CLK_MASK;
+ struct d_dpll_derived **derived;
+
+ if (flags & (PRCMDEBUG_LASTSLEEP | PRCMDEBUG_ON) && !st_dpll_clk)
+ return;
+
+ d_pr(sf, " %s status=%s\n", dpll->name,
+ st_dpll_clk ? "locked" : st_bypass ? "bypass" : "stopped");
+
+ derived = dpll->derived;
+
+ while (*derived) {
+ u32 enabled = __raw_readl((*derived)->gatereg) &
+ (*derived)->gatemask;
+
+ if (!(flags & (PRCMDEBUG_LASTSLEEP | PRCMDEBUG_ON)) ||
+ enabled)
+ d_pr(sf, " %s enabled=0x%x\n",
+ (*derived)->name, enabled);
+ derived++;
+ }
+}
+
+
+static void prcmdebug_dump_intgen(struct seq_file *sf,
+ struct d_intgen_info *intgen,
+ int flags)
+{
+ u32 enabled = __raw_readl(intgen->gatereg) & intgen->gatemask;
+
+ if (flags & (PRCMDEBUG_LASTSLEEP | PRCMDEBUG_ON) && !enabled)
+ return;
+
+ d_pr(sf, " %s enabled=0x%x\n", intgen->name, enabled);
+}
+
+static void prcmdebug_dump_mod(struct seq_file *sf, struct d_mod_info *mod,
+ int flags)
+{
+ u32 clkctrl = __raw_readl(mod->clkctrl);
+ u32 stbyst = (clkctrl & OMAP4430_STBYST_MASK) >> OMAP4430_STBYST_SHIFT;
+ u32 idlest = (clkctrl & OMAP4430_IDLEST_MASK) >> OMAP4430_IDLEST_SHIFT;
+ u32 optclk = clkctrl & mod->optclk;
+
+ if (flags & (PRCMDEBUG_LASTSLEEP | PRCMDEBUG_ON) &&
+ (!(mod->flags & MOD_MASTER) || stbyst == 1) &&
+ (!(mod->flags & MOD_SLAVE) || idlest == 2 || idlest == 3) &&
+ !optclk)
+ return;
+
+ if (flags & PRCMDEBUG_LASTSLEEP &&
+ (mod->flags & MOD_MODE &&
+ ((clkctrl & OMAP4430_MODULEMODE_MASK) >>
+ OMAP4430_MODULEMODE_SHIFT) == 1 /* AUTO */) &&
+ (!(mod->flags & MOD_SLAVE) || idlest == 0) /* ON */ &&
+ !optclk)
+ return;
+
+ d_pr(sf, " %s", mod->name);
+
+ if (mod->flags & MOD_MODE)
+ d_pr_ctd(sf, " mode=%s",
+ modmode_s[(clkctrl & OMAP4430_MODULEMODE_MASK) >>
+ OMAP4430_MODULEMODE_SHIFT]);
+
+ if (mod->flags & MOD_MASTER)
+ d_pr_ctd(sf, " stbyst=%s",
+ modstbyst_s[stbyst]);
+
+ if (mod->flags & MOD_SLAVE)
+ d_pr_ctd(sf, " idlest=%s",
+ modidlest_s[idlest]);
+
+ if (optclk)
+ d_pr_ctd(sf, " optclk=0x%x", optclk);
+
+ d_pr_ctd(sf, "\n");
+}
+
+static void prcmdebug_dump_real_cd(struct seq_file *sf, struct d_clkd_info *cd,
+ int flags)
+{
+ u32 clktrctrl =
+ omap4_cminst_read_inst_reg(cd->prcm_partition, cd->cm_inst,
+ cd->clkdm_offs + OMAP4_CM_CLKSTCTRL);
+ u32 mode = (clktrctrl & OMAP4430_CLKTRCTRL_MASK) >>
+ OMAP4430_CLKTRCTRL_SHIFT;
+ u32 activity = clktrctrl & cd->activity;
+#if 0
+ u32 staticdep =
+ omap4_cminst_read_inst_reg(cd->prcm_partition, cd->cm_inst,
+ cd->clkdm_offs + OMAP4_CM_STATICDEP);
+ u32 dynamicdep =
+ omap4_cminst_read_inst_reg(cd->prcm_partition, cd->cm_inst,
+ cd->clkdm_offs +
+ OMAP4_CM_STATICDEP + 4) & 0xffffff;
+ int i;
+#endif
+
+ if (flags & PRCMDEBUG_LASTSLEEP && mode == 3 /* HW_AUTO */)
+ return;
+
+ d_pr(sf, " %s mode=%s", cd->name, cmtrctrl_s[mode]);
+
+ d_pr_ctd(sf, " activity=0x%x", activity);
+
+#if 0
+ if (staticdep) {
+ d_pr_ctd(sf, " static:");
+
+ for (i = 0; i < DEP_S_MAX; i++)
+ if (staticdep & (1 << i))
+ d_pr_ctd(sf, " %s", dep_s[i]);
+ }
+#endif
+
+#if 0
+ if (dynamicdep)
+ d_pr_ctd(sf, " dynamicdep=0x%x", dynamicdep);
+#endif
+
+ d_pr_ctd(sf, "\n");
+}
+
+static void prcmdebug_dump_cd(struct seq_file *sf, struct d_clkd_info *cd,
+ int flags)
+{
+ struct d_mod_info **mod;
+ struct d_intgen_info **intgen;
+ struct d_dpll_info **dpll;
+
+ if (cd->cm_inst != -1) {
+ prcmdebug_dump_real_cd(sf, cd, flags);
+ } else if (!(flags & PRCMDEBUG_LASTSLEEP)) {
+ d_pr(sf, " %s\n", cd->name);
+ }
+
+ mod = cd->mods;
+
+ while (*mod) {
+ prcmdebug_dump_mod(sf, *mod, flags);
+ mod++;
+ }
+
+ dpll = cd->dplls;
+
+ while (*dpll) {
+ prcmdebug_dump_dpll(sf, *dpll, flags);
+ dpll++;
+ }
+
+ intgen = cd->intgens;
+
+ while (*intgen) {
+ prcmdebug_dump_intgen(sf, *intgen, flags);
+ intgen++;
+ }
+}
+
+static void prcmdebug_dump_pd(struct seq_file *sf, struct d_pwrd_info *pd,
+ int flags)
+{
+ u32 pwrstst, currst, prevst;
+ struct d_clkd_info **cd;
+
+ if (pd->pwrst != -1 && pd->prminst != -1) {
+ pwrstst = omap4_prm_read_inst_reg(pd->prminst, pd->pwrst);
+ currst = (pwrstst & OMAP4430_POWERSTATEST_MASK) >>
+ OMAP4430_POWERSTATEST_SHIFT;
+ prevst = (pwrstst & OMAP4430_LASTPOWERSTATEENTERED_MASK) >>
+ OMAP4430_LASTPOWERSTATEENTERED_SHIFT;
+
+ if (flags & PRCMDEBUG_LASTSLEEP &&
+ (prevst == PWRDM_POWER_OFF || prevst == PWRDM_POWER_RET))
+ return;
+
+ if (flags & PRCMDEBUG_ON &&
+ (currst == PWRDM_POWER_OFF || currst == PWRDM_POWER_RET))
+ return;
+
+ d_pr(sf, " %s curr=%s prev=%s logic=%s\n", pd->name,
+ pwrstate_s[currst],
+ pwrstate_s[prevst],
+ logic_s[(pwrstst & OMAP4430_LOGICSTATEST_MASK) >>
+ OMAP4430_LOGICSTATEST_SHIFT]);
+ } else {
+ if (flags & PRCMDEBUG_LASTSLEEP)
+ return;
+
+ d_pr(sf, " %s\n", pd->name);
+ }
+
+ cd = pd->cds;
+
+ while (*cd) {
+ prcmdebug_dump_cd(sf, *cd, flags);
+ cd++;
+ }
+}
+
+static int _prcmdebug_dump(struct seq_file *sf, int flags)
+{
+ int i;
+ u32 prm_voltctrl =
+ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_VOLTCTRL_OFFSET);
+ struct d_pwrd_info **pd;
+
+ for (i = 0; i < N_VDDS; i++) {
+ if (!(flags & PRCMDEBUG_LASTSLEEP)) {
+ d_pr(sf, "%s",
+ d_vdd[i].name);
+
+ if (d_vdd[i].auto_ctrl_shift != -1) {
+ int auto_ctrl =
+ (prm_voltctrl &
+ d_vdd[i].auto_ctrl_mask) >>
+ d_vdd[i].auto_ctrl_shift;
+ d_pr_ctd(sf, " auto=%s\n",
+ vddauto_s[auto_ctrl]);
+ } else {
+ d_pr_ctd(sf, " (no auto)\n");
+ }
+
+ }
+
+ pd = d_vdd[i].pds;
+
+ while (*pd) {
+ prcmdebug_dump_pd(sf, *pd, flags);
+ pd++;
+ }
+ }
+
+ return 0;
+}
+
+void prcmdebug_dump(int flags)
+{
+ _prcmdebug_dump(NULL, flags);
+}
+
+static int prcmdebug_all_dump(struct seq_file *sf, void *private)
+{
+ _prcmdebug_dump(sf, 0);
+ return 0;
+}
+
+static int prcmdebug_all_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prcmdebug_all_dump, NULL);
+}
+
+
+static struct file_operations prcmdebug_all_fops = {
+ .open = prcmdebug_all_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int prcmdebug_on_dump(struct seq_file *sf, void *private)
+{
+ _prcmdebug_dump(sf, PRCMDEBUG_ON);
+ return 0;
+}
+
+static int prcmdebug_on_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prcmdebug_on_dump, NULL);
+}
+
+static struct file_operations prcmdebug_on_fops = {
+ .open = prcmdebug_on_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
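+/*
+ * Expose two read-only debugfs files: "prcm" dumps the full PRCM state and
+ * "prcm-on" dumps only the domains and modules that are currently active.
+ * With debugfs mounted at its usual location they can be read with e.g.
+ * "cat /sys/kernel/debug/prcm".
+ */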
+static int __init prcmdebug_init(void)
+{
+ if (IS_ERR_OR_NULL(debugfs_create_file("prcm", S_IRUGO, NULL, NULL,
+ &prcmdebug_all_fops)))
+ pr_err("%s: failed to create prcm file\n", __func__);
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("prcm-on", S_IRUGO, NULL, NULL,
+ &prcmdebug_on_fops)))
+ pr_err("%s: failed to create prcm-on file\n", __func__);
+
+ return 0;
+}
+
+arch_initcall(prcmdebug_init);
diff --git a/arch/arm/mach-omap2/prcm-debug.h b/arch/arm/mach-omap2/prcm-debug.h
new file mode 100644
index 0000000..14ca00fb
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm-debug.h
@@ -0,0 +1,13 @@
+#ifndef __ARCH_ASM_MACH_OMAP2_PRCM_DEBUG_H
+#define __ARCH_ASM_MACH_OMAP2_PRCM_DEBUG_H
+
+#define PRCMDEBUG_LASTSLEEP (1 << 0)
+#define PRCMDEBUG_ON (1 << 1)
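+
+/*
+ * PRCMDEBUG_LASTSLEEP restricts the dump to the blocks that failed to reach a
+ * low-power state during the last sleep attempt; PRCMDEBUG_ON restricts it to
+ * the blocks that are currently active.
+ */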
+
+#ifdef CONFIG_PM_DEBUG
+extern void prcmdebug_dump(int flags);
+#else
+static inline void prcmdebug_dump(int flags) { }
+#endif
+
+#endif /* __ARCH_ASM_MACH_OMAP2_PRCM_DEBUG_H */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 64c087a..c38c4594 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -216,6 +216,7 @@
/* PRM_SYSCONFIG specific bits */
/* PRM_IRQSTATUS_MPU specific bits */
+#define OMAP3630_ABB_LDO_TRANXDONE_ST_MASK (1 << 26)
#define OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT 25
#define OMAP3430ES2_SND_PERIPH_DPLL_ST_MASK (1 << 25)
#define OMAP3430_VC_TIMEOUTERR_ST_MASK (1 << 24)
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index 6d2776f..b76cfd6 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -1,7 +1,7 @@
/*
* OMAP44xx Power Management register bits
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
@@ -92,6 +92,10 @@
#define OMAP4430_AUTO_CTRL_VDD_MPU_L_SHIFT 2
#define OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK (0x3 << 2)
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_AUTO_CTRL_VDD_RET_MASK (1 << 1)
+#define OMAP4430_AUTO_CTRL_VDD_SLEEP_MASK (1 << 0)
+
/* Used by PRM_VC_ERRST */
#define OMAP4430_BYPS_RA_ERR_SHIFT 25
#define OMAP4430_BYPS_RA_ERR_MASK (1 << 25)
@@ -283,6 +287,14 @@
#define OMAP4430_DUCATI_UNICACHE_STATEST_SHIFT 10
#define OMAP4430_DUCATI_UNICACHE_STATEST_MASK (0x3 << 10)
+/* Used by PRM_DEVICE_OFF_CTRL */
+#define OMAP4460_EMIF1_OFFWKUP_DISABLE_SHIFT 8
+#define OMAP4460_EMIF1_OFFWKUP_DISABLE_MASK (1 << 8)
+
+/* Used by PRM_DEVICE_OFF_CTRL */
+#define OMAP4460_EMIF2_OFFWKUP_DISABLE_SHIFT 9
+#define OMAP4460_EMIF2_OFFWKUP_DISABLE_MASK (1 << 9)
+
/* Used by RM_MPU_RSTST */
#define OMAP4430_EMULATION_RST_SHIFT 0
#define OMAP4430_EMULATION_RST_MASK (1 << 0)
@@ -390,6 +402,8 @@
/* Used by PRM_IO_PMCTRL */
#define OMAP4430_GLOBAL_WUEN_SHIFT 16
#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
+#define OMAP4430_ISOOVR_EXTEND_SHIFT 4
+#define OMAP4430_ISOOVR_EXTEND_MASK (1 << 4)
/* Used by PRM_VC_CFG_I2C_MODE */
#define OMAP4430_HSMCODE_SHIFT 0
@@ -1063,6 +1077,14 @@
#define OMAP4430_SCLL_SHIFT 8
#define OMAP4430_SCLL_MASK (0xff << 8)
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_HSCLH_SHIFT 16
+#define OMAP4430_HSCLH_MASK (0xff << 16)
+
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_HSCLL_SHIFT 24
+#define OMAP4430_HSCLL_MASK (0xff << 24)
+
/* Used by PRM_RSTST */
#define OMAP4430_SECURE_WDT_RST_SHIFT 4
#define OMAP4430_SECURE_WDT_RST_MASK (1 << 4)
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 051213f..49e9719 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -20,6 +20,8 @@
#include <plat/cpu.h>
#include <plat/prcm.h>
+#include "vp.h"
+
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
@@ -156,3 +158,80 @@
return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}
+
+/* PRM VP */
+
+/*
+ * struct omap3_prm_irq - OMAP3 PRM IRQ register access description.
+ * @vp_tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
+ * @abb_tranxdone_status: ABB_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
+ * (ONLY for OMAP3630)
+ */
+struct omap3_prm_irq {
+ u32 vp_tranxdone_status;
+ u32 abb_tranxdone_status;
+};
+
+static struct omap3_prm_irq omap3_prm_irqs[] = {
+ [OMAP3_PRM_IRQ_VDD_MPU_ID] = {
+ .vp_tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK,
+ .abb_tranxdone_status = OMAP3630_ABB_LDO_TRANXDONE_ST_MASK,
+ },
+ [OMAP3_PRM_IRQ_VDD_CORE_ID] = {
+ .vp_tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK,
+ /* no abb for core */
+ },
+};
+
+#define MAX_VP_ID ARRAY_SIZE(omap3_vp)
+
+u32 omap3_prm_vp_check_txdone(u8 irq_id)
+{
+ struct omap3_prm_irq *irq = &omap3_prm_irqs[irq_id];
+ u32 irqstatus;
+
+ irqstatus = omap2_prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ return irqstatus & irq->vp_tranxdone_status;
+}
+
+void omap3_prm_vp_clear_txdone(u8 irq_id)
+{
+ struct omap3_prm_irq *irq = &omap3_prm_irqs[irq_id];
+
+ omap2_prm_write_mod_reg(irq->vp_tranxdone_status,
+ OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+}
+
+u32 omap36xx_prm_abb_check_txdone(u8 irq_id)
+{
+ struct omap3_prm_irq *irq = &omap3_prm_irqs[irq_id];
+ u32 irqstatus;
+
+ irqstatus = omap2_prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ return irqstatus & irq->abb_tranxdone_status;
+}
+
+void omap36xx_prm_abb_clear_txdone(u8 irq_id)
+{
+ struct omap3_prm_irq *irq = &omap3_prm_irqs[irq_id];
+
+ omap2_prm_write_mod_reg(irq->abb_tranxdone_status,
+ OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+}
+
+u32 omap3_prm_vcvp_read(u8 offset)
+{
+ return omap2_prm_read_mod_reg(OMAP3430_GR_MOD, offset);
+}
+
+void omap3_prm_vcvp_write(u32 val, u8 offset)
+{
+ omap2_prm_write_mod_reg(val, OMAP3430_GR_MOD, offset);
+}
+
+u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
+{
+ return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset);
+}
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index a1fc62a..08d5f1e 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -167,6 +167,10 @@
#define OMAP3430_PRM_VP2_VOLTAGE OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e0)
#define OMAP3_PRM_VP2_STATUS_OFFSET 0x00e4
#define OMAP3430_PRM_VP2_STATUS OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e4)
+#define OMAP3_PRM_LDO_ABB_SETUP_OFFSET 0x00f0
+#define OMAP3630_PRM_LDO_ABB_SETUP OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00f0)
+#define OMAP3_PRM_LDO_ABB_CTRL_OFFSET 0x00f4
+#define OMAP3630_PRM_LDO_ABB_CTRL OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00f4)
#define OMAP3_PRM_CLKSEL_OFFSET 0x0040
#define OMAP3430_PRM_CLKSEL OMAP34XX_PRM_REGADDR(OMAP3430_CCR_MOD, 0x0040)
@@ -303,7 +307,25 @@
extern int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
extern int omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift, u8 st_shift);
+#define OMAP3_PRM_IRQ_VDD_MPU_ID 0
+#define OMAP3_PRM_IRQ_VDD_CORE_ID 1
+/* OMAP3-specific VP functions */
+u32 omap3_prm_vp_check_txdone(u8 irq_id);
+void omap3_prm_vp_clear_txdone(u8 irq_id);
+
+/* OMAP36xx-specific ABB functions */
+u32 omap36xx_prm_abb_check_txdone(u8 irq_id);
+void omap36xx_prm_abb_clear_txdone(u8 irq_id);
+
+/*
+ * OMAP3 access functions for voltage controller (VC) and
+ * voltage processor (VP) in the PRM.
+ */
+extern u32 omap3_prm_vcvp_read(u8 offset);
+extern void omap3_prm_vcvp_write(u32 val, u8 offset);
+extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
#endif /* CONFIG_ARCH_OMAP4 */
+
#endif
/*
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index a2a04bfa..6f011e0 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -21,8 +21,12 @@
#include <plat/cpu.h>
#include <plat/prcm.h>
+#include "voltage.h"
+#include "vp.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prminst44xx.h"
/*
* Address offset (in bytes) between the reset control and the reset
@@ -193,3 +197,112 @@
v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
OMAP4_RM_RSTCTRL);
}
+
+void omap4_prm_global_cold_sw_reset(void)
+{
+ u32 v;
+
+ v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_RM_RSTCTRL);
+ v |= OMAP4430_RST_GLOBAL_COLD_SW_MASK;
+ omap4_prm_write_inst_reg(v, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_RM_RSTCTRL);
+
+ /* OCP barrier */
+ v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_RM_RSTCTRL);
+}
+
+/* PRM VP */
+
+/*
+ * struct omap4_prm_irq - OMAP4 VP register access description.
+ * @irqstatus_mpu: offset to IRQSTATUS_MPU register for VP
+ * @vp_tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
+ * @abb_tranxdone_status: ABB_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
+ */
+struct omap4_prm_irq {
+ u32 irqstatus_mpu;
+ u32 vp_tranxdone_status;
+ u32 abb_tranxdone_status;
+};
+
+static struct omap4_prm_irq omap4_prm_irqs[] = {
+ [OMAP4_PRM_IRQ_VDD_MPU_ID] = {
+ .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET,
+ .vp_tranxdone_status = OMAP4430_VP_MPU_TRANXDONE_ST_MASK,
+ .abb_tranxdone_status = OMAP4430_ABB_MPU_DONE_ST_MASK
+ },
+ [OMAP4_PRM_IRQ_VDD_IVA_ID] = {
+ .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
+ .vp_tranxdone_status = OMAP4430_VP_IVA_TRANXDONE_ST_MASK,
+ .abb_tranxdone_status = OMAP4430_ABB_IVA_DONE_ST_MASK,
+ },
+ [OMAP4_PRM_IRQ_VDD_CORE_ID] = {
+ .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
+ .vp_tranxdone_status = OMAP4430_VP_CORE_TRANXDONE_ST_MASK,
+ /* Core has no ABB */
+ },
+};
+
+u32 omap4_prm_vp_check_txdone(u8 irq_id)
+{
+ struct omap4_prm_irq *irq = &omap4_prm_irqs[irq_id];
+ u32 irqstatus;
+
+ irqstatus = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_OCP_SOCKET_INST,
+ irq->irqstatus_mpu);
+ return irqstatus & irq->vp_tranxdone_status;
+}
+
+void omap4_prm_vp_clear_txdone(u8 irq_id)
+{
+ struct omap4_prm_irq *irq = &omap4_prm_irqs[irq_id];
+
+ omap4_prminst_write_inst_reg(irq->vp_tranxdone_status,
+ OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_OCP_SOCKET_INST,
+ irq->irqstatus_mpu);
+}
+
+u32 omap4_prm_abb_check_txdone(u8 irq_id)
+{
+ struct omap4_prm_irq *irq = &omap4_prm_irqs[irq_id];
+ u32 irqstatus;
+
+ irqstatus = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_OCP_SOCKET_INST,
+ irq->irqstatus_mpu);
+ return irqstatus & irq->abb_tranxdone_status;
+}
+
+void omap4_prm_abb_clear_txdone(u8 irq_id)
+{
+ struct omap4_prm_irq *irq = &omap4_prm_irqs[irq_id];
+
+ omap4_prminst_write_inst_reg(irq->abb_tranxdone_status,
+ OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_OCP_SOCKET_INST,
+ irq->irqstatus_mpu);
+}
+
+u32 omap4_prm_vcvp_read(u8 offset)
+{
+ return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, offset);
+}
+
+void omap4_prm_vcvp_write(u32 val, u8 offset)
+{
+ omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, offset);
+}
+
+u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
+{
+ return omap4_prminst_rmw_inst_reg_bits(mask, bits,
+ OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST,
+ offset);
+}
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 67a0d3f..ead36e1 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -713,8 +713,8 @@
#define OMAP4430_PRM_VC_VAL_BYPASS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a0)
#define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET 0x00a4
#define OMAP4430_PRM_VC_CFG_CHANNEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a4)
-#define OMAP4_PRM_VC_CFG_I2C_INSTE_OFFSET 0x00a8
-#define OMAP4430_PRM_VC_CFG_I2C_INSTE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET 0x00a8
+#define OMAP4430_PRM_VC_CFG_I2C_MODE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
#define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET 0x00ac
#define OMAP4430_PRM_VC_CFG_I2C_CLK OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ac)
#define OMAP4_PRM_SRAM_COUNT_OFFSET 0x00b0
@@ -772,6 +772,25 @@
extern int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift);
extern void omap4_prm_global_warm_sw_reset(void);
+extern void omap4_prm_global_cold_sw_reset(void);
+
+#define OMAP4_PRM_IRQ_VDD_CORE_ID 0
+#define OMAP4_PRM_IRQ_VDD_IVA_ID 1
+#define OMAP4_PRM_IRQ_VDD_MPU_ID 2
+/* OMAP4-specific VP functions */
+u32 omap4_prm_vp_check_txdone(u8 irq_id);
+void omap4_prm_vp_clear_txdone(u8 irq_id);
+/* OMAP4-specific ABB functions */
+u32 omap4_prm_abb_check_txdone(u8 irq_id);
+void omap4_prm_abb_clear_txdone(u8 irq_id);
+
+/*
+ * OMAP4 access functions for voltage controller (VC) and
+ * voltage processor (VP) in the PRM.
+ */
+extern u32 omap4_prm_vcvp_read(u8 offset);
+extern void omap4_prm_vcvp_write(u32 val, u8 offset);
+extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
# endif
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index a303242..b6a286f 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -22,13 +22,14 @@
#include "prm-regbits-44xx.h"
#include "prcm44xx.h"
#include "prcm_mpu44xx.h"
+#include "scrm44xx.h"
static u32 _prm_bases[OMAP4_MAX_PRCM_PARTITIONS] = {
[OMAP4430_INVALID_PRCM_PARTITION] = 0,
[OMAP4430_PRM_PARTITION] = OMAP4430_PRM_BASE,
[OMAP4430_CM1_PARTITION] = 0,
[OMAP4430_CM2_PARTITION] = 0,
- [OMAP4430_SCRM_PARTITION] = 0,
+ [OMAP4430_SCRM_PARTITION] = OMAP4_SCRM_BASE,
[OMAP4430_PRCM_MPU_PARTITION] = OMAP4430_PRCM_MPU_BASE,
};
diff --git a/arch/arm/mach-omap2/remoteproc.c b/arch/arm/mach-omap2/remoteproc.c
new file mode 100644
index 0000000..b8ae36f
--- /dev/null
+++ b/arch/arm/mach-omap2/remoteproc.c
@@ -0,0 +1,164 @@
+/*
+ * Remote processor machine-specific module for OMAP4
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/memblock.h>
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+#include <plat/remoteproc.h>
+#include <plat/dsp.h>
+#include <plat/io.h>
+#include "cm2_44xx.h"
+#include "cm-regbits-44xx.h"
+
+#define OMAP4430_CM_M3_M3_CLKCTRL (OMAP4430_CM2_BASE + OMAP4430_CM2_CORE_INST \
+ + OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET)
+
+static struct omap_rproc_timers_info ipu_timers[] = {
+ { .id = 3 },
+ { .id = 4 },
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ { .id = 9 },
+ { .id = 11 },
+#endif
+};
+
+static struct omap_rproc_pdata omap4_rproc_data[] = {
+ {
+ .name = "dsp",
+ .iommu_name = "tesla",
+ .firmware = "tesla-dsp.bin",
+ .oh_name = "dsp_c0",
+ .clkdm_name = "dsp_clkdm",
+ },
+ {
+ .name = "ipu",
+ .iommu_name = "ducati",
+ .firmware = "ducati-m3.bin",
+ .oh_name = "ipu_c0",
+ .oh_name_opt = "ipu_c1",
+ .clkdm_name = "ducati_clkdm",
+ .timers = ipu_timers,
+ .timers_cnt = ARRAY_SIZE(ipu_timers),
+ .idle_addr = OMAP4430_CM_M3_M3_CLKCTRL,
+ .idle_mask = OMAP4430_STBYST_MASK,
+ .suspend_addr = 0xb3bf02d8,
+ .suspend_mask = ~0,
+ .sus_timeout = 5000,
+ .sus_mbox_name = "mailbox-1",
+ },
+};
+
+static struct omap_device_pm_latency omap_rproc_latency[] = {
+ {
+ OMAP_RPROC_DEFAULT_PM_LATENCY,
+ },
+};
+
+static struct rproc_mem_pool *omap_rproc_get_pool(const char *name)
+{
+ struct rproc_mem_pool *pool = NULL;
+
+ /* check for ipu currently. dsp will be handled later */
+ if (!strcmp("ipu", name)) {
+ phys_addr_t paddr1 = omap_ipu_get_mempool_base(
+ OMAP_RPROC_MEMPOOL_STATIC);
+ phys_addr_t paddr2 = omap_ipu_get_mempool_base(
+ OMAP_RPROC_MEMPOOL_DYNAMIC);
+ u32 len1 = omap_ipu_get_mempool_size(OMAP_RPROC_MEMPOOL_STATIC);
+ u32 len2 = omap_ipu_get_mempool_size(OMAP_RPROC_MEMPOOL_DYNAMIC);
+
+ if (!paddr1 && !paddr2) {
+ pr_err("no carveout memory available at all for "
+ "remotproc\n");
+ return pool;
+ }
+ if (!paddr1 || !len1)
+ pr_warn("static memory is unavailable: 0x%x, 0x%x\n",
+ paddr1, len1);
+ if (!paddr2 || !len2)
+ pr_warn("carveout memory is unavailable: 0x%x, 0x%x\n",
+ paddr2, len2);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (pool) {
+ pool->st_base = paddr1;
+ pool->st_size = len1;
+ pool->mem_base = paddr2;
+ pool->mem_size = len2;
+ pool->cur_base = paddr2;
+ pool->cur_size = len2;
+ }
+ }
+
+ return pool;
+}
+
+static int __init omap_rproc_init(void)
+{
+ const char *pdev_name = "omap-rproc";
+ struct omap_hwmod *oh[2];
+ struct omap_device *od;
+ int i, ret = 0, oh_count;
+
+ /* names like ipu_cx/dsp_cx might show up on other OMAPs, too */
+ if (!cpu_is_omap44xx())
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(omap4_rproc_data); i++) {
+ const char *oh_name = omap4_rproc_data[i].oh_name;
+ const char *oh_name_opt = omap4_rproc_data[i].oh_name_opt;
+ oh_count = 0;
+
+ oh[0] = omap_hwmod_lookup(oh_name);
+ if (!oh[0]) {
+ pr_err("could not look up %s\n", oh_name);
+ continue;
+ }
+ oh_count++;
+
+ if (oh_name_opt) {
+ oh[1] = omap_hwmod_lookup(oh_name_opt);
+ if (!oh[1]) {
+ pr_err("could not look up %s\n", oh_name_opt);
+ continue;
+ }
+ oh_count++;
+ }
+
+ omap4_rproc_data[i].memory_pool =
+ omap_rproc_get_pool(omap4_rproc_data[i].name);
+ od = omap_device_build_ss(pdev_name, i, oh, oh_count,
+ &omap4_rproc_data[i],
+ sizeof(struct omap_rproc_pdata),
+ omap_rproc_latency,
+ ARRAY_SIZE(omap_rproc_latency),
+ false);
+ if (IS_ERR(od)) {
+ pr_err("Could not build omap_device for %s:%s\n",
+ pdev_name, oh_name);
+ ret = PTR_ERR(od);
+ }
+ }
+
+ return ret;
+}
+/* must be ready in time for device_initcall users */
+subsys_initcall(omap_rproc_init);
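
For reference, the pool fields filled in by omap_rproc_get_pool() (st_base/st_size for the static region, cur_base/cur_size tracking the unused part of the dynamic region) lend themselves to a simple bump allocator. A hedged sketch, with a hypothetical helper name, of how a consumer might carve allocations out of the dynamic region:

/* Illustrative sketch only -- not part of this patch. */
static phys_addr_t example_rproc_pool_alloc(struct rproc_mem_pool *pool,
					    u32 size, u32 align)
{
	phys_addr_t base = ALIGN(pool->cur_base, align);
	u32 consumed = (base - pool->cur_base) + size;

	if (consumed > pool->cur_size)
		return 0;		/* dynamic region exhausted */

	/* Bump the cursor past the newly carved-out block */
	pool->cur_base += consumed;
	pool->cur_size -= consumed;
	return base;
}
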
diff --git a/arch/arm/mach-omap2/resetreason.c b/arch/arm/mach-omap2/resetreason.c
new file mode 100644
index 0000000..316bfeb
--- /dev/null
+++ b/arch/arm/mach-omap2/resetreason.c
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/mach-omap2/resetreason.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "prm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prminst44xx.h"
+#include "resetreason.h"
+
+static char resetreason[1024];
+
+static struct {
+ const char *str;
+ u32 mask;
+} resetreason_flags[] = {
+ { "C2C ", OMAP4430_C2C_RST_MASK },
+ { "IcePick ", OMAP4430_ICEPICK_RST_MASK },
+ { "Voltage Manager ", OMAP4430_VDD_MPU_VOLT_MGR_RST_MASK |
+ OMAP4430_VDD_IVA_VOLT_MGR_RST_MASK |
+ OMAP4430_VDD_CORE_VOLT_MGR_RST_MASK },
+ { "external warm ", OMAP4430_EXTERNAL_WARM_RST_MASK },
+ { "MPU Watchdog Timer ", OMAP4430_MPU_WDT_RST_MASK },
+ { "warm software ", OMAP4430_GLOBAL_WARM_SW_RST_MASK },
+ { "cold ", OMAP4430_GLOBAL_COLD_RST_MASK },
+};
+
+const char *omap4_get_resetreason(void)
+{
+ return resetreason;
+}
+
+static int __init resetreason_init(void)
+{
+ int i;
+ u32 reasons =
+ omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_RSTST_OFFSET);
+ char buf[128];
+
+ strlcpy(resetreason, "Last reset was ", sizeof(resetreason));
+
+ for (i = 0; i < ARRAY_SIZE(resetreason_flags); i++)
+ if (reasons & resetreason_flags[i].mask)
+ strlcat(resetreason, resetreason_flags[i].str,
+ sizeof(resetreason));
+
+ snprintf(buf, sizeof(buf), "reset (PRM_RSTST=0x%x)\n", reasons);
+
+ strlcat(resetreason, buf, sizeof(resetreason));
+
+ pr_info("%s\n", resetreason);
+
+ omap4_prminst_write_inst_reg(reasons, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_RSTST_OFFSET);
+
+ return 0;
+}
+
+postcore_initcall(resetreason_init);
diff --git a/arch/arm/mach-omap2/resetreason.h b/arch/arm/mach-omap2/resetreason.h
new file mode 100644
index 0000000..96c9a00
--- /dev/null
+++ b/arch/arm/mach-omap2/resetreason.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-omap2/resetreason.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MACH_OMAP2_RESETREASON_H_
+#define _MACH_OMAP2_RESETREASON_H_
+
+const char *omap4_get_resetreason(void);
+
+#endif
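
omap4_get_resetreason() simply returns the string assembled at postcore_initcall time, so any later code can report it. A hedged sketch of a possible sysfs consumer; the attribute and where it would be registered are hypothetical, not part of this patch:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/device.h>
#include "resetreason.h"

static ssize_t resetreason_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* The string already ends in a newline (see resetreason_init) */
	return scnprintf(buf, PAGE_SIZE, "%s", omap4_get_resetreason());
}
static DEVICE_ATTR(resetreason, S_IRUGO, resetreason_show, NULL);
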
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b..76b9cfb 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -19,26 +19,20 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/serial_reg.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/serial_8250.h>
#include <linux/pm_runtime.h>
-#include <linux/console.h>
-#ifdef CONFIG_SERIAL_OMAP
#include <plat/omap-serial.h>
-#endif
-
#include <plat/common.h>
#include <plat/board.h>
#include <plat/clock.h>
#include <plat/dma.h>
-#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
#include "prm2xxx_3xxx.h"
#include "pm.h"
@@ -47,66 +41,20 @@
#include "control.h"
#include "mux.h"
-#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
-#define UART_OMAP_WER 0x17 /* Wake-up enable register */
-
-#define UART_ERRATA_FIFO_FULL_ABORT (0x1 << 0)
-#define UART_ERRATA_i202_MDR1_ACCESS (0x1 << 1)
-
-/*
- * NOTE: By default the serial timeout is disabled as it causes lost characters
- * over the serial ports. This means that the UART clocks will stay on until
- * disabled via sysfs. This also causes that any deeper omap sleep states are
- * blocked.
- */
-#define DEFAULT_TIMEOUT 0
-
#define MAX_UART_HWMOD_NAME_LEN 16
-struct omap_uart_state {
- int num;
- int can_sleep;
- struct timer_list timer;
- u32 timeout;
+static int omap_uart_con_id __initdata = -1;
- void __iomem *wk_st;
- void __iomem *wk_en;
- u32 wk_mask;
- u32 padconf;
- u32 dma_enabled;
-
- struct clk *ick;
- struct clk *fck;
- int clocked;
-
- int irq;
- int regshift;
- int irqflags;
- void __iomem *membase;
- resource_size_t mapbase;
-
- struct list_head node;
- struct omap_hwmod *oh;
- struct platform_device *pdev;
-
- u32 errata;
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
- int context_valid;
-
- /* Registers to be saved/restored for OFF-mode */
- u16 dll;
- u16 dlh;
- u16 ier;
- u16 sysc;
- u16 scr;
- u16 wer;
- u16 mcr;
-#endif
+static struct omap_uart_port_info omap_serial_default_info[] = {
+ {
+ .use_dma = 0,
+ .dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE,
+ .dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE,
+ .dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT,
+ .auto_sus_timeout = DEFAULT_AUTOSUSPEND_DELAY,
+ },
};
-static LIST_HEAD(uart_list);
-static u8 num_uarts;
-
static int uart_idle_hwmod(struct omap_device *od)
{
omap_hwmod_idle(od->hwmods[0]);
@@ -129,396 +77,203 @@
},
};
-static inline unsigned int __serial_read_reg(struct uart_port *up,
- int offset)
-{
- offset <<= up->regshift;
- return (unsigned int)__raw_readb(up->membase + offset);
-}
+#ifdef CONFIG_OMAP_MUX
+static struct omap_device_pad default_uart1_pads[] __initdata = {
+ {
+ .name = "uart1_cts.uart1_cts",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_rts.uart1_rts",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_tx.uart1_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_rx.uart1_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+};
-static inline unsigned int serial_read_reg(struct omap_uart_state *uart,
- int offset)
-{
- offset <<= uart->regshift;
- return (unsigned int)__raw_readb(uart->membase + offset);
-}
+static struct omap_device_pad default_uart2_pads[] __initdata = {
+ {
+ .name = "uart2_cts.uart2_cts",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_rts.uart2_rts",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_tx.uart2_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_rx.uart2_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+};
-static inline void __serial_write_reg(struct uart_port *up, int offset,
- int value)
-{
- offset <<= up->regshift;
- __raw_writeb(value, up->membase + offset);
-}
+static struct omap_device_pad default_uart3_pads[] __initdata = {
+ {
+ .name = "uart3_cts_rctx.uart3_cts_rctx",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_rts_sd.uart3_rts_sd",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_tx_irtx.uart3_tx_irtx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_rx_irrx.uart3_rx_irrx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ },
+};
-static inline void serial_write_reg(struct omap_uart_state *uart, int offset,
- int value)
-{
- offset <<= uart->regshift;
- __raw_writeb(value, uart->membase + offset);
-}
+static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {
+ {
+ .name = "gpmc_wait2.uart4_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "gpmc_wait3.uart4_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
+ },
+};
-/*
- * Internal UARTs need to be initialized for the 8250 autoconfig to work
- * properly. Note that the TX watermark initialization may not be needed
- * once the 8250.c watermark handling code is merged.
- */
-
-static inline void __init omap_uart_reset(struct omap_uart_state *uart)
-{
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
- serial_write_reg(uart, UART_OMAP_SCR, 0x08);
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
-}
-
-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
-
-/*
- * Work Around for Errata i202 (3430 - 1.12, 3630 - 1.6)
- * The access to uart register after MDR1 Access
- * causes UART to corrupt data.
- *
- * Need a delay =
- * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
- * give 10 times as much
- */
-static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val,
- u8 fcr_val)
-{
- u8 timeout = 255;
-
- serial_write_reg(uart, UART_OMAP_MDR1, mdr1_val);
- udelay(2);
- serial_write_reg(uart, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT |
- UART_FCR_CLEAR_RCVR);
- /*
- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
- * TX_FIFO_E bit is 1.
- */
- while (UART_LSR_THRE != (serial_read_reg(uart, UART_LSR) &
- (UART_LSR_THRE | UART_LSR_DR))) {
- timeout--;
- if (!timeout) {
- /* Should *never* happen. we warn and carry on */
- dev_crit(&uart->pdev->dev, "Errata i202: timedout %x\n",
- serial_read_reg(uart, UART_LSR));
- break;
- }
- udelay(1);
- }
-}
-
-static void omap_uart_save_context(struct omap_uart_state *uart)
-{
- u16 lcr = 0;
-
- if (!enable_off_mode)
- return;
-
- lcr = serial_read_reg(uart, UART_LCR);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- uart->dll = serial_read_reg(uart, UART_DLL);
- uart->dlh = serial_read_reg(uart, UART_DLM);
- serial_write_reg(uart, UART_LCR, lcr);
- uart->ier = serial_read_reg(uart, UART_IER);
- uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
- uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
- uart->wer = serial_read_reg(uart, UART_OMAP_WER);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
- uart->mcr = serial_read_reg(uart, UART_MCR);
- serial_write_reg(uart, UART_LCR, lcr);
-
- uart->context_valid = 1;
-}
-
-static void omap_uart_restore_context(struct omap_uart_state *uart)
-{
- u16 efr = 0;
-
- if (!enable_off_mode)
- return;
-
- if (!uart->context_valid)
- return;
-
- uart->context_valid = 0;
-
- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0);
- else
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
-
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- efr = serial_read_reg(uart, UART_EFR);
- serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(uart, UART_IER, 0x0);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_write_reg(uart, UART_DLL, uart->dll);
- serial_write_reg(uart, UART_DLM, uart->dlh);
- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(uart, UART_IER, uart->ier);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_write_reg(uart, UART_MCR, uart->mcr);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_write_reg(uart, UART_EFR, efr);
- serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
- serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
- serial_write_reg(uart, UART_OMAP_WER, uart->wer);
- serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
-
- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1);
- else
- /* UART 16x mode */
- serial_write_reg(uart, UART_OMAP_MDR1,
- UART_OMAP_MDR1_16X_MODE);
-}
+static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {
+ {
+ .name = "uart4_tx.uart4_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart4_rx.uart4_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ },
+};
#else
-static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
-static inline void omap_uart_restore_context(struct omap_uart_state *uart) {}
-#endif /* CONFIG_PM && CONFIG_ARCH_OMAP3 */
+static struct omap_device_pad default_uart1_pads[] __initdata = {};
+static struct omap_device_pad default_uart2_pads[] __initdata = {};
+static struct omap_device_pad default_uart3_pads[] __initdata = {};
+static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {};
+static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {};
+#endif
-static inline void omap_uart_enable_clocks(struct omap_uart_state *uart)
+static __init void omap_serial_fill_default_pads(struct omap_board_data *bdata)
{
- if (uart->clocked)
- return;
+ BUG_ON(!cpu_is_omap44xx() && !cpu_is_omap34xx());
- omap_device_enable(uart->pdev);
- uart->clocked = 1;
- omap_uart_restore_context(uart);
-}
-
-#ifdef CONFIG_PM
-
-static inline void omap_uart_disable_clocks(struct omap_uart_state *uart)
-{
- if (!uart->clocked)
- return;
-
- omap_uart_save_context(uart);
- uart->clocked = 0;
- omap_device_idle(uart->pdev);
-}
-
-static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
-{
- /* Set wake-enable bit */
- if (uart->wk_en && uart->wk_mask) {
- u32 v = __raw_readl(uart->wk_en);
- v |= uart->wk_mask;
- __raw_writel(v, uart->wk_en);
- }
-
- /* Ensure IOPAD wake-enables are set */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 v = omap_ctrl_readw(uart->padconf);
- v |= OMAP3_PADCONF_WAKEUPENABLE0;
- omap_ctrl_writew(v, uart->padconf);
+ switch (bdata->id) {
+ case 0:
+ bdata->pads = default_uart1_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads);
+ break;
+ case 1:
+ bdata->pads = default_uart2_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads);
+ break;
+ case 2:
+ bdata->pads = default_uart3_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads);
+ break;
+ case 3:
+ if (cpu_is_omap44xx()) {
+ bdata->pads = default_omap4_uart4_pads;
+ bdata->pads_cnt =
+ ARRAY_SIZE(default_omap4_uart4_pads);
+ } else {
+ bdata->pads = default_omap36xx_uart4_pads;
+ bdata->pads_cnt =
+ ARRAY_SIZE(default_omap36xx_uart4_pads);
+ }
+ break;
+ default:
+ break;
}
}
-static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
+/* TBD: Will be removed once we have an irq-chaining mechanism */
+static bool omap_uart_chk_wakeup(struct platform_device *pdev)
{
- /* Clear wake-enable bit */
- if (uart->wk_en && uart->wk_mask) {
- u32 v = __raw_readl(uart->wk_en);
- v &= ~uart->wk_mask;
- __raw_writel(v, uart->wk_en);
+ struct omap_uart_port_info *up = pdev->dev.platform_data;
+ struct omap_device *od;
+ u32 wkst = 0;
+ bool ret = false;
+
+ od = to_omap_device(pdev);
+ if (omap_hwmod_pad_get_wakeup_status(od->hwmods[0]))
+ ret = true;
+
+ if (up->wk_st && up->wk_en && up->wk_mask) {
+ /* Check for normal UART wakeup (and clear it) */
+ wkst = __raw_readl(up->wk_st) & up->wk_mask;
+ if (wkst) {
+ __raw_writel(wkst, up->wk_st);
+ ret = true;
+ }
}
- /* Ensure IOPAD wake-enables are cleared */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 v = omap_ctrl_readw(uart->padconf);
- v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
- omap_ctrl_writew(v, uart->padconf);
- }
+ return ret;
}
-static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
- int enable)
+static void omap_uart_wakeup_enable(struct platform_device *pdev, bool enable)
{
- u8 idlemode;
+ struct omap_device *od;
- if (enable) {
- /**
- * Errata 2.15: [UART]:Cannot Acknowledge Idle Requests
- * in Smartidle Mode When Configured for DMA Operations.
- */
- if (uart->dma_enabled)
- idlemode = HWMOD_IDLEMODE_FORCE;
- else
- idlemode = HWMOD_IDLEMODE_SMART;
- } else {
- idlemode = HWMOD_IDLEMODE_NO;
- }
-
- omap_hwmod_set_slave_idlemode(uart->oh, idlemode);
-}
-
-static void omap_uart_block_sleep(struct omap_uart_state *uart)
-{
- omap_uart_enable_clocks(uart);
-
- omap_uart_smart_idle_enable(uart, 0);
- uart->can_sleep = 0;
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
+ od = to_omap_device(pdev);
+ if (enable)
+ omap_hwmod_enable_wakeup(od->hwmods[0]);
else
- del_timer(&uart->timer);
+ omap_hwmod_disable_wakeup(od->hwmods[0]);
}
-static void omap_uart_allow_sleep(struct omap_uart_state *uart)
+static void omap_uart_idle_init(struct omap_uart_port_info *uart,
+ unsigned short num)
{
- if (device_may_wakeup(&uart->pdev->dev))
- omap_uart_enable_wakeup(uart);
- else
- omap_uart_disable_wakeup(uart);
-
- if (!uart->clocked)
- return;
-
- omap_uart_smart_idle_enable(uart, 1);
- uart->can_sleep = 1;
- del_timer(&uart->timer);
-}
-
-static void omap_uart_idle_timer(unsigned long data)
-{
- struct omap_uart_state *uart = (struct omap_uart_state *)data;
-
- omap_uart_allow_sleep(uart);
-}
-
-void omap_uart_prepare_idle(int num)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (num == uart->num && uart->can_sleep) {
- omap_uart_disable_clocks(uart);
- return;
- }
- }
-}
-
-void omap_uart_resume_idle(int num)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (num == uart->num && uart->can_sleep) {
- omap_uart_enable_clocks(uart);
-
- /* Check for IO pad wakeup */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 p = omap_ctrl_readw(uart->padconf);
-
- if (p & OMAP3_PADCONF_WAKEUPEVENT0)
- omap_uart_block_sleep(uart);
- }
-
- /* Check for normal UART wakeup */
- if (__raw_readl(uart->wk_st) & uart->wk_mask)
- omap_uart_block_sleep(uart);
- return;
- }
- }
-}
-
-void omap_uart_prepare_suspend(void)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- omap_uart_allow_sleep(uart);
- }
-}
-
-int omap_uart_can_sleep(void)
-{
- struct omap_uart_state *uart;
- int can_sleep = 1;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (!uart->clocked)
- continue;
-
- if (!uart->can_sleep) {
- can_sleep = 0;
- continue;
- }
-
- /* This UART can now safely sleep. */
- omap_uart_allow_sleep(uart);
- }
-
- return can_sleep;
-}
-
-/**
- * omap_uart_interrupt()
- *
- * This handler is used only to detect that *any* UART interrupt has
- * occurred. It does _nothing_ to handle the interrupt. Rather,
- * any UART interrupt will trigger the inactivity timer so the
- * UART will not idle or sleep for its timeout period.
- *
- **/
-/* static int first_interrupt; */
-static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
-{
- struct omap_uart_state *uart = dev_id;
-
- omap_uart_block_sleep(uart);
-
- return IRQ_NONE;
-}
-
-static void omap_uart_idle_init(struct omap_uart_state *uart)
-{
- int ret;
-
- uart->can_sleep = 0;
- uart->timeout = DEFAULT_TIMEOUT;
- setup_timer(&uart->timer, omap_uart_idle_timer,
- (unsigned long) uart);
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- omap_uart_smart_idle_enable(uart, 0);
-
- if (cpu_is_omap34xx() && !cpu_is_ti816x()) {
- u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
+ if (cpu_is_omap44xx()) {
+ uart->wer |= OMAP4_UART_WER_MOD_WKUP;
+ } else if (cpu_is_omap34xx()) {
+ u32 mod = num > 1 ? OMAP3430_PER_MOD : CORE_MOD;
u32 wk_mask = 0;
- u32 padconf = 0;
- /* XXX These PRM accesses do not belong here */
+ uart->wer |= OMAP2_UART_WER_MOD_WKUP;
uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1);
- switch (uart->num) {
+ switch (num) {
case 0:
wk_mask = OMAP3430_ST_UART1_MASK;
- padconf = 0x182;
break;
case 1:
wk_mask = OMAP3430_ST_UART2_MASK;
- padconf = 0x17a;
break;
case 2:
wk_mask = OMAP3430_ST_UART3_MASK;
- padconf = 0x19e;
break;
case 3:
wk_mask = OMAP3630_ST_UART4_MASK;
- padconf = 0x0d2;
break;
}
uart->wk_mask = wk_mask;
- uart->padconf = padconf;
} else if (cpu_is_omap24xx()) {
u32 wk_mask = 0;
u32 wk_en = PM_WKEN1, wk_st = PM_WKST1;
- switch (uart->num) {
+ switch (num) {
case 0:
wk_mask = OMAP24XX_ST_UART1_MASK;
break;
@@ -543,162 +298,87 @@
uart->wk_en = NULL;
uart->wk_st = NULL;
uart->wk_mask = 0;
- uart->padconf = 0;
}
-
- uart->irqflags |= IRQF_SHARED;
- ret = request_threaded_irq(uart->irq, NULL, omap_uart_interrupt,
- IRQF_SHARED, "serial idle", (void *)uart);
- WARN_ON(ret);
}
-void omap_uart_enable_irqs(int enable)
+char *cmdline_find_option(char *str)
{
- int ret;
- struct omap_uart_state *uart;
+ extern char *saved_command_line;
- list_for_each_entry(uart, &uart_list, node) {
- if (enable) {
- pm_runtime_put_sync(&uart->pdev->dev);
- ret = request_threaded_irq(uart->irq, NULL,
- omap_uart_interrupt,
- IRQF_SHARED,
- "serial idle",
- (void *)uart);
- } else {
- pm_runtime_get_noresume(&uart->pdev->dev);
- free_irq(uart->irq, (void *)uart);
+ return strstr(saved_command_line, str);
+}
+
+struct omap_hwmod *omap_uart_hwmod_lookup(int num)
+{
+ struct omap_hwmod *oh;
+ char oh_name[MAX_UART_HWMOD_NAME_LEN];
+
+ snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN, "uart%d", num + 1);
+ oh = omap_hwmod_lookup(oh_name);
+ WARN(!oh, "Could not look up hwmod info for %s\n",
+ oh_name);
+ return oh;
+}
+
+void omap_rts_mux_write(u16 val, int num)
+{
+ struct omap_hwmod *oh;
+ int i;
+
+ oh = omap_uart_hwmod_lookup(num);
+ if (!oh)
+ return;
+
+ for (i = 0; i < oh->mux->nr_pads ; i++) {
+ if (strstr(oh->mux->pads[i].name, "rts")) {
+ omap_mux_write(oh->mux->pads[i].partition,
+ val,
+ oh->mux->pads[i].mux[0].reg_offset);
+ break;
}
}
}
-static ssize_t sleep_timeout_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *odev = to_omap_device(pdev);
- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
-
- return sprintf(buf, "%u\n", uart->timeout / HZ);
-}
-
-static ssize_t sleep_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *odev = to_omap_device(pdev);
- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
- unsigned int value;
-
- if (sscanf(buf, "%u", &value) != 1) {
- dev_err(dev, "sleep_timeout_store: Invalid value\n");
- return -EINVAL;
- }
-
- uart->timeout = value * HZ;
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- else
- /* A zero value means disable timeout feature */
- omap_uart_block_sleep(uart);
-
- return n;
-}
-
-static DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show,
- sleep_timeout_store);
-#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
-#else
-static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
-static void omap_uart_block_sleep(struct omap_uart_state *uart)
-{
- /* Needed to enable UART clocks when built without CONFIG_PM */
- omap_uart_enable_clocks(uart);
-}
-#define DEV_CREATE_FILE(dev, attr)
-#endif /* CONFIG_PM */
-
-#ifndef CONFIG_SERIAL_OMAP
-/*
- * Override the default 8250 read handler: mem_serial_in()
- * Empty RX fifo read causes an abort on omap3630 and omap4
- * This function makes sure that an empty rx fifo is not read on these silicons
- * (OMAP1/2/3430 are not affected)
- */
-static unsigned int serial_in_override(struct uart_port *up, int offset)
-{
- if (UART_RX == offset) {
- unsigned int lsr;
- lsr = __serial_read_reg(up, UART_LSR);
- if (!(lsr & UART_LSR_DR))
- return -EPERM;
- }
-
- return __serial_read_reg(up, offset);
-}
-
-static void serial_out_override(struct uart_port *up, int offset, int value)
-{
- unsigned int status, tmout = 10000;
-
- status = __serial_read_reg(up, UART_LSR);
- while (!(status & UART_LSR_THRE)) {
- /* Wait up to 10ms for the character(s) to be sent. */
- if (--tmout == 0)
- break;
- udelay(1);
- status = __serial_read_reg(up, UART_LSR);
- }
- __serial_write_reg(up, offset, value);
-}
-#endif
-
static int __init omap_serial_early_init(void)
{
int i = 0;
+ char omap_tty_name[MAX_UART_HWMOD_NAME_LEN];
+ struct omap_hwmod *oh;
- do {
- char oh_name[MAX_UART_HWMOD_NAME_LEN];
- struct omap_hwmod *oh;
- struct omap_uart_state *uart;
-
- snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN,
- "uart%d", i + 1);
- oh = omap_hwmod_lookup(oh_name);
- if (!oh)
- break;
-
- uart = kzalloc(sizeof(struct omap_uart_state), GFP_KERNEL);
- if (WARN_ON(!uart))
- return -ENODEV;
-
- uart->oh = oh;
- uart->num = i++;
- list_add_tail(&uart->node, &uart_list);
- num_uarts++;
-
- /*
- * NOTE: omap_hwmod_setup*() has not yet been called,
- * so no hwmod functions will work yet.
- */
-
- /*
- * During UART early init, device need to be probed
- * to determine SoC specific init before omap_device
- * is ready. Therefore, don't allow idle here
- */
- uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
- } while (1);
-
+ for (i = 0; i < OMAP_MAX_HSUART_PORTS; i++) {
+ snprintf(omap_tty_name, MAX_UART_HWMOD_NAME_LEN,
+ "%s%d", OMAP_SERIAL_NAME, i);
+ if (cmdline_find_option(omap_tty_name)) {
+ omap_uart_con_id = i;
+ oh = omap_uart_hwmod_lookup(i);
+ oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
+ return 0;
+ }
+ }
return 0;
}
core_initcall(omap_serial_early_init);
+void __init omap_serial_init_port_pads(int id, struct omap_device_pad *pads,
+ int size, struct omap_uart_port_info *info)
+{
+ struct omap_board_data bdata;
+
+ bdata.id = id;
+ bdata.flags = 0;
+ bdata.pads = pads;
+ bdata.pads_cnt = size;
+
+ if (!bdata.pads)
+ omap_serial_fill_default_pads(&bdata);
+
+ omap_serial_init_port(&bdata, info);
+}
+
/**
* omap_serial_init_port() - initialize single serial port
* @bdata: port specific board data pointer
+ * @info: platform specific data pointer
*
* This function initialies serial driver for given port only.
* Platforms can call this function instead of omap_serial_init()
@@ -707,171 +387,114 @@
* Don't mix calls to omap_serial_init_port() and omap_serial_init(),
* use only one of the two.
*/
-void __init omap_serial_init_port(struct omap_board_data *bdata)
+void __init omap_serial_init_port(struct omap_board_data *bdata,
+ struct omap_uart_port_info *info)
{
- struct omap_uart_state *uart;
struct omap_hwmod *oh;
struct omap_device *od;
- void *pdata = NULL;
- u32 pdata_size = 0;
- char *name;
-#ifndef CONFIG_SERIAL_OMAP
- struct plat_serial8250_port ports[2] = {
- {},
- {.flags = 0},
- };
- struct plat_serial8250_port *p = &ports[0];
-#else
- struct omap_uart_port_info omap_up;
-#endif
+ struct omap_uart_port_info *pdata;
+ char *name = DRIVER_NAME;
if (WARN_ON(!bdata))
return;
if (WARN_ON(bdata->id < 0))
return;
- if (WARN_ON(bdata->id >= num_uarts))
+ if (WARN_ON(bdata->id >= OMAP_MAX_HSUART_PORTS))
return;
- list_for_each_entry(uart, &uart_list, node)
- if (bdata->id == uart->num)
- break;
+ oh = omap_uart_hwmod_lookup(bdata->id);
+ if (!oh)
+ return;
- oh = uart->oh;
- uart->dma_enabled = 0;
-#ifndef CONFIG_SERIAL_OMAP
- name = "serial8250";
+ if (info == NULL)
+ info = omap_serial_default_info;
- /*
- * !! 8250 driver does not use standard IORESOURCE* It
- * has it's own custom pdata that can be taken from
- * the hwmod resource data. But, this needs to be
- * done after the build.
- *
- * ?? does it have to be done before the register ??
- * YES, because platform_device_data_add() copies
- * pdata, it does not use a pointer.
- */
- p->flags = UPF_BOOT_AUTOCONF;
- p->iotype = UPIO_MEM;
- p->regshift = 2;
- p->uartclk = OMAP24XX_BASE_BAUD * 16;
- p->irq = oh->mpu_irqs[0].irq;
- p->mapbase = oh->slaves[0]->addr->pa_start;
- p->membase = omap_hwmod_get_mpu_rt_va(oh);
- p->irqflags = IRQF_SHARED;
- p->private_data = uart;
-
- /*
- * omap44xx, ti816x: Never read empty UART fifo
- * omap3xxx: Never read empty UART fifo on UARTs
- * with IP rev >=0x52
- */
- uart->regshift = p->regshift;
- uart->membase = p->membase;
- if (cpu_is_omap44xx() || cpu_is_ti816x())
- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
- else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF)
- >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
-
- if (uart->errata & UART_ERRATA_FIFO_FULL_ABORT) {
- p->serial_in = serial_in_override;
- p->serial_out = serial_out_override;
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("Memory allocation for UART pdata failed\n");
+ return;
}
- pdata = &ports[0];
- pdata_size = 2 * sizeof(struct plat_serial8250_port);
-#else
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ pdata->errata |= UART_ERRATA_i202_MDR1_ACCESS;
- name = DRIVER_NAME;
+ omap_uart_idle_init(pdata, bdata->id);
- omap_up.dma_enabled = uart->dma_enabled;
- omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
- omap_up.mapbase = oh->slaves[0]->addr->pa_start;
- omap_up.membase = omap_hwmod_get_mpu_rt_va(oh);
- omap_up.irqflags = IRQF_SHARED;
- omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
-
- pdata = &omap_up;
- pdata_size = sizeof(struct omap_uart_port_info);
+ pdata->uartclk = OMAP24XX_BASE_BAUD * 16;
+ pdata->flags = UPF_BOOT_AUTOCONF;
+ pdata->enable_wakeup = omap_uart_wakeup_enable;
+ pdata->use_dma = info->use_dma;
+ pdata->chk_wakeup = omap_uart_chk_wakeup;
+ pdata->dma_rx_buf_size = info->dma_rx_buf_size;
+ pdata->dma_rx_poll_rate = info->dma_rx_poll_rate;
+ pdata->dma_rx_timeout = info->dma_rx_timeout;
+ pdata->auto_sus_timeout = info->auto_sus_timeout;
+ pdata->wake_peer = info->wake_peer;
+ pdata->rts_mux_driver_control = info->rts_mux_driver_control;
+ if (bdata->id == omap_uart_con_id) {
+ pdata->console_uart = true;
+#ifdef CONFIG_DEBUG_LL
+ pdata->auto_sus_timeout = -1;
#endif
+ }
- if (WARN_ON(!oh))
- return;
+ if (pdata->use_dma &&
+ cpu_is_omap44xx() && omap_rev() > OMAP4430_REV_ES1_0)
+ pdata->errata |= OMAP4_UART_ERRATA_i659_TX_THR;
- od = omap_device_build(name, uart->num, oh, pdata, pdata_size,
- omap_uart_latency,
- ARRAY_SIZE(omap_uart_latency), false);
+ od = omap_device_build(name, bdata->id, oh, pdata,
+ sizeof(*pdata), omap_uart_latency,
+ ARRAY_SIZE(omap_uart_latency), false);
WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
name, oh->name);
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
- uart->irq = oh->mpu_irqs[0].irq;
- uart->regshift = 2;
- uart->mapbase = oh->slaves[0]->addr->pa_start;
- uart->membase = omap_hwmod_get_mpu_rt_va(oh);
- uart->pdev = &od->pdev;
-
- oh->dev_attr = uart;
-
- console_lock(); /* in case the earlycon is on the UART */
-
- /*
- * Because of early UART probing, UART did not get idled
- * on init. Now that omap_device is ready, ensure full idle
- * before doing omap_device_enable().
- */
- omap_hwmod_idle(uart->oh);
-
- omap_device_enable(uart->pdev);
- omap_uart_idle_init(uart);
- omap_uart_reset(uart);
- omap_hwmod_enable_wakeup(uart->oh);
- omap_device_idle(uart->pdev);
-
- /*
- * Need to block sleep long enough for interrupt driven
- * driver to start. Console driver is in polling mode
- * so device needs to be kept enabled while polling driver
- * is in use.
- */
- if (uart->timeout)
- uart->timeout = (30 * HZ);
- omap_uart_block_sleep(uart);
- uart->timeout = DEFAULT_TIMEOUT;
-
- console_unlock();
-
- if ((cpu_is_omap34xx() && uart->padconf) ||
- (uart->wk_en && uart->wk_mask)) {
+ if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads) ||
+ (pdata->wk_en && pdata->wk_mask)) {
device_init_wakeup(&od->pdev.dev, true);
- DEV_CREATE_FILE(&od->pdev.dev, &dev_attr_sleep_timeout);
}
- /* Enable the MDR1 errata for OMAP3 */
- if (cpu_is_omap34xx() && !cpu_is_ti816x())
- uart->errata |= UART_ERRATA_i202_MDR1_ACCESS;
+ kfree(pdata);
}
/**
- * omap_serial_init() - initialize all supported serial ports
+ * omap_serial_board_init() - initialize all supported serial ports
+ * @platform_data: platform specific data pointer
*
* Initializes all available UARTs as serial ports. Platforms
* can call this function when they want to have default behaviour
* for serial ports (e.g initialize them all as serial ports).
*/
-void __init omap_serial_init(void)
+void __init omap_serial_board_init(struct omap_uart_port_info *platform_data)
{
- struct omap_uart_state *uart;
struct omap_board_data bdata;
+ u8 i;
- list_for_each_entry(uart, &uart_list, node) {
- bdata.id = uart->num;
+ for (i = 0; i < OMAP_MAX_HSUART_PORTS; i++) {
+ bdata.id = i;
bdata.flags = 0;
bdata.pads = NULL;
bdata.pads_cnt = 0;
- omap_serial_init_port(&bdata);
+ if (cpu_is_omap44xx() || cpu_is_omap34xx())
+ omap_serial_fill_default_pads(&bdata);
+
+ if (platform_data == NULL)
+ omap_serial_init_port(&bdata, NULL);
+ else
+ omap_serial_init_port(&bdata, &platform_data[i]);
}
}
+
+/**
+ * omap_serial_init() - initialize all supported serial ports
+ *
+ * Initializes all available UARTs.
+ * Platforms can call this function when they want to have default behaviour
+ * for serial ports (e.g initialize them all as serial ports).
+ */
+void __init omap_serial_init(void)
+{
+ omap_serial_board_init(NULL);
+}
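
With the reworked interface above, a board file either calls omap_serial_init() for the defaults or hands its own per-port settings to omap_serial_board_init(). A hedged sketch of the latter; the board_* names and the specific values are examples only, using the fields shown in omap_serial_default_info:

/* Illustrative sketch only -- not part of this patch. */
static struct omap_uart_port_info board_uart_info[OMAP_MAX_HSUART_PORTS] __initdata = {
	[0] = { .use_dma = 0, .auto_sus_timeout = DEFAULT_AUTOSUSPEND_DELAY },
	[1] = { .use_dma = 0, .auto_sus_timeout = DEFAULT_AUTOSUSPEND_DELAY },
	[2] = { .use_dma = 0, .auto_sus_timeout = DEFAULT_AUTOSUSPEND_DELAY },
	[3] = {
		/* e.g. a port wired to an external chip: enable RX DMA */
		.use_dma = 1,
		.dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE,
		.dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE,
		.dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT,
		.auto_sus_timeout = DEFAULT_AUTOSUSPEND_DELAY,
	},
};

static void __init board_serial_init(void)
{
	omap_serial_board_init(board_uart_info);
}
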
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
new file mode 100644
index 0000000..b025e5f
--- /dev/null
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -0,0 +1,695 @@
+/*
+ * OMAP44xx CPU low power powerdown and powerup code.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <asm/smp_scu.h>
+#include <asm/memory.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+#include <mach/omap4-common.h>
+
+#include "omap4-sar-layout.h"
+
+#ifdef CONFIG_SMP
+
+/* Masks used for MMU manipulation */
+#define TTRBIT_MASK 0xffffc000
+#define TABLE_INDEX_MASK 0xfff00000
+#define TABLE_ENTRY 0x00000c02
+#define CACHE_DISABLE_MASK 0xffffe7fb
+#define TABLE_ADDRESS_OFFSET 0x04
+#define CR_VALUE_OFFSET 0x08
+#define SCU_POWER_SECURE_INDEX 0x108
+
+
+/*
+ * Macro to call PPA svc when MMU is OFF
+ * Caller must setup r0 and r3 before calling this macro
+ * @r0: PPA service ID
+ * @r3: Pointer to params
+*/
+.macro LM_CALL_PPA_SERVICE_PA
+ mov r1, #0x0 @ Process ID
+ mov r2, #0x4 @ Flag
+ mov r6, #0xff
+ mov r12, #0x00 @ Secure Service ID
+ dsb
+ smc #0
+.endm
+
+/*
+ * To load POR which was saved in SAR RAM
+ */
+POR_params:
+.word 1, 0
+
+
+ppa_zero_params:
+ .word 0x0
+
+/*
+ * =============================
+ * == CPU suspend entry point ==
+ * =============================
+ *
+ * void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state)
+ *
+ * This function code saves the CPU context and performs the CPU
+ * power down sequence. Calling WFI effectively changes the CPU
+ * power domains states to the desired target power state.
+ *
+ * @cpu : contains cpu id (r0)
+ * @save_state : contains context save state (r1)
+ * 0 - No context lost
+ * 1 - CPUx L1 and logic lost: MPUSS CSWR
+ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
+ * @return: This function never returns for CPU OFF and DORMANT power states.
+ * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
+ * from this follows a full CPU reset path via ROM code to CPU restore code.
+ * It returns to the caller for CPU INACTIVE and ON power states or in case
+ * CPU failed to transition to targeted OFF/DORMANT state.
+ */
+
+ENTRY(omap4_cpu_suspend)
+ stmfd sp!, {r0-r12, lr} @ Save registers on stack
+ cmp r1, #0x0
+ beq do_WFI @ Nothing to save, jump to WFI
+ mov r5, r0
+ mov r6, r1
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ ands r5, r5, #0x0f
+ streq r6, [r8, #L2X0_SAVE_OFFSET0] @ Store save state
+ strne r6, [r8, #L2X0_SAVE_OFFSET1]
+ orreq r8, r8, #CPU0_SAVE_OFFSET
+ orrne r8, r8, #CPU1_SAVE_OFFSET
+
+ /*
+ * Save only needed CPU CP15 registers. VFP, breakpoint,
+ * performance monitor registers are not saved. Generic
+ * code is supposed to take care of those.
+ */
+ mov r4, sp @ Store sp
+ mrs r5, spsr @ Store spsr
+ mov r6, lr @ Store lr
+ stmia r8!, {r4-r6}
+
+ /* c1 and c2 registers */
+ mrc p15, 0, r4, c1, c0, 2 @ CPACR
+ mrc p15, 0, r5, c2, c0, 0 @ TTBR0
+ mrc p15, 0, r6, c2, c0, 1 @ TTBR1
+ mrc p15, 0, r7, c2, c0, 2 @ TTBCR
+ stmia r8!, {r4-r7}
+
+ /* c3 and c10 registers */
+ mrc p15, 0, r4, c3, c0, 0 @ DACR
+ mrc p15, 0, r5, c10, c2, 0 @ PRRR
+ mrc p15, 0, r6, c10, c2, 1 @ NMRR
+ stmia r8!,{r4-r6}
+
+ /* c12, c13 and CPSR registers */
+ mrc p15, 0, r4, c13, c0, 1 @ Context ID
+ mrc p15, 0, r5, c13, c0, 2 @ User r/w thread ID
+ mrc p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
+ mrs r7, cpsr @ Store CPSR
+ stmia r8!, {r4-r7}
+
+ /* c1 control register */
+ mrc p15, 0, r4, c1, c0, 0 @ Save control register
+ stmia r8!, {r4}
+
+ /*
+ * Flush all data from the L1 data cache before disabling
+ * SCTLR.C bit.
+ */
+ bl v7_flush_dcache_all
+
+ bl omap4_get_sar_ram_base
+ ldr r9, [r0, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne skip_secure_l1_flush
+ mov r0, #SCU_PM_NORMAL
+ mov r1, #0xFF @ clean secure L1
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =SCU_POWER_SECURE_INDEX
+ dsb
+ smc #0
+ dsb
+ ldmfd r13!, {r4-r12, r14}
+skip_secure_l1_flush:
+
+ /*
+ * Clear the SCTLR.C bit to prevent further data cache
+ * allocation. Clearing SCTLR.C would make all the data accesses
+ * strongly ordered and would not hit the cache.
+ */
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #(1 << 2) @ Disable the C bit
+ mcr p15, 0, r0, c1, c0, 0
+ isb
+
+ /*
+ * Invalidate the L1 data cache. Even though only an invalidate is
+ * necessary, the exported flush API is used here; doing a clean
+ * on an already clean cache is almost a NOP.
+ */
+ bl v7_flush_dcache_all
+
+ /*
+ * Switch the CPU from Symmetric Multiprocessing (SMP) mode
+ * to Asymmetric Multiprocessing (AMP) mode by programming
+ * the SCU power status to DORMANT or OFF mode.
+ * This enables the CPU to be taken out of coherency by
+ * preventing the CPU from receiving cache, TLB, or BTB
+ * maintenance operations broadcast by other CPUs in the cluster.
+ */
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne scu_gp_set
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ ldreq r0, [r8, #SCU_OFFSET0]
+ ldrne r0, [r8, #SCU_OFFSET1]
+ mov r1, #0x00 @ Secure L1 is clean already
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =SCU_POWER_SECURE_INDEX
+ dsb
+ smc #0
+ dsb
+ ldmfd r13!, {r4-r12, r14}
+ b skip_scu_gp_set
+scu_gp_set:
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ ldreq r1, [r8, #SCU_OFFSET0]
+ ldrne r1, [r8, #SCU_OFFSET1]
+ bl omap4_get_scu_base
+ bl scu_power_mode
+skip_scu_gp_set:
+ isb
+ dsb
+
+ mrc p15, 0, r0, c1, c1, 2 @Read NSACR data
+ tst r0, #(1 << 18)
+ mrcne p15, 0, r0, c1, c0, 1
+ bicne r0, r0, #(1 << 6)
+ mcrne p15, 0, r0, c1, c0, 1
+ isb
+
+
+#ifdef CONFIG_CACHE_L2X0
+ /*
+ * Clean and invalidate the L2 cache.
+ * The common cache-l2x0.c functions can't be used here since they
+ * use spinlocks. We are out of coherency here with the data cache
+ * disabled. The spinlock implementation uses exclusive load/store
+ * instructions, which can fail without the data cache being enabled.
+ * OMAP4 hardware doesn't support an external exclusive monitor that
+ * could overcome this exclusive-access issue, so the CPU could
+ * deadlock.
+ */
+l2x_clean_inv:
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
+ ands r5, r5, #0x0f
+ ldreq r0, [r8, #L2X0_SAVE_OFFSET0]
+ ldrne r0, [r8, #L2X0_SAVE_OFFSET1]
+ cmp r0, #3
+ bne do_WFI
+#ifdef CONFIG_PL310_ERRATA_727915
+ mov r0, #0x03
+ mov r12, #0x100
+ dsb
+ smc #0
+ dsb
+#endif
+ bl omap4_get_l2cache_base
+ mov r2, r0
+ ldr r0, =0xffff
+ str r0, [r2, #L2X0_CLEAN_INV_WAY]
+wait:
+ ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
+ ands r0, r0, #0xff
+ bne wait
+#ifdef CONFIG_PL310_ERRATA_727915
+ mov r0, #0x00
+ mov r12, #0x100
+ dsb
+ smc #0
+ dsb
+#endif
+l2x_sync:
+ bl omap4_get_l2cache_base
+ mov r2, r0
+ mov r0, #0x0
+ str r0, [r2, #L2X0_CACHE_SYNC]
+sync:
+ ldr r0, [r2, #L2X0_CACHE_SYNC]
+ ands r0, r0, #0x1
+ bne sync
+#endif
+
+do_WFI:
+ bl omap_do_wfi
+
+ /*
+ * CPU is here when it failed to enter OFF/DORMANT or
+ * no low power state was attempted.
+ */
+ mrc p15, 0, r0, c1, c0, 0
+ tst r0, #(1 << 2) @ Check C bit enabled?
+ orreq r0, r0, #(1 << 2) @ Enable the C bit
+ mcreq p15, 0, r0, c1, c0, 0
+ isb
+
+ /* Enable SMP bit if it's being disabled */
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ Check SMP bit enabled?
+ orreq r0, r0, #(1 << 6)
+ mcreq p15, 0, r0, c1, c0, 1
+ isb
+
+ /*
+ * Ensure the CPU power state is set to NORMAL in
+ * SCU power state so that CPU is back in coherency.
+ * In non-coherent mode CPU can lock-up and lead to
+ * system deadlock.
+ */
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne scu_gp_clear
+ mov r0, #SCU_PM_NORMAL
+ mov r1, #0x00
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =SCU_POWER_SECURE_INDEX
+ dsb
+ smc #0
+ dsb
+ ldmfd r13!, {r4-r12, r14}
+ b skip_scu_gp_clear
+scu_gp_clear:
+ bl omap4_get_scu_base
+ mov r1, #SCU_PM_NORMAL
+ bl scu_power_mode
+skip_scu_gp_clear:
+ isb
+ dsb
+
+ ldmfd sp!, {r0-r12, pc} @ Restore regs and return
+ENDPROC(omap4_cpu_suspend)
+
+/*
+ * ============================
+ * == CPU resume entry point ==
+ * ============================
+ *
+ * void omap4_cpu_resume(void)
+ *
+ * ROM code jumps to this function while waking up from CPU
+ * OFF or DORMANT state. Physical address of the function is
+ * stored in the SAR RAM while entering to OFF or DORMANT mode.
+ */
+
+ENTRY(omap4_cpu_resume)
+ /*
+ * CPU1 must check if CPU0 is alive/awake.
+ * If the PL310 is OFF, the MPUSS was OFF and CPU0 is still off, so
+ * CPU1 must go to sleep and wait for CPU0.
+ * CPU0 is needed for any PPA API to work.
+ */
+ mrc p15, 0, r0, c0, c0, 5 @ Get cpuID
+ ands r0, r0, #0x0f @ Continue boot if CPU0
+ beq continue_boot
+ ldr r8, =OMAP44XX_SAR_RAM_BASE
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne continue_boot @ Continue on GP devices
+ ldr r2, =OMAP44XX_L2CACHE_BASE
+ ldr r0, [r2, #L2X0_CTRL]
+ and r0, #0x0f
+ cmp r0, #1 @ is CPU0 already UP?
+ beq ppa_cp15_cpu1_configure @ CPU1 HS go to next stage
+ /*
+ * When CPU1 is released to control of HLOS in the case of OSWR
+ * and OFF mode, PPA below v1.7.3[1] is not performing all
+ * Memory coherency and TLB operations required.
+ *
+ * A WA to recover cleanly from this scenario is to switch CPU1 back to
+ * previous OFF state. This forces a reset of CPU1, which in turn
+ * forces CPU1 not to override MMU descriptors already in place in
+ * internal RAM setup by CPU0. CPU1 will also sync to the in-place
+ * descriptors on the next wakeup. CPU1 wakeup is done by
+ * later kernel subsystems depending on suspend or cpuidle path
+ * being exercised.
+ * NOTE - for OSWR, state provided is 2, and for OFF, state is 3,
+ * Since the bug impacts OFF and OSWR, we need to force a 0x3 to
+ * shut off CPU1
+ *
+ * Since many distributions may not be able to update PPA OR would like
+ * to support platforms with older PPA, we provide a config option.
+ * This is simpler and makes the current code remain cleaner in
+ * comparison to a flag based handling in CPU1 recovery for
+ * board + PPA revision combinations.
+ *
+ * Having this config option enabled even on platforms with fixed PPA
+ * should not impact stability, however, ability to make CPU1 available
+ * for operations a little earlier is curtailed.
+ *
+ * Foot note [1]:
+ * v1.7.3 is the official TI PPA version. Custom PPA could have
+ * the relevant changes ported over to it.
+ */
+#ifdef CONFIG_OMAP4_PPA_CPU1_ONLINE_BUG
+ mov r0, #0x03 @ target CPU1 to OFF(mpusspd=OSWR/OFF)
+ mov r1, #0x00 @ Secure L1 is already clean
+ ldr r12, =SCU_POWER_SECURE_INDEX
+ dsb
+ smc #0
+
+ isb @ Necessary barriers before wfi
+ dsb
+ dmb
+ wfi @ wait for interrupt
+ nop
+ nop
+
+ /*
+ * If we came out of WFI immediately, something unknown happened.
+ * Fall through and loop back to the checks; failing that, retry WFI.
+ */
+#endif
+ /*
+ * CPU0 and CPU1 are released together from OFF mode; however,
+ * CPU0 can be busy doing restore operations while waking
+ * from OFF mode. Since many PPA services need CPU0, we ask
+ * CPU1 to loop back here to stagger CPU1 behind CPU0.
+ */
+ b omap4_cpu_resume
+
+ppa_cp15_cpu1_configure:
+ /*
+ * Configure CP15 for CPU1 on HS devices:
+ * On HS devices CPU0's CP15 is configured at wakeup by the PPA, but
+ * CPU1 must call the PPA to configure its own.
+ * On 4430 devices this call also enables CPU1's access to the SMP bit;
+ * on 4460 devices CPU1 has SMP bit access by default.
+ */
+ mov r0, #PPA_SERVICE_DEFAULT_POR_NS_SMP
+ adr r3, ppa_zero_params @ Pointer to parameters
+ LM_CALL_PPA_SERVICE_PA
+ isb
+ dsb
+ cmp r0, #0x0 @ API returns 0 on success.
+ bne ppa_cp15_cpu1_configure @ retry if we did not succeed
+
+ /* Fall through to continue with boot */
+
+continue_boot:
+
+#ifdef CONFIG_CACHE_L2X0
+ /*
+ * Restore the L2 AUXCTRL and enable the L2 cache.
+ * 0x109 = Program the L2X0 AUXCTRL
+ * 0x102 = Enable the L2 using L2X0 CTRL
+ * register r0 contains value to be programmed.
+ * L2 cache is already invalidate by ROM code as part
+ * of MPUSS OFF wakeup path.
+ */
+ ldr r2, =OMAP44XX_L2CACHE_BASE
+ ldr r0, [r2, #L2X0_CTRL]
+ and r0, #0x0f
+ cmp r0, #1
+ beq skip_l2en @ Skip if already enabled
+
+check_por:
+ ldr r0, =OMAP44XX_SAR_RAM_BASE @ Check DEVICE type
+ ldr r1, [r0, #OMAP_TYPE_OFFSET]
+ cmp r1, #0x1 @ Check for HS device
+ bne skip_por
+ ldr r0, =PPA_SERVICE_PL310_POR @ Setup PPA HAL call
+ ldr r1, =OMAP44XX_SAR_RAM_BASE
+ ldr r4, [r1, #L2X0_PREFETCHCTRL_OFFSET]
+ adr r3, POR_params
+ str r4, [r3, #0x04]
+ LM_CALL_PPA_SERVICE_PA
+skip_por:
+ ldr r3, =OMAP44XX_SAR_RAM_BASE
+ ldr r0, [r3, #L2X0_AUXCTRL_OFFSET]
+ ldr r12, =0x109 @ Setup L2 AUXCTRL value
+ dsb
+ smc #0
+
+ ldr r2, =OMAP44XX_L2CACHE_BASE
+ ldr r4, =OMAP44XX_SAR_RAM_BASE
+ ldr r9, [r4, #L2X0_LOCKDOWN_OFFSET0]
+ str r9, [r2, #L2X0_LOCKDOWN_WAY_D0]
+ str r9, [r2, #L2X0_LOCKDOWN_WAY_D1]
+ str r9, [r2, #L2X0_LOCKDOWN_WAY_I0]
+ str r9, [r2, #L2X0_LOCKDOWN_WAY_I1]
+
+ dsb
+ mov r0, #0x1
+ ldr r12, =0x102 @ Enable L2 Cache controller
+ dsb
+ smc #0
+ dsb
+skip_l2en:
+#endif
+
+ /* Check if we have Public access to SMP bit */
+ mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
+ tst r0, #(1 << 18)
+ beq skip_ns_smp_enable @ Skip if still no access
+
+ /* Set the SMP bit if it is not already set */
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ Check SMP bit enabled?
+ orreq r0, r0, #(1 << 6)
+ mcreq p15, 0, r0, c1, c0, 1
+ isb
+skip_ns_smp_enable:
+
+ /*
+ * Check the wakeup cpuid and use appropriate
+ * SAR BANK location for context restore.
+ */
+ ldr r3, =OMAP44XX_SAR_RAM_BASE
+ mov r1, #0
+ mcr p15, 0, r1, c7, c5, 0 @ Invalidate L1 I
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ands r0, r0, #0x0f
+ orreq r3, r3, #CPU0_SAVE_OFFSET
+ orrne r3, r3, #CPU1_SAVE_OFFSET
+
+ /* Restore cp15 registers */
+ ldmia r3!, {r4-r6}
+ mov sp, r4 @ Restore sp
+ msr spsr_cxsf, r5 @ Restore spsr
+ mov lr, r6 @ Restore lr
+
+ /* c1 and c2 registers */
+ ldmia r3!, {r4-r7}
+ mcr p15, 0, r4, c1, c0, 2 @ CPACR
+ mcr p15, 0, r5, c2, c0, 0 @ TTBR0
+ mcr p15, 0, r6, c2, c0, 1 @ TTBR1
+ mcr p15, 0, r7, c2, c0, 2 @ TTBCR
+
+ /* c3 and c10 registers */
+ ldmia r3!,{r4-r6}
+ mcr p15, 0, r4, c3, c0, 0 @ DACR
+ mcr p15, 0, r5, c10, c2, 0 @ PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ NMRR
+
+ /* c12, c13 and CPSR registers */
+ ldmia r3!,{r4-r7}
+ mcr p15, 0, r4, c13, c0, 1 @ Context ID
+ mcr p15, 0, r5, c13, c0, 2 @ User r/w thread ID
+ mcr p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
+ msr cpsr, r7 @ Restore cpsr
+
+ /*
+ * Enable the MMU here. The page table entry needs to be altered
+ * to create a temporary 1:1 map and then the entry is restored
+ * once the MMU is enabled.
+ */
+ mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
+ and r7, #0x7 @ Extract N (0:2) to decide
+ cmp r7, #0x0 @ TTBR0/TTBR1
+ beq use_ttbr0
+ttbr_error:
+ b ttbr_error @ Only N = 0 supported
+use_ttbr0:
+ mrc p15, 0, r2, c2, c0, 0 @ Read TTBR0
+ ldr r5, =TTRBIT_MASK
+ and r2, r5
+ mov r4, pc
+ ldr r5, =TABLE_INDEX_MASK
+ and r4, r5 @ r4 = 31 to 20 bits of pc
+ ldr r1, =TABLE_ENTRY
+ add r1, r1, r4 @ r1 has value of table entry
+ lsr r4, #18 @ Address of table entry
+ add r2, r4 @ r2 - location to be modified
+
+ /* Ensure the modified entry makes it to main memory */
+#ifdef CONFIG_CACHE_L2X0
+ ldr r5, =OMAP44XX_L2CACHE_BASE
+ str r2, [r5, #L2X0_CLEAN_INV_LINE_PA]
+wait_l2:
+ ldr r0, [r5, #L2X0_CLEAN_INV_LINE_PA]
+ ands r0, #1
+ bne wait_l2
+#endif
+
+ /* Storing previous entry of location being modified */
+ ldr r5, =OMAP44XX_SAR_RAM_BASE
+ ldr r4, [r2]
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ streq r4, [r5, #MMU_OFFSET0] @ Save the previous entry
+ strne r4, [r5, #MMU_OFFSET1]
+ str r1, [r2] @ Modify the table entry
+
+ /*
+ * Storing address of entry being modified
+ * It will be restored after enabling MMU
+ */
+ ldr r5, =OMAP44XX_SAR_RAM_BASE
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ orreq r5, r5, #MMU_OFFSET0
+ orrne r5, r5, #MMU_OFFSET1
+ str r2, [r5, #TABLE_ADDRESS_OFFSET]
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
+ mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
+ mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
+ mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
+
+ /*
+ * Restore control register but don't enable Data caches here.
+ * Caches will be enabled after restoring MMU table entry.
+ */
+ ldmia r3!, {r4}
+ str r4, [r5, #CR_VALUE_OFFSET] @ Store previous value of CR
+ ldr r2, =CACHE_DISABLE_MASK
+ and r4, r2
+ mcr p15, 0, r4, c1, c0, 0
+ isb
+ dsb
+ ldr r0, =mmu_on_label
+ bx r0
+mmu_on_label:
+ /* Set up the per-CPU stacks */
+ bl cpu_init
+
+ /*
+ * Restore the MMU table entry that was modified for
+ * enabling MMU.
+ */
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ orreq r8, r8, #MMU_OFFSET0 @ Get address of entry that..
+ orrne r8, r8, #MMU_OFFSET1 @ was modified
+ ldr r2, [r8, #TABLE_ADDRESS_OFFSET]
+ ldr r3, =local_va2pa_offet
+ add r2, r2, r3
+ ldr r0, [r8] @ Get the previous value..
+ str r0, [r2] @ which needs to be restored
+ mov r0, #0
+ mcr p15, 0, r0, c7, c1, 6 @ flush TLB and issue barriers
+ mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
+ mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
+ mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
+ mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
+ dsb
+ isb
+ ldr r0, [r8, #CR_VALUE_OFFSET] @ Restore the Control register
+ mcr p15, 0, r0, c1, c0, 0 @ with caches enabled.
+ isb
+
+ ldmfd sp!, {r0-r12, pc} @ restore regs and return
+
+ .equ local_va2pa_offet, (PLAT_PHYS_OFFSET + PAGE_OFFSET)
+
+ENDPROC(omap4_cpu_resume)
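A rough C sketch of the table-patching arithmetic the resume path above performs, for illustration only (not part of the applied diff): the helper name is invented, and TTRBIT_MASK, TABLE_INDEX_MASK and TABLE_ENTRY are the constants defined alongside this assembly.

static u32 *omap4_patch_identity_section(u32 ttbr0_pa, u32 resume_pa, u32 *saved)
{
	u32 *table = (u32 *)(ttbr0_pa & TTRBIT_MASK);	/* L1 table base (physical) */
	u32 idx = (resume_pa & TABLE_INDEX_MASK) >> 20;	/* 1 MB section index; the asm
							 * uses lsr #18 because it adds
							 * a byte offset (idx * 4) */
	u32 *slot = &table[idx];			/* descriptor to patch */

	*saved = *slot;					/* old entry, kept in SAR RAM */
	*slot = (resume_pa & TABLE_INDEX_MASK) | TABLE_ENTRY;	/* VA == PA section */
	return slot;					/* restored once the MMU is on */
}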
+
+ENTRY(omap_bus_sync)
+ stmfd sp!, {r9, lr}
+ /* SO write to drain the MPU-2-DDR T2ASYNC FIFO */
+ bl omap_get_dram_barrier_base
+ ldr r2, [r0]
+ str r2, [r0]
+ /* SO write to drain MPU-2-L3 T2ASYNC FIFO */
+ bl omap_get_sram_barrier_base
+ ldr r2, [r0]
+ str r2, [r0]
+ isb
+ ldmfd sp!, {r9, pc}
+ENDPROC(omap_bus_sync)
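For readers more comfortable with C, the strongly-ordered drain above corresponds roughly to the sketch below. It is illustrative only: it assumes the two barrier-base helpers are callable from C and return __iomem pointers, which is not shown in this hunk.

static void example_bus_sync(void)
{
	void __iomem *dram_so = omap_get_dram_barrier_base();	/* SO-mapped DRAM page */
	void __iomem *sram_so = omap_get_sram_barrier_base();	/* SO-mapped SRAM page */

	/* A read followed by a write through a strongly-ordered mapping
	 * pushes posted writes out of the MPU-2-DDR / MPU-2-L3 async FIFOs. */
	writel(readl(dram_so), dram_so);
	writel(readl(sram_so), sram_so);
	isb();
}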
+
+ENTRY(omap_do_wfi)
+ stmfd sp!, {lr}
+ /* Drain interconnect write buffers. */
+ bl omap_bus_sync
+
+ /*
+ * Execute an ISB instruction to ensure that all of the
+ * CP15 register changes have been committed.
+ */
+ isb
+
+ /*
+ * Execute a barrier instruction to ensure that all cache,
+ * TLB and branch predictor maintenance operations issued
+ * by any CPU in the cluster have completed.
+ */
+ dsb
+ dmb
+
+ /*
+ * Execute a WFI instruction and wait until the
+ * STANDBYWFI output is asserted to indicate that the
+ * CPU is in idle and low power state. The CPU can speculatively
+ * prefetch the instructions so add NOPs after WFI. Sixteen
+ * NOPs as per Cortex-A9 pipeline.
+ */
+ wfi @ Wait For Interrupt
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ldmfd sp!, {pc}
+ENDPROC(omap_do_wfi)
+
+#endif
diff --git a/arch/arm/mach-omap2/smartreflex-class1p5.c b/arch/arm/mach-omap2/smartreflex-class1p5.c
new file mode 100644
index 0000000..2090884
--- /dev/null
+++ b/arch/arm/mach-omap2/smartreflex-class1p5.c
@@ -0,0 +1,678 @@
+/*
+ * Smart reflex Class 1.5 specific implementations
+ *
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ * Nishanth Menon <nm@ti.com>
+ *
+ * Smart reflex class 1.5 is also called periodic SW Calibration
+ * Some of the highlights are as follows:
+ * – Host CPU triggers OPP calibration when transitioning to non calibrated
+ * OPP
+ * – SR-AVS + VP modules are used to perform calibration
+ * – Once completed, the SmartReflex-AVS module can be disabled
+ * – Enables savings based on process, supply DC accuracy and aging
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/kobject.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/opp.h>
+
+#include "smartreflex.h"
+#include "voltage.h"
+#include "dvfs.h"
+
+#define MAX_VDDS 3
+#define SR1P5_SAMPLING_DELAY_MS 1
+#define SR1P5_STABLE_SAMPLES 10
+#define SR1P5_MAX_TRIGGERS 5
+
+/*
+ * We expect events in 10uS; if we don't receive one in twice as long,
+ * we stop waiting for the event and use the current value
+ */
+#define MAX_CHECK_VPTRANS_US 20
+
+/**
+ * struct sr_class1p5_work_data - data meant to be used by calibration work
+ * @work: calibration work
+ * @voltdm: voltage domain for which we are triggering
+ * @vdata: voltage data we are calibrating
+ * @num_calib_triggers: number of triggers from calibration loop
+ * @num_osc_samples: number of samples collected by isr
+ * @u_volt_samples: private data for collecting voltage samples in
+ * case of oscillations. Filled by the notifier and
+ * consumed by the work item.
+ * @work_active: have we scheduled a work item?
+ */
+struct sr_class1p5_work_data {
+ struct delayed_work work;
+ struct voltagedomain *voltdm;
+ struct omap_volt_data *vdata;
+ u8 num_calib_triggers;
+ u8 num_osc_samples;
+ unsigned long u_volt_samples[SR1P5_STABLE_SAMPLES];
+ bool work_active;
+};
+
+#if CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY
+/* recal_work: periodic recalibration work */
+static struct delayed_work recal_work;
+#endif
+
+/**
+ * sr_class1p5_notify() - isr notifier for status events
+ * @voltdm: voltage domain for which we were triggered
+ * @voltdm_cdata: voltage domain specific private class data
+ * @status: notifier event to use
+ *
+ * This basically collects data for the work to use.
+ */
+static int sr_class1p5_notify(struct voltagedomain *voltdm,
+ void *voltdm_cdata,
+ u32 status)
+{
+ struct sr_class1p5_work_data *work_data;
+ int idx = 0;
+
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ work_data = (struct sr_class1p5_work_data *)voltdm_cdata;
+ if (IS_ERR_OR_NULL(work_data)) {
+ pr_err("%s:%s no work data!!\n", __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ /* Wait for transdone so that we know the voltage to read */
+ do {
+ if (omap_vp_is_transdone(voltdm))
+ break;
+ idx++;
+ /* get some constant delay */
+ udelay(1);
+ } while (idx < MAX_CHECK_VPTRANS_US);
+
+ /*
+ * NOTE:
+ * Even if we time out, we still read the data. If we are
+ * oscillating and irq latencies are too high, we could miss the
+ * transdone event. Since we have waited long enough, it is still
+ * safe to read the voltage - don't warn for this.
+ */
+ idx = (work_data->num_osc_samples) % SR1P5_STABLE_SAMPLES;
+ work_data->u_volt_samples[idx] = omap_vp_get_curr_volt(voltdm);
+ work_data->num_osc_samples++;
+
+ omap_vp_clear_transdone(voltdm);
+
+
+ return 0;
+}
+
+/**
+ * sr_class1p5_calib_work() - work which actually does the calibration
+ * @work: pointer to the work
+ *
+ * The calibration routine uses the following logic:
+ * on the first trigger, we start the isr to collect sr voltages
+ * and wait for the stabilization delay (reschedule self instead of sleeping).
+ * After the delay, see if we collected any isr events:
+ * if none, we have the calibrated voltage;
+ * if there are any, we retry until we give up.
+ * On retry timeout, select a voltage to use as the safe voltage.
+ */
+static void sr_class1p5_calib_work(struct work_struct *work)
+{
+ struct sr_class1p5_work_data *work_data =
+ container_of(work, struct sr_class1p5_work_data, work.work);
+ unsigned long u_volt_safe = 0, u_volt_current = 0, u_volt_margin = 0;
+ struct omap_volt_data *volt_data;
+ struct voltagedomain *voltdm;
+ int idx = 0;
+
+ if (!work) {
+ pr_err("%s: ooops.. null work_data?\n", __func__);
+ return;
+ }
+
+ /*
+ * Handle the case where we might have just been scheduled AND
+ * 1.5 disable was called.
+ */
+ if (!mutex_trylock(&omap_dvfs_lock)) {
+ schedule_delayed_work(&work_data->work,
+ msecs_to_jiffies(SR1P5_SAMPLING_DELAY_MS *
+ SR1P5_STABLE_SAMPLES));
+ return;
+ }
+
+ voltdm = work_data->voltdm;
+ /*
+ * In the unlikely case that we did get through when unplanned,
+ * flag and return.
+ */
+ if (unlikely(!work_data->work_active)) {
+ pr_err("%s:%s unplanned work invocation!\n", __func__,
+ voltdm->name);
+ mutex_unlock(&omap_dvfs_lock);
+ return;
+ }
+
+ volt_data = work_data->vdata;
+
+ work_data->num_calib_triggers++;
+ /* if we are triggered first time, we need to start isr to sample */
+ if (work_data->num_calib_triggers == 1) {
+ /* We could be interrupted many times, so, only for debug */
+ pr_debug("%s: %s: Calibration start: Voltage Nominal=%d\n",
+ __func__, voltdm->name, volt_data->volt_nominal);
+ goto start_sampling;
+ }
+
+ /* Stop isr from interrupting our measurements :) */
+ sr_notifier_control(voltdm, false);
+
+ /*
+ * Quit sampling
+ * a) if we have oscillations
+ * b) if we have nominal voltage as the voltage
+ */
+ if (work_data->num_calib_triggers == SR1P5_MAX_TRIGGERS)
+ goto stop_sampling;
+
+ /* if there are no samples captured.. SR is silent, aka stability! */
+ if (!work_data->num_osc_samples) {
+ /* Did we interrupt too early? */
+ u_volt_current = omap_vp_get_curr_volt(voltdm);
+ if (u_volt_current >= volt_data->volt_nominal)
+ goto start_sampling;
+ u_volt_safe = u_volt_current;
+ goto done_calib;
+ }
+
+ /* we have potential oscillations/first sample */
+start_sampling:
+ work_data->num_osc_samples = 0;
+
+ /* Clear transdone events so that we can go on. */
+ do {
+ if (!omap_vp_is_transdone(voltdm))
+ break;
+ idx++;
+ /* get some constant delay */
+ udelay(1);
+ omap_vp_clear_transdone(voltdm);
+ } while (idx < MAX_CHECK_VPTRANS_US);
+ if (idx >= MAX_CHECK_VPTRANS_US)
+ pr_warning("%s: timed out waiting for transdone clear!!\n",
+ __func__);
+
+ /* Clear pending events */
+ sr_notifier_control(voltdm, false);
+ /* trigger sampling */
+ sr_notifier_control(voltdm, true);
+ schedule_delayed_work(&work_data->work,
+ msecs_to_jiffies(SR1P5_SAMPLING_DELAY_MS *
+ SR1P5_STABLE_SAMPLES));
+ mutex_unlock(&omap_dvfs_lock);
+ return;
+
+stop_sampling:
+ /*
+ * We are here due to oscillations in one of two scenarios:
+ * a) SR is attempting to adjust voltage lower than VLIMITO
+ * which VP will ignore, but SR will re-attempt
+ * b) actual oscillations
+ * NOTE: For debugging, enable debug to see the samples.
+ */
+ pr_warning("%s: %s Stop sampling: Voltage Nominal=%d samples=%d\n",
+ __func__, work_data->voltdm->name,
+ volt_data->volt_nominal, work_data->num_osc_samples);
+
+ /* pick up current voltage */
+ u_volt_current = omap_vp_get_curr_volt(voltdm);
+
+ /* Just in case we got more interrupts than our tiny buffer */
+ if (work_data->num_osc_samples > SR1P5_STABLE_SAMPLES)
+ idx = SR1P5_STABLE_SAMPLES;
+ else
+ idx = work_data->num_osc_samples;
+ /* Index at 0 */
+ idx -= 1;
+ u_volt_safe = u_volt_current;
+ /* Grab the max of the samples as the stable voltage */
+ for (; idx >= 0; idx--) {
+ pr_debug("%s: osc_v[%d]=%ld, safe_v=%ld\n", __func__, idx,
+ work_data->u_volt_samples[idx], u_volt_safe);
+ if (work_data->u_volt_samples[idx] > u_volt_safe)
+ u_volt_safe = work_data->u_volt_samples[idx];
+ }
+
+ /* Fall through to close up common stuff */
+done_calib:
+ sr_disable_errgen(voltdm);
+ omap_vp_disable(voltdm);
+ sr_disable(voltdm);
+
+ /* Add margin if needed */
+ if (volt_data->volt_margin) {
+ struct omap_voltdm_pmic *pmic = voltdm->pmic;
+ /* Convert to rounded to PMIC step level if available */
+ if (pmic && pmic->vsel_to_uv && pmic->uv_to_vsel) {
+ /*
+ * To ensure the conversion works, use a proper base voltage:
+ * take the current voltage, add the margin, convert the sum with
+ * the pmic routines to vsel and back to a voltage, and finally
+ * remove the base voltage again.
+ */
+ u_volt_margin = u_volt_current + volt_data->volt_margin;
+ u_volt_margin = pmic->uv_to_vsel(u_volt_margin);
+ u_volt_margin = pmic->vsel_to_uv(u_volt_margin);
+ u_volt_margin -= u_volt_current;
+ } else {
+ u_volt_margin = volt_data->volt_margin;
+ }
+
+ u_volt_safe += u_volt_margin;
+ }
+
+ if (u_volt_safe > volt_data->volt_nominal) {
+ pr_warning("%s: %s Vsafe %ld > Vnom %d. %ld[%d] margin on"
+ "vnom %d curr_v=%ld\n", __func__, voltdm->name,
+ u_volt_safe, volt_data->volt_nominal, u_volt_margin,
+ volt_data->volt_margin, volt_data->volt_nominal,
+ u_volt_current);
+ }
+
+ volt_data->volt_calibrated = u_volt_safe;
+ /* Setup my dynamic voltage for the next calibration for this opp */
+ volt_data->volt_dynamic_nominal = omap_get_dyn_nominal(volt_data);
+
+ /*
+ * if the voltage we decided as safe is not the current voltage,
+ * switch
+ */
+ if (volt_data->volt_calibrated != u_volt_current) {
+ pr_debug("%s: %s reconfiguring to voltage %d\n",
+ __func__, voltdm->name, volt_data->volt_calibrated);
+ voltdm_scale(voltdm, volt_data);
+ }
+
+ pr_info("%s: %s: Calibration complete: Voltage:Nominal=%d,"
+ "Calib=%d,margin=%d\n",
+ __func__, voltdm->name, volt_data->volt_nominal,
+ volt_data->volt_calibrated, volt_data->volt_margin);
+ /*
+ * TODO: Setup my wakeup voltage to allow immediate going to OFF and
+ * on - Pending twl and voltage layer cleanups.
+ * This is necessary, as this is not done as part of regular
+ * Dvfs flow.
+ * vc_setup_on_voltage(voltdm, volt_data->volt_calibrated);
+ */
+ work_data->work_active = false;
+ mutex_unlock(&omap_dvfs_lock);
+}
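The margin handling above rounds the requested margin to something the PMIC can actually program. A minimal sketch of the idea, with an invented 12.5 mV step and no real PMIC callbacks, is:

static unsigned long example_round_margin(unsigned long u_volt_current,
					  unsigned long u_volt_margin)
{
	unsigned long step = 12500;	/* hypothetical vsel step, in uV */
	unsigned long target = u_volt_current + u_volt_margin;
	/* uv_to_vsel() + vsel_to_uv() collapse to step rounding here */
	unsigned long rounded = (target / step) * step;

	return rounded - u_volt_current;	/* margin actually applied */
}

With these invented numbers, a 38000 uV margin on top of 1012500 uV comes back as 37500 uV, aligned to what the PMIC can program rather than an arbitrary microvolt value.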
+
+#if CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY
+
+/**
+ * sr_class1p5_voltdm_recal() - Helper routine to reset calibration.
+ * @voltdm: Voltage domain to reset calibration for
+ * @user: unused
+ *
+ * NOTE: Appropriate locks must be held by calling path to ensure mutual
+ * exclusivity
+ */
+static int sr_class1p5_voltdm_recal(struct voltagedomain *voltdm,
+ void *user)
+{
+ struct omap_volt_data *vdata;
+
+ /*
+ * We need to go no further if sr is not enabled for this domain or
+ * the voltage processor is not present for this voltage domain
+ * (example: vdd_wakeup). Class 1.5 requires the voltage processor
+ * to function.
+ */
+ if (!voltdm->vp || !is_sr_enabled(voltdm))
+ return 0;
+
+ vdata = omap_voltage_get_curr_vdata(voltdm);
+ if (!vdata) {
+ pr_err("%s: unable to find current voltage for vdd_%s\n",
+ __func__, voltdm->name);
+ return -ENXIO;
+ }
+
+ omap_sr_disable(voltdm);
+ omap_voltage_calib_reset(voltdm);
+ voltdm_reset(voltdm);
+ omap_sr_enable(voltdm, vdata);
+ pr_info("%s: %s: calibration reset\n", __func__, voltdm->name);
+
+ return 0;
+}
+
+/**
+ * sr_class1p5_recal_work() - work which actually does the calibration
+ * @work: pointer to the work
+ *
+ * On a periodic basis, we reset our calibration setup
+ * so that a recalibration of the OPPs takes place. This takes
+ * care of the aging factor in the system.
+ */
+static void sr_class1p5_recal_work(struct work_struct *work)
+{
+ mutex_lock(&omap_dvfs_lock);
+ if (voltdm_for_each(sr_class1p5_voltdm_recal, NULL))
+ pr_err("%s: Recalibration failed\n", __func__);
+ mutex_unlock(&omap_dvfs_lock);
+ /* We come back again after the usual delay */
+ schedule_delayed_work(&recal_work,
+ msecs_to_jiffies
+ (CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY));
+}
+#endif /* CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY */
+
+/**
+ * sr_class1p5_enable() - class 1.5 mode of enable for a voltage domain
+ * @voltdm: voltage domain to enable SR for
+ * @voltdm_cdata: voltage domain specific private class data
+ * @volt_data: voltdata for the current OPP being transitioned to
+ *
+ * When this gets called, we use the h/w loop to set our voltage
+ * to a calibrated voltage, detect any oscillations, recover from the
+ * same and finally store the optimized voltage as the calibrated
+ * voltage in the system.
+ *
+ * NOTE: Appropriate locks must be held by calling path to ensure mutual
+ * exclusivity
+ */
+static int sr_class1p5_enable(struct voltagedomain *voltdm,
+ void *voltdm_cdata,
+ struct omap_volt_data *volt_data)
+{
+ int r;
+ struct sr_class1p5_work_data *work_data;
+
+ if (IS_ERR_OR_NULL(voltdm) || IS_ERR_OR_NULL(volt_data)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If already calibrated, nothing to do here.. */
+ if (volt_data->volt_calibrated)
+ return 0;
+
+ work_data = (struct sr_class1p5_work_data *)voltdm_cdata;
+ if (IS_ERR_OR_NULL(work_data)) {
+ pr_err("%s: bad work data??\n", __func__);
+ return -EINVAL;
+ }
+
+ if (work_data->work_active)
+ return 0;
+
+ omap_vp_enable(voltdm);
+ r = sr_enable(voltdm, volt_data);
+ if (r) {
+ pr_err("%s: sr[%s] failed\n", __func__, voltdm->name);
+ sr_disable_errgen(voltdm);
+ omap_vp_disable(voltdm);
+ return r;
+ }
+ work_data->vdata = volt_data;
+ work_data->work_active = true;
+ work_data->num_calib_triggers = 0;
+ /* program the workqueue and leave it to calibrate offline.. */
+ schedule_delayed_work(&work_data->work,
+ msecs_to_jiffies(SR1P5_SAMPLING_DELAY_MS *
+ SR1P5_STABLE_SAMPLES));
+
+ return 0;
+}
+
+/**
+ * sr_class1p5_disable() - disable 1.5 mode for a voltage domain
+ * @voltdm: voltage domain for the sr which needs disabling
+ * @volt_data: voltage data for current OPP to disable
+ * @voltdm_cdata: voltage domain specific private class data
+ * @is_volt_reset: reset the voltage?
+ *
+ * This function either disables SR alone OR disables SR and resets the
+ * voltage to an appropriate level, depending on the is_volt_reset parameter.
+ *
+ * Disabling SR H/w loop:
+ * If calibration is complete or not yet triggered, we have no need to disable
+ * SR h/w loop.
+ * If calibration is complete, we would have already disabled SR AVS at the end
+ * of calibration and h/w loop is inactive when this is called.
+ * If it was never calibrated before, H/w loop was never enabled in the first
+ * place to disable.
+ * If calibration is underway, we cancel the work queue and disable SR. This is
+ * to provide priority to DVFS transition as such transitions cannot wait
+ * without impacting user experience.
+ *
+ * Resetting voltage:
+ * If we have already completed calibration, then resetting to nominal voltage
+ * is not required as we are functioning at safe voltage levels.
+ * If we have not started calibration, we would like to reset to nominal voltage
+ * If calibration is underway and we are attempting to reset voltage as
+ * well, it implies we are in idle/suspend paths where we give priority
+ * to calibration activity and a retry will be attempted.
+ *
+ * NOTE: Appropriate locks must be held by calling path to ensure mutual
+ * exclusivity
+ */
+static int sr_class1p5_disable(struct voltagedomain *voltdm,
+ void *voltdm_cdata,
+ struct omap_volt_data *volt_data,
+ int is_volt_reset)
+{
+ struct sr_class1p5_work_data *work_data;
+
+ if (IS_ERR_OR_NULL(voltdm) || IS_ERR_OR_NULL(volt_data)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ work_data = (struct sr_class1p5_work_data *)voltdm_cdata;
+ if (IS_ERR_OR_NULL(work_data)) {
+ pr_err("%s: bad work data??\n", __func__);
+ return -EINVAL;
+ }
+ if (work_data->work_active) {
+ /* if volt reset and work is active, we don't allow this */
+ if (is_volt_reset)
+ return -EBUSY;
+ /* flag work is dead and remove the old work */
+ work_data->work_active = false;
+ cancel_delayed_work_sync(&work_data->work);
+ sr_notifier_control(voltdm, false);
+ sr_disable_errgen(voltdm);
+ omap_vp_disable(voltdm);
+ sr_disable(voltdm);
+ }
+
+ /* If already calibrated, don't need to reset voltage */
+ if (volt_data->volt_calibrated)
+ return 0;
+
+ if (is_volt_reset)
+ voltdm_reset(voltdm);
+ return 0;
+}
+
+/**
+ * sr_class1p5_configure() - configuration function
+ * @voltdm: configure for which voltage domain
+ * @voltdm_cdata: voltage domain specific private class data
+ *
+ * We don't do much here other than set up some registers for
+ * the sr module involved.
+ */
+static int sr_class1p5_configure(struct voltagedomain *voltdm,
+ void *voltdm_cdata)
+{
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ return sr_configure_errgen(voltdm);
+}
+
+/**
+ * sr_class1p5_init() - class 1p5 init
+ * @voltdm: sr voltage domain
+ * @voltdm_cdata: voltage domain specific private class data
+ * allocated by class init with work item data
+ * freed by deinit.
+ * @class_priv_data: private data for the class (unused)
+ *
+ * We do class-specific initialization like creating needed sysfs/debugfs
+ * entries, spawning a kthread if needed, etc.
+ */
+static int sr_class1p5_init(struct voltagedomain *voltdm,
+ void **voltdm_cdata, void *class_priv_data)
+{
+ struct sr_class1p5_work_data *work_data;
+
+ if (IS_ERR_OR_NULL(voltdm) || IS_ERR_OR_NULL(voltdm_cdata)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!IS_ERR_OR_NULL(*voltdm_cdata)) {
+ pr_err("%s: ooopps.. class already initialized for %s! bug??\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+ /* setup our work params */
+ work_data = kzalloc(sizeof(struct sr_class1p5_work_data), GFP_KERNEL);
+ if (!work_data) {
+ pr_err("%s: no memory to allocate work data on domain %s\n",
+ __func__, voltdm->name);
+ return -ENOMEM;
+ }
+
+ work_data->voltdm = voltdm;
+ INIT_DELAYED_WORK_DEFERRABLE(&work_data->work, sr_class1p5_calib_work);
+ *voltdm_cdata = (void *)work_data;
+
+ return 0;
+}
+
+/**
+ * sr_class1p5_deinit() - class 1p5 deinitialization
+ * @voltdm: voltage domain for which to do this.
+ * @voltdm_cdata: voltage domain specific private class data
+ * allocated by class init with work item data
+ * freed by deinit.
+ * @class_priv_data: class private data for deinitialization (unused)
+ *
+ * currently this only resets the calibrated voltage, forcing DVFS
+ * voltages to be used in the system
+ *
+ * NOTE: Appropriate locks must be held by calling path to ensure mutual
+ * exclusivity
+ */
+static int sr_class1p5_deinit(struct voltagedomain *voltdm,
+ void **voltdm_cdata, void *class_priv_data)
+{
+ struct sr_class1p5_work_data *work_data;
+
+ if (IS_ERR_OR_NULL(voltdm) || IS_ERR_OR_NULL(voltdm_cdata)) {
+ pr_err("%s: bad parameters!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(*voltdm_cdata)) {
+ pr_err("%s: ooopps.. class not initialized for %s! bug??\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ work_data = (struct sr_class1p5_work_data *) *voltdm_cdata;
+
+ /*
+ * We don't have SR periodic calibration anymore, so reset the
+ * calibrated values. We are already protected by appropriate locks,
+ * so no lock is needed here.
+ */
+ if (work_data->work_active)
+ sr_class1p5_disable(voltdm, work_data, work_data->vdata, 0);
+
+ /* Ensure worker canceled. */
+ cancel_delayed_work_sync(&work_data->work);
+ omap_voltage_calib_reset(voltdm);
+ voltdm_reset(voltdm);
+
+ *voltdm_cdata = NULL;
+ kfree(work_data);
+
+ return 0;
+}
+
+/* SR class1p5 structure */
+static struct omap_sr_class_data class1p5_data = {
+ .enable = sr_class1p5_enable,
+ .disable = sr_class1p5_disable,
+ .configure = sr_class1p5_configure,
+ .class_type = SR_CLASS1P5,
+ .init = sr_class1p5_init,
+ .deinit = sr_class1p5_deinit,
+ .notify = sr_class1p5_notify,
+ /*
+ * Trigger for bound - this tells VP that SR has a voltage
+ * change. We should try and ensure transdone is set before reading
+ * the VP voltage.
+ */
+ .notify_flags = SR_NOTIFY_MCUBOUND,
+};
+
+/**
+ * sr_class1p5_driver_init() - register class 1p5 as default
+ *
+ * Board files call this function to use class 1p5; we register with the
+ * smartreflex subsystem
+ */
+static int __init sr_class1p5_driver_init(void)
+{
+ int r;
+
+ /* Enable this class only for OMAP3630 and OMAP4 */
+ if (!(cpu_is_omap3630() || cpu_is_omap44xx()))
+ return -EINVAL;
+
+ r = sr_register_class(&class1p5_data);
+ if (r) {
+ pr_err("SmartReflex class 1.5 driver: "
+ "failed to register with %d\n", r);
+ } else {
+#if CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY
+ INIT_DELAYED_WORK_DEFERRABLE(&recal_work,
+ sr_class1p5_recal_work);
+ schedule_delayed_work(&recal_work,
+ msecs_to_jiffies
+ (CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY));
+#endif
+ pr_info("SmartReflex class 1.5 driver: initialized (%dms)\n",
+ CONFIG_OMAP_SR_CLASS1P5_RECALIBRATION_DELAY);
+ }
+ return r;
+}
+late_initcall(sr_class1p5_driver_init);
diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c
index f438cf4..9ac1c99 100644
--- a/arch/arm/mach-omap2/smartreflex-class3.c
+++ b/arch/arm/mach-omap2/smartreflex-class3.c
@@ -11,33 +11,33 @@
* published by the Free Software Foundation.
*/
+#include <plat/cpu.h>
#include "smartreflex.h"
-static int sr_class3_enable(struct voltagedomain *voltdm)
+static int sr_class3_enable(struct voltagedomain *voltdm,
+ void *voltdm_cdata,
+ struct omap_volt_data *volt_data)
{
- unsigned long volt = omap_voltage_get_nom_volt(voltdm);
-
- if (!volt) {
- pr_warning("%s: Curr voltage unknown. Cannot enable sr_%s\n",
- __func__, voltdm->name);
- return -ENODATA;
- }
-
omap_vp_enable(voltdm);
- return sr_enable(voltdm, volt);
+ return sr_enable(voltdm, volt_data);
}
-static int sr_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
+static int sr_class3_disable(struct voltagedomain *voltdm,
+ void *voltdm_cdata,
+ struct omap_volt_data *vdata,
+ int is_volt_reset)
{
+ sr_disable_errgen(voltdm);
omap_vp_disable(voltdm);
sr_disable(voltdm);
if (is_volt_reset)
- omap_voltage_reset(voltdm);
+ voltdm_reset(voltdm);
return 0;
}
-static int sr_class3_configure(struct voltagedomain *voltdm)
+static int sr_class3_configure(struct voltagedomain *voltdm,
+ void *voltdm_cdata)
{
return sr_configure_errgen(voltdm);
}
@@ -53,6 +53,10 @@
/* Smartreflex Class3 init API to be called from board file */
static int __init sr_class3_init(void)
{
+ /* Enable this class only for OMAP343x */
+ if (!cpu_is_omap343x())
+ return -EINVAL;
+
pr_info("SmartReflex Class3 initialized\n");
return sr_register_class(&class3_data);
}
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index f5a6bc1..c2d85c1 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -28,6 +28,7 @@
#include <plat/common.h>
#include "pm.h"
+#include "dvfs.h"
#include "smartreflex.h"
#define SMARTREFLEX_NAME_LEN 16
@@ -49,11 +50,14 @@
u32 senp_mod;
u32 senn_mod;
unsigned int irq;
+ bool irq_enabled;
void __iomem *base;
struct platform_device *pdev;
struct list_head node;
struct omap_sr_nvalue_table *nvalue_table;
struct voltagedomain *voltdm;
+ /* Managed by class driver as needed */
+ void *voltdm_cdata;
struct dentry *dbg_dir;
};
@@ -62,6 +66,7 @@
static struct omap_sr_class_data *sr_class;
static struct omap_sr_pmic_data *sr_pmic_data;
+static struct dentry *sr_dbg_dir;
static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
{
@@ -72,10 +77,6 @@
u32 value)
{
u32 reg_val;
- u32 errconfig_offs = 0, errconfig_mask = 0;
-
- reg_val = __raw_readl(sr->base + offset);
- reg_val &= ~mask;
/*
* Smartreflex error config register is special as it contains
@@ -86,16 +87,15 @@
* if they are currently set, but does allow the caller to write
* those bits.
*/
- if (sr->ip_type == SR_TYPE_V1) {
- errconfig_offs = ERRCONFIG_V1;
- errconfig_mask = ERRCONFIG_STATUS_V1_MASK;
- } else if (sr->ip_type == SR_TYPE_V2) {
- errconfig_offs = ERRCONFIG_V2;
- errconfig_mask = ERRCONFIG_VPBOUNDINTST_V2;
- }
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
- if (offset == errconfig_offs)
- reg_val &= ~errconfig_mask;
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
reg_val |= value;
@@ -124,27 +124,119 @@
return ERR_PTR(-ENODATA);
}
+static inline u32 notifier_to_irqen_v1(u8 notify_flags)
+{
+ u32 val;
+
+ val = (notify_flags & SR_NOTIFY_MCUACCUM) ?
+ ERRCONFIG_MCUACCUMINTEN : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUVALID) ?
+ ERRCONFIG_MCUVALIDINTEN : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUBOUND) ?
+ ERRCONFIG_MCUBOUNDINTEN : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUDISACK) ?
+ ERRCONFIG_MCUDISACKINTEN : 0;
+
+ return val;
+}
+
+static inline u32 notifier_to_irqen_v2(u8 notify_flags)
+{
+ u32 val;
+
+ val = (notify_flags & SR_NOTIFY_MCUACCUM) ?
+ IRQENABLE_MCUACCUMINT : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUVALID) ?
+ IRQENABLE_MCUVALIDINT : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUBOUND) ?
+ IRQENABLE_MCUBOUNDSINT : 0;
+ val |= (notify_flags & SR_NOTIFY_MCUDISACK) ?
+ IRQENABLE_MCUDISABLEACKINT : 0;
+
+ return val;
+}
+
+static inline u8 irqstat_to_notifier_v1(u32 status)
+{
+ u8 val;
+
+ val = (status & ERRCONFIG_MCUACCUMINTST) ?
+ SR_NOTIFY_MCUACCUM : 0;
+ val |= (status & ERRCONFIG_MCUVALIDINTEN) ?
+ SR_NOTIFY_MCUVALID : 0;
+ val |= (status & ERRCONFIG_MCUBOUNDINTEN) ?
+ SR_NOTIFY_MCUBOUND : 0;
+ val |= (status & ERRCONFIG_MCUDISACKINTEN) ?
+ SR_NOTIFY_MCUDISACK : 0;
+
+ return val;
+}
+
+static inline u8 irqstat_to_notifier_v2(u32 status)
+{
+ u8 val;
+
+ val = (status & IRQENABLE_MCUACCUMINT) ?
+ SR_NOTIFY_MCUACCUM : 0;
+ val |= (status & IRQENABLE_MCUVALIDINT) ?
+ SR_NOTIFY_MCUVALID : 0;
+ val |= (status & IRQENABLE_MCUBOUNDSINT) ?
+ SR_NOTIFY_MCUBOUND : 0;
+ val |= (status & IRQENABLE_MCUDISABLEACKINT) ?
+ SR_NOTIFY_MCUDISACK : 0;
+
+ return val;
+}
+
+
static irqreturn_t sr_interrupt(int irq, void *data)
{
struct omap_sr *sr_info = (struct omap_sr *)data;
u32 status = 0;
+ u32 value = 0;
if (sr_info->ip_type == SR_TYPE_V1) {
+ /* Status bits are one bit before enable bits in v1 */
+ value = notifier_to_irqen_v1(sr_class->notify_flags) >> 1;
+
/* Read the status bits */
status = sr_read_reg(sr_info, ERRCONFIG_V1);
+ status &= value;
/* Clear them by writing back */
- sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ sr_modify_reg(sr_info, ERRCONFIG_V1, value, status);
+
+ value = irqstat_to_notifier_v1(status);
} else if (sr_info->ip_type == SR_TYPE_V2) {
+ value = notifier_to_irqen_v2(sr_class->notify_flags);
/* Read the status bits */
status = sr_read_reg(sr_info, IRQSTATUS);
+ status &= value;
/* Clear them by writing back */
sr_write_reg(sr_info, IRQSTATUS, status);
+ value = irqstat_to_notifier_v2(status);
}
- if (sr_class->class_type == SR_CLASS2 && sr_class->notify)
- sr_class->notify(sr_info->voltdm, status);
+ /* Attempt some resemblance of recovery! */
+ if (!value) {
+ dev_err(&sr_info->pdev->dev, "%s: Spurious interrupt!"
+ "status = 0x%08x. Disabling to prevent spamming!!\n",
+ __func__, status);
+ disable_irq_nosync(sr_info->irq);
+ sr_info->irq_enabled = false;
+ } else {
+ /* If the caller reports inability to handle, disable as well */
+ if (sr_class->notify && sr_class->notify(sr_info->voltdm,
+ sr_info->voltdm_cdata, value)) {
+ dev_err(&sr_info->pdev->dev, "%s: Callback cant handle!"
+ "status=0x%08x. Disabling to prevent spam!!\n",
+ __func__, status);
+ disable_irq_nosync(sr_info->irq);
+ sr_info->irq_enabled = false;
+ }
+
+ }
return IRQ_HANDLED;
}
@@ -214,6 +306,7 @@
static void sr_start_vddautocomp(struct omap_sr *sr)
{
+ int r;
if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
dev_warn(&sr->pdev->dev,
"%s: smartreflex class driver not registered\n",
@@ -221,8 +314,23 @@
return;
}
- if (!sr_class->enable(sr->voltdm))
+ /* Pause dvfs from interfering with our operations */
+ mutex_lock(&omap_dvfs_lock);
+
+ if (sr_class->init &&
+ sr_class->init(sr->voltdm, &sr->voltdm_cdata,
+ sr_class->class_priv_data)) {
+ dev_err(&sr->pdev->dev,
+ "%s: SRClass initialization failed\n", __func__);
+ mutex_unlock(&omap_dvfs_lock);
+ return;
+ }
+
+ r = sr_class->enable(sr->voltdm, sr->voltdm_cdata,
+ omap_voltage_get_curr_vdata(sr->voltdm));
+ if (!r)
sr->autocomp_active = true;
+ mutex_unlock(&omap_dvfs_lock);
}
static void sr_stop_vddautocomp(struct omap_sr *sr)
@@ -235,8 +343,19 @@
}
if (sr->autocomp_active) {
- sr_class->disable(sr->voltdm, 1);
+ /* Pause dvfs from interfering with our operations */
+ mutex_lock(&omap_dvfs_lock);
+ sr_class->disable(sr->voltdm, sr->voltdm_cdata,
+ omap_voltage_get_curr_vdata(sr->voltdm), 1);
+ if (sr_class->deinit &&
+ sr_class->deinit(sr->voltdm, &sr->voltdm_cdata,
+ sr_class->class_priv_data)) {
+ dev_err(&sr->pdev->dev,
+ "%s: SR[%d]Class deinitialization failed\n",
+ __func__, sr->srid);
+ }
sr->autocomp_active = false;
+ mutex_unlock(&omap_dvfs_lock);
}
}
@@ -258,9 +377,7 @@
struct resource *mem;
int ret = 0;
- if (sr_class->class_type == SR_CLASS2 &&
- sr_class->notify_flags && sr_info->irq) {
-
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
if (name == NULL) {
ret = -ENOMEM;
@@ -270,6 +387,7 @@
0, name, (void *)sr_info);
if (ret)
goto error;
+ disable_irq(sr_info->irq);
}
if (pdata && pdata->enable_on_init)
@@ -278,21 +396,23 @@
return ret;
error:
- iounmap(sr_info->base);
- mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
- list_del(&sr_info->node);
- dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
- "interrupt handler. Smartreflex will"
- "not function as desired\n", __func__);
- kfree(name);
- kfree(sr_info);
- return ret;
+ iounmap(sr_info->base);
+ mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
+ "interrupt handler. Smartreflex will"
+ "not function as desired\n", __func__);
+ kfree(name);
+ kfree(sr_info);
+ return ret;
}
static void sr_v1_disable(struct omap_sr *sr)
{
int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
/* Enable MCUDisableAcknowledge interrupt */
sr_modify_reg(sr, ERRCONFIG_V1,
@@ -301,13 +421,13 @@
/* SRCONFIG - disable SR */
sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
- /* Disable all other SR interrupts and clear the status */
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
sr_modify_reg(sr, ERRCONFIG_V1,
(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
- (ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
- ERRCONFIG_MCUBOUNDINTST |
- ERRCONFIG_VPBOUNDINTST_V1));
+ errconf_val);
/*
* Wait for SR to be disabled.
@@ -336,15 +456,23 @@
/* SRCONFIG - disable SR */
sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
- /* Disable all other SR interrupts and clear the status */
- sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ /*
+ * Disable all other SR interrupts and clear the status.
+ * Write to the status register ONLY on a need basis - only if
+ * the status is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
ERRCONFIG_VPBOUNDINTST_V2);
- sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
- IRQENABLE_MCUVALIDINT |
- IRQENABLE_MCUBOUNDSINT));
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
IRQSTATUS_MCVALIDINT |
IRQSTATUS_MCBOUNDSINT));
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
/*
* Wait for SR to be disabled.
@@ -359,8 +487,8 @@
__func__);
/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
- sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
}
static u32 sr_retrieve_nvalue(struct omap_sr *sr, u32 efuse_offs)
@@ -384,6 +512,28 @@
/* Public Functions */
/**
+ * is_sr_enabled() - is Smart reflex enabled for this domain?
+ * @voltdm: voltage domain to check
+ *
+ * Returns true if SR is enabled for this domain, else returns false
+ */
+bool is_sr_enabled(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr;
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_warning("%s: invalid param voltdm\n", __func__);
+ return false;
+ }
+ sr = _sr_lookup(voltdm);
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for sr_%s not found\n",
+ __func__, voltdm->name);
+ return false;
+ }
+ return sr->autocomp_active;
+}
+
+/**
* sr_configure_errgen() - Configures the smrtreflex to perform AVS using the
* error generator module.
* @voltdm: VDD pointer to which the SR module to be configured belongs to.
@@ -446,8 +596,52 @@
sr_errconfig);
/* Enabling the interrupts if the ERROR module is used */
- sr_modify_reg(sr, errconfig_offs,
- vpboundint_en, (vpboundint_en | vpboundint_st));
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @voltdm: voltagedomain pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct voltagedomain *voltdm)
+{
+ u32 errconfig_offs, vpboundint_en;
+ u32 vpboundint_st;
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for sr_%s not found\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ if (sr->ip_type == SR_TYPE_V1) {
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ } else if (sr->ip_type == SR_TYPE_V2) {
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ } else {
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
+ "module without specifying the ip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the interrupts of ERROR module */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
return 0;
}
@@ -532,7 +726,7 @@
/**
* sr_enable() - Enables the smartreflex module.
* @voltdm: VDD pointer to which the SR module to be configured belongs to.
- * @volt: The voltage at which the Voltage domain associated with
+ * @volt_data: The voltage at which the Voltage domain associated with
* the smartreflex module is operating at.
* This is required only to program the correct Ntarget value.
*
@@ -540,10 +734,9 @@
* enable a smartreflex module. Returns 0 on success. Returns error
* value if the voltage passed is wrong or if ntarget value is wrong.
*/
-int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
+int sr_enable(struct voltagedomain *voltdm, struct omap_volt_data *volt_data)
{
u32 nvalue_reciprocal;
- struct omap_volt_data *volt_data;
struct omap_sr *sr = _sr_lookup(voltdm);
int ret;
@@ -553,19 +746,16 @@
return -EINVAL;
}
- volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
-
- if (IS_ERR(volt_data)) {
- dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table"
- "for nominal voltage %ld\n", __func__, volt);
- return -ENODATA;
+ if (IS_ERR_OR_NULL(volt_data)) {
+ dev_warn(&sr->pdev->dev, "%s: bad voltage data\n", __func__);
+ return -EINVAL;
}
nvalue_reciprocal = sr_retrieve_nvalue(sr, volt_data->sr_efuse_offs);
if (!nvalue_reciprocal) {
dev_warn(&sr->pdev->dev, "%s: NVALUE = 0 at voltage %ld\n",
- __func__, volt);
+ __func__, omap_get_operation_voltage(volt_data));
return -ENODATA;
}
@@ -579,7 +769,7 @@
return 0;
/* Configure SR */
- ret = sr_class->configure(voltdm);
+ ret = sr_class->configure(voltdm, sr->voltdm_cdata);
if (ret)
return ret;
@@ -622,7 +812,79 @@
sr_v2_disable(sr);
}
- pm_runtime_put_sync(&sr->pdev->dev);
+ pm_runtime_put_sync_suspend(&sr->pdev->dev);
+}
+
+/**
+ * sr_notifier_control() - control the notifier mechanism
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ * @enable: true to enable notifiers and false to disable the same
+ *
+ * SR modules allow an MCU interrupt mechanism that varies based on the IP
+ * revision. We allow the system to generate an interrupt if the class driver
+ * has the capability to handle it. It is up to the class driver to ensure
+ * proper sequencing and handling for a clean implementation. Returns
+ * 0 if all goes fine, else returns a failure result
+ */
+int sr_notifier_control(struct voltagedomain *voltdm, bool enable)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+ u32 value = 0;
+
+ if (!sr) {
+ pr_warning("%s: sr corresponding to domain not found\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (!sr->autocomp_active)
+ return -EINVAL;
+
+ /* If I could never register an ISR, why bother?? */
+ if (!(sr_class && sr_class->notify && sr_class->notify_flags &&
+ sr->irq)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: unable to setup IRQ without handling mechanism\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ value = notifier_to_irqen_v1(sr_class->notify_flags);
+ break;
+ case SR_TYPE_V2:
+ value = notifier_to_irqen_v2(sr_class->notify_flags);
+ break;
+ default:
+ dev_warn(&sr->pdev->dev, "%s: unknown type of sr??\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ sr_write_reg(sr, IRQSTATUS, value);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1, value,
+ (enable) ? value : 0);
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, (enable) ? IRQENABLE_SET : IRQENABLE_CLR,
+ value);
+ break;
+ }
+
+ if (enable != sr->irq_enabled) {
+ if (enable)
+ enable_irq(sr->irq);
+ else
+ disable_irq(sr->irq);
+ sr->irq_enabled = enable;
+ }
+
+ return 0;
}
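A class driver is expected to bracket its sampling window with this helper, as the class 1.5 code earlier in this series does. A hypothetical caller (error handling omitted, names invented) might look like:

static void example_sample_window(struct voltagedomain *voltdm)
{
	sr_notifier_control(voltdm, true);	/* arm the MCU interrupts */
	/* ... the class driver's notify() callback collects samples ... */
	sr_notifier_control(voltdm, false);	/* quiesce and clear status */
}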
/**
@@ -665,13 +927,15 @@
* omap_sr_enable() - API to enable SR clocks and to call into the
* registered smartreflex class enable API.
* @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ * @volt_data: Voltage data to go to
*
* This API is to be called from the kernel in order to enable
* a particular smartreflex module. This API will do the initial
* configurations to turn on the smartreflex module and in turn call
* into the registered smartreflex class enable API.
*/
-void omap_sr_enable(struct voltagedomain *voltdm)
+void omap_sr_enable(struct voltagedomain *voltdm,
+ struct omap_volt_data *volt_data)
{
struct omap_sr *sr = _sr_lookup(voltdm);
@@ -690,7 +954,8 @@
return;
}
- sr_class->enable(voltdm);
+ sr_class->enable(voltdm, sr->voltdm_cdata,
+ omap_voltage_get_curr_vdata(voltdm));
}
/**
@@ -723,7 +988,8 @@
return;
}
- sr_class->disable(voltdm, 0);
+ sr_class->disable(voltdm, sr->voltdm_cdata,
+ omap_voltage_get_curr_vdata(voltdm), 0);
}
/**
@@ -736,27 +1002,30 @@
* into the registered smartreflex class disable API. This API will tell
* the smartreflex class disable to reset the VP voltage after
* disabling smartreflex.
+ *
+ * Returns result of transition request.
*/
-void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+int omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
{
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
pr_warning("%s: omap_sr struct for sr_%s not found\n",
__func__, voltdm->name);
- return;
+ return -ENODEV;
}
if (!sr->autocomp_active)
- return;
+ return 0;
if (!sr_class || !(sr_class->disable)) {
dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
"registered\n", __func__);
- return;
+ return -ENODEV;
}
- sr_class->disable(voltdm, 1);
+ return sr_class->disable(voltdm, sr->voltdm_cdata,
+ omap_voltage_get_curr_vdata(voltdm), 1);
}
/**
@@ -808,10 +1077,13 @@
return -EINVAL;
}
- if (!val)
- sr_stop_vddautocomp(sr_info);
- else
- sr_start_vddautocomp(sr_info);
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
return 0;
}
@@ -824,9 +1096,10 @@
struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
struct omap_sr_data *pdata = pdev->dev.platform_data;
struct resource *mem, *irq;
- struct dentry *vdd_dbg_dir, *nvalue_dir;
+ struct dentry *nvalue_dir;
struct omap_volt_data *volt_data;
int i, ret = 0;
+ char *name;
if (!sr_info) {
dev_err(&pdev->dev, "%s: unable to allocate sr_info\n",
@@ -858,6 +1131,7 @@
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
sr_info->pdev = pdev;
sr_info->srid = pdev->id;
@@ -891,23 +1165,30 @@
ret = sr_late_init(sr_info);
if (ret) {
pr_warning("%s: Error in SR late init\n", __func__);
- return ret;
+ goto err_iounmap;
}
}
dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
-
- /*
- * If the voltage domain debugfs directory is not created, do
- * not try to create rest of the debugfs entries.
- */
- vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
- if (!vdd_dbg_dir) {
- ret = -EINVAL;
- goto err_iounmap;
+ if (!sr_dbg_dir) {
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+ if (!sr_dbg_dir) {
+ ret = PTR_ERR(sr_dbg_dir);
+ pr_err("%s:sr debugfs dir creation failed(%d)\n",
+ __func__, ret);
+ goto err_iounmap;
+ }
}
- sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
+ name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
+ if (!name) {
+ dev_err(&pdev->dev, "%s: Unable to alloc debugfs name\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+ sr_info->dbg_dir = debugfs_create_dir(name, sr_dbg_dir);
+ kfree(name);
if (IS_ERR(sr_info->dbg_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
__func__);
@@ -998,8 +1279,32 @@
return 0;
}
+static void __devexit omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return;
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+
+ return;
+}
+
static struct platform_driver smartreflex_driver = {
.remove = omap_sr_remove,
+ .shutdown = omap_sr_shutdown,
.driver = {
.name = "smartreflex",
},
diff --git a/arch/arm/mach-omap2/smartreflex.h b/arch/arm/mach-omap2/smartreflex.h
index 5f35b9e..f17c2ec 100644
--- a/arch/arm/mach-omap2/smartreflex.h
+++ b/arch/arm/mach-omap2/smartreflex.h
@@ -142,6 +142,12 @@
#define OMAP3430_SR_ERRWEIGHT 0x04
#define OMAP3430_SR_ERRMAXLIMIT 0x02
+/* Smart reflex notifiers for class drivers to use */
+#define SR_NOTIFY_MCUDISACK BIT(3)
+#define SR_NOTIFY_MCUBOUND BIT(2)
+#define SR_NOTIFY_MCUVALID BIT(1)
+#define SR_NOTIFY_MCUACCUM BIT(0)
+
/**
* struct omap_sr_pmic_data - Strucutre to be populated by pmic code to pass
* pmic specific info to smartreflex driver
@@ -152,6 +158,15 @@
void (*sr_pmic_init) (void);
};
+/**
+ * struct omap_smartreflex_dev_attr - Smartreflex Device attribute.
+ *
+ * @sensor_voltdm_name: Name of voltdomain of SR instance
+ */
+struct omap_smartreflex_dev_attr {
+ const char *sensor_voltdm_name;
+};
+
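As a hypothetical illustration of how this attribute is consumed (see the sr_device.c change later in this series), SoC hwmod data would attach it so sr_dev_init() can look up the voltage domain; the instance name and domain string below are examples, not part of the patch:

static struct omap_smartreflex_dev_attr sr_mpu_dev_attr = {
	.sensor_voltdm_name	= "mpu",
};
/* ... and in the corresponding hwmod definition: .dev_attr = &sr_mpu_dev_attr, */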
#ifdef CONFIG_OMAP_SMARTREFLEX
/*
* The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
@@ -162,12 +177,15 @@
#define SR_CLASS1 0x1
#define SR_CLASS2 0x2
#define SR_CLASS3 0x3
+#define SR_CLASS1P5 0x4
/**
* struct omap_sr_class_data - Smartreflex class driver info
*
* @enable: API to enable a particular class smaartreflex.
* @disable: API to disable a particular class smartreflex.
+ * @init: API to do class specific initialization (optional)
+ * @deinit: API to do class specific deinitialization (optional)
* @configure: API to configure a particular class smartreflex.
* @notify: API to notify the class driver about an event in SR.
* Not needed for class3.
@@ -175,14 +193,23 @@
* @class_type: specify which smartreflex class.
* Can be used by the SR driver to take any class
* based decisions.
+ * @class_priv_data: Class specific private data (optional)
*/
struct omap_sr_class_data {
- int (*enable)(struct voltagedomain *voltdm);
- int (*disable)(struct voltagedomain *voltdm, int is_volt_reset);
- int (*configure)(struct voltagedomain *voltdm);
- int (*notify)(struct voltagedomain *voltdm, u32 status);
+ int (*enable)(struct voltagedomain *voltdm, void *voltdm_cdata,
+ struct omap_volt_data *volt_data);
+ int (*disable)(struct voltagedomain *voltdm, void *voltdm_cdata,
+ struct omap_volt_data *volt_data, int is_volt_reset);
+ int (*init)(struct voltagedomain *voltdm, void **voltdm_cdata,
+ void *class_priv_data);
+ int (*deinit)(struct voltagedomain *voltdm, void **voltdm_cdata,
+ void *class_priv_data);
+ int (*configure)(struct voltagedomain *voltdm, void *voltdm_cdata);
+ int (*notify)(struct voltagedomain *voltdm, void *voltdm_cdata,
+ u32 status);
u8 notify_flags;
u8 class_type;
+ void *class_priv_data;
};
/**
@@ -220,27 +247,43 @@
};
/* Smartreflex module enable/disable interface */
-void omap_sr_enable(struct voltagedomain *voltdm);
+void omap_sr_enable(struct voltagedomain *voltdm,
+ struct omap_volt_data *volt_data);
void omap_sr_disable(struct voltagedomain *voltdm);
-void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
+int omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
/* API to register the pmic specific data with the smartreflex driver. */
void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
/* Smartreflex driver hooks to be called from Smartreflex class driver */
-int sr_enable(struct voltagedomain *voltdm, unsigned long volt);
+int sr_enable(struct voltagedomain *voltdm, struct omap_volt_data *volt_data);
void sr_disable(struct voltagedomain *voltdm);
+int sr_notifier_control(struct voltagedomain *voltdm, bool enable);
int sr_configure_errgen(struct voltagedomain *voltdm);
+int sr_disable_errgen(struct voltagedomain *voltdm);
int sr_configure_minmax(struct voltagedomain *voltdm);
/* API to register the smartreflex class driver with the smartreflex driver */
int sr_register_class(struct omap_sr_class_data *class_data);
+bool is_sr_enabled(struct voltagedomain *voltdm);
#else
static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
-static inline void omap_sr_disable_reset_volt(
- struct voltagedomain *voltdm) {}
+
+static inline int sr_notifier_control(struct voltagedomain *voltdm,
+ bool enable)
+{
+ return -EINVAL;
+}
+
+static inline int omap_sr_disable_reset_volt(
+ struct voltagedomain *voltdm) { return 0; }
static inline void omap_sr_register_pmic(
struct omap_sr_pmic_data *pmic_data) {}
+static inline bool is_sr_enabled(struct voltagedomain *voltdm)
+{
+ return false;
+}
#endif
+
#endif
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 10d3c5e..0b74f5b 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -82,6 +82,7 @@
struct omap_sr_data *sr_data;
struct omap_device *od;
struct omap_volt_data *volt_data;
+ struct omap_smartreflex_dev_attr *sr_dev_attr;
char *name = "smartreflex";
static int i;
@@ -92,9 +93,11 @@
return -ENOMEM;
}
- if (!oh->vdd_name) {
+ sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+ if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
pr_err("%s: No voltage domain specified for %s."
- "Cannot initialize\n", __func__, oh->name);
+ "Cannot initialize\n", __func__,
+ oh->name);
goto exit;
}
@@ -102,10 +105,10 @@
sr_data->senn_mod = 0x1;
sr_data->senp_mod = 0x1;
- sr_data->voltdm = omap_voltage_domain_lookup(oh->vdd_name);
+ sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
if (IS_ERR(sr_data->voltdm)) {
pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
- __func__, oh->vdd_name);
+ __func__, sr_dev_attr->sensor_voltdm_name);
goto exit;
}
diff --git a/arch/arm/mach-omap2/temp_sensor_device.c b/arch/arm/mach-omap2/temp_sensor_device.c
new file mode 100644
index 0000000..0a647a3
--- /dev/null
+++ b/arch/arm/mach-omap2/temp_sensor_device.c
@@ -0,0 +1,95 @@
+/*
+ * OMAP on die Temperature sensor device file
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <plat/omap_device.h>
+#include "control.h"
+#include "pm.h"
+#include <plat/temperature_sensor.h>
+
+void omap_temp_sensor_resume_idle(void)
+{
+ omap_temp_sensor_idle(0);
+}
+
+void omap_temp_sensor_prepare_idle(void)
+{
+ omap_temp_sensor_idle(1);
+}
+
+static struct omap_device_pm_latency omap_temp_sensor_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ }
+};
+
+static int temp_sensor_dev_init(struct omap_hwmod *oh, void *user)
+{
+ struct omap_temp_sensor_pdata *temp_sensor_pdata;
+ struct omap_device *od;
+ static int i;
+ int ret = 0;
+
+ temp_sensor_pdata =
+ kzalloc(sizeof(struct omap_temp_sensor_pdata), GFP_KERNEL);
+ if (!temp_sensor_pdata) {
+ pr_err("%s: Unable to allocate memory for %s. Error!\n",
+ __func__, oh->name);
+ return -ENOMEM;
+ }
+
+ temp_sensor_pdata->offset = OMAP4_CTRL_MODULE_CORE_TEMP_SENSOR;
+
+ temp_sensor_pdata->name = "omap_temp_sensor";
+
+ od = omap_device_build(temp_sensor_pdata->name, i, oh, temp_sensor_pdata,
+ sizeof(*temp_sensor_pdata),
+ omap_temp_sensor_latency,
+ ARRAY_SIZE(omap_temp_sensor_latency), 0);
+ if (IS_ERR(od)) {
+ pr_warning("%s: Could not build omap_device for %s: %s.\n\n",
+ __func__, temp_sensor_pdata->name, oh->name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i++;
+done:
+ kfree(temp_sensor_pdata);
+ return ret;
+}
+
+int __init omap_devinit_temp_sensor(void)
+{
+ if (!cpu_is_omap446x())
+ return 0;
+
+ return omap_hwmod_for_each_by_class("thermal_sensor",
+ temp_sensor_dev_init, NULL);
+}
+
+arch_initcall(omap_devinit_temp_sensor);
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 3b9cf85..69a5e00 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -44,7 +44,7 @@
#include <plat/omap_hwmod.h>
#include "timer-gp.h"
-
+#include "dmtimer.h"
/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
#define MAX_GPTIMER_ID 12
@@ -106,6 +106,7 @@
.name = "gp timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
+ .rating = 300,
.set_next_event = omap2_gp_timer_set_next_event,
.set_mode = omap2_gp_timer_set_mode,
};
@@ -134,13 +135,9 @@
{
u32 tick_rate;
int src;
- char clockevent_hwmod_name[8]; /* 8 = sizeof("timerXX0") */
inited = 1;
- sprintf(clockevent_hwmod_name, "timer%d", gptimer_id);
- omap_hwmod_setup_one(clockevent_hwmod_name);
-
gptimer = omap_dm_timer_request_specific(gptimer_id);
BUG_ON(gptimer == NULL);
gptimer_wakeup = gptimer;
@@ -154,8 +151,8 @@
#endif
if (gptimer_id != 12)
- WARN(IS_ERR_VALUE(omap_dm_timer_set_source(gptimer, src)),
- "timer-gp: omap_dm_timer_set_source() failed\n");
+ WARN(IS_ERR_VALUE(omap2_system_timer_set_src(gptimer, src)),
+ "timer-gp: omap2_system_timer_set_src() failed\n");
tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer));
@@ -174,7 +171,8 @@
clockevent_delta2ns(3, &clockevent_gpt);
/* Timer internal resynch latency. */
- clockevent_gpt.cpumask = cpumask_of(0);
+ clockevent_gpt.cpumask = cpu_all_mask;
+ clockevent_gpt.irq = omap_dm_timer_get_irq(gptimer);
clockevents_register_device(&clockevent_gpt);
}
@@ -255,7 +253,7 @@
BUG_ON(!twd_base);
}
#endif
- omap_dm_timer_init();
+ omap2_system_timer_init(gptimer_id);
omap2_gp_clockevent_init();
omap2_gp_clocksource_init();
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
index 89ae298..7b422b5 100644
--- a/arch/arm/mach-omap2/usb-host.c
+++ b/arch/arm/mach-omap2/usb-host.c
@@ -22,58 +22,433 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/io.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <plat/usb.h>
+#include <plat/omap_device.h>
#include "mux.h"
#ifdef CONFIG_MFD_OMAP_USB_HOST
-#define OMAP_USBHS_DEVICE "usbhs-omap"
-
-static struct resource usbhs_resources[] = {
- {
- .name = "uhh",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "tll",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ehci",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ehci-irq",
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "ohci",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ohci-irq",
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device usbhs_device = {
- .name = OMAP_USBHS_DEVICE,
- .id = 0,
- .num_resources = ARRAY_SIZE(usbhs_resources),
- .resource = usbhs_resources,
-};
+#define OMAP_USBHS_DEVICE "usbhs_omap"
+#define USBHS_UHH_HWMODNAME "usbhs_uhh"
+#define USBHS_OHCI_HWMODNAME "usbhs_ohci"
+#define USBHS_EHCI_HWMODNAME "usbhs_ehci"
+#define USBHS_TLL_HWMODNAME "usbhs_tll"
static struct usbhs_omap_platform_data usbhs_data;
static struct ehci_hcd_omap_platform_data ehci_data;
static struct ohci_hcd_omap_platform_data ohci_data;
+static struct omap_device_pm_latency omap_uhhtll_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static struct usbhs_wakeup {
+ struct device *dev;
+ struct omap_hwmod *oh_ehci;
+ struct omap_hwmod *oh_ohci;
+ struct work_struct wakeup_work;
+ unsigned int wakeup_ehci:1;
+ unsigned int wakeup_ohci:1;
+} *usbhs_wake;
+
/* MUX settings for EHCI pins */
+static struct omap_device_pad port1_phy_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_stp.usbb1_ulpiphy_stp",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_clk.usbb1_ulpiphy_clk",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dir.usbb1_ulpiphy_dir",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~OMAP_WAKEUP_EN,
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_nxt.usbb1_ulpiphy_nxt",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat0.usbb1_ulpiphy_dat0",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~OMAP_WAKEUP_EN,
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_ulpiphy_dat1",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_ulpiphy_dat2",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat3.usbb1_ulpiphy_dat3",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat4.usbb1_ulpiphy_dat4",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat5.usbb1_ulpiphy_dat5",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat6.usbb1_ulpiphy_dat6",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb1_ulpitll_dat7.usbb1_ulpiphy_dat7",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
+
+static struct omap_device_pad port1_tll_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_stp.usbb1_ulpitll_stp",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_clk.usbb1_ulpitll_clk",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dir.usbb1_ulpitll_dir",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_nxt.usbb1_ulpitll_nxt",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat0.usbb1_ulpitll_dat0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_ulpitll_dat1",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_ulpitll_dat2",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat3.usbb1_ulpitll_dat3",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat4.usbb1_ulpitll_dat4",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat5.usbb1_ulpitll_dat5",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat6.usbb1_ulpitll_dat6",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb1_ulpitll_dat7.usbb1_ulpitll_dat7",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+};
+
+static struct omap_device_pad port2_phy_pads[] __initdata = {
+ {
+ .name = "usbb2_ulpitll_stp.usbb2_ulpiphy_stp",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_clk.usbb2_ulpiphy_clk",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dir.usbb2_ulpiphy_dir",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_WAKEUP_EN
+ | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_nxt.usbb2_ulpiphy_nxt",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat0.usbb2_ulpiphy_dat0",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_WAKEUP_EN
+ | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat1.usbb2_ulpiphy_dat1",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat2.usbb2_ulpiphy_dat2",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat3.usbb2_ulpiphy_dat3",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat4.usbb2_ulpiphy_dat4",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat5.usbb2_ulpiphy_dat5",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat6.usbb2_ulpiphy_dat6",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "usbb2_ulpitll_dat7.usbb2_ulpiphy_dat7",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
+
+static struct omap_device_pad port2_tll_pads[] __initdata = {
+ {
+ .name = "usbb2_ulpitll_stp.usbb2_ulpitll_stp",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_clk.usbb2_ulpitll_clk",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dir.usbb2_ulpitll_dir",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_nxt.usbb2_ulpitll_nxt",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat0.usbb2_ulpitll_dat0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat1.usbb2_ulpitll_dat1",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat2.usbb2_ulpitll_dat2",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat3.usbb2_ulpitll_dat3",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat4.usbb2_ulpitll_dat4",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat5.usbb2_ulpitll_dat5",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat6.usbb2_ulpitll_dat6",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "usbb2_ulpitll_dat7.usbb2_ulpitll_dat7",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE0,
+ },
+};
+
+static struct omap_device_pad port1_6pin_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_stp.usbb1_mm_rxdp",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_nxt.usbb1_mm_rxdm",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat0.usbb1_mm_rxrcv",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat3.usbb1_mm_txen",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_mm_txdat",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_mm_txse0",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+};
+
+static struct omap_device_pad port1_4pin_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_dat0.usbb1_mm_rxrcv",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat3.usbb1_mm_txen",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+};
+
+static struct omap_device_pad port1_3pin_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_dat3.usbb1_mm_txen",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+};
+
+static struct omap_device_pad port1_2pin_pads[] __initdata = {
+ {
+ .name = "usbb1_ulpitll_dat1.usbb1_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+ {
+ .name = "usbb1_ulpitll_dat2.usbb1_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE5,
+ },
+};
+
+static struct omap_device_pad port2_6pin_pads[] __initdata = {
+ {
+ .name = "abe_mcbsp2_dr.usbb2_mm_rxdp",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_mcbsp2_clkx.usbb2_mm_rxdm",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_mcbsp2_dx.usbb2_mm_rxrcv",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_mcbsp2_fsx.usbb2_mm_txen",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_din1.usbb2_mm_txdat",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_clk1.usbb2_mm_txse0",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = (OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4) & ~(OMAP_WAKEUP_EN),
+ .idle = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
+
+static struct omap_device_pad port2_4pin_pads[] __initdata = {
+ {
+ .name = "abe_mcbsp2_dx.usbb2_mm_rxrcv",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_mcbsp2_fsx.usbb2_mm_txen",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_din1.usbb2_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_clk1.usbb2_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
+
+static struct omap_device_pad port2_3pin_pads[] __initdata = {
+ {
+ .name = "abe_mcbsp2_fsx.usbb2_mm_txen",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_din1.usbb2_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_clk1.usbb2_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
+
+static struct omap_device_pad port2_2pin_pads[] __initdata = {
+ {
+ .name = "abe_mcbsp2_fsx.usbb2_mm_txen",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_din1.usbb2_mm_txdat",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+ {
+ .name = "abe_dmic_clk1.usbb2_mm_txse0",
+ .enable = OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
+ },
+};
/*
* setup_ehci_io_mux - initialize IO pad mux for USBHOST
*/
@@ -220,60 +595,20 @@
return;
}
-static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static struct omap_hwmod_mux_info *
+setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
{
+ struct omap_device_pad *pads = NULL;
+ int pads_cnt = 0;
+
switch (port_mode[0]) {
case OMAP_EHCI_PORT_MODE_PHY:
- omap_mux_init_signal("usbb1_ulpiphy_stp",
- OMAP_PIN_OUTPUT);
- omap_mux_init_signal("usbb1_ulpiphy_clk",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dir",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_nxt",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat0",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat1",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat2",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat3",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat4",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat5",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat6",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpiphy_dat7",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port1_phy_pads;
+ pads_cnt = ARRAY_SIZE(port1_phy_pads);
break;
case OMAP_EHCI_PORT_MODE_TLL:
- omap_mux_init_signal("usbb1_ulpitll_stp",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("usbb1_ulpitll_clk",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dir",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_nxt",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat0",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat1",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat2",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat3",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat4",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat5",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat6",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_ulpitll_dat7",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port1_tll_pads;
+ pads_cnt = ARRAY_SIZE(port1_tll_pads);
break;
case OMAP_USBHS_PORT_MODE_UNUSED:
default:
@@ -281,61 +616,19 @@
}
switch (port_mode[1]) {
case OMAP_EHCI_PORT_MODE_PHY:
- omap_mux_init_signal("usbb2_ulpiphy_stp",
- OMAP_PIN_OUTPUT);
- omap_mux_init_signal("usbb2_ulpiphy_clk",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dir",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_nxt",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat0",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat1",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat2",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat3",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat4",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat5",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat6",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpiphy_dat7",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port2_phy_pads;
+ pads_cnt = ARRAY_SIZE(port2_phy_pads);
break;
case OMAP_EHCI_PORT_MODE_TLL:
- omap_mux_init_signal("usbb2_ulpitll_stp",
- OMAP_PIN_INPUT_PULLUP);
- omap_mux_init_signal("usbb2_ulpitll_clk",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dir",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_nxt",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat0",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat1",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat2",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat3",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat4",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat5",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat6",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_ulpitll_dat7",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port2_tll_pads;
+ pads_cnt = ARRAY_SIZE(port2_tll_pads);
break;
case OMAP_USBHS_PORT_MODE_UNUSED:
default:
break;
}
+
+ return omap_hwmod_mux_init(pads, pads_cnt);
}
static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
@@ -435,37 +728,35 @@
}
}
-static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+static struct omap_hwmod_mux_info *
+setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
{
+ struct omap_device_pad *pads = NULL;
+ int pads_cnt = 0;
+
switch (port_mode[0]) {
case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
- omap_mux_init_signal("usbb1_mm_rxdp",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_mm_rxdm",
- OMAP_PIN_INPUT_PULLDOWN);
-
+ pads = port1_6pin_pads;
+ pads_cnt = ARRAY_SIZE(port1_6pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
- omap_mux_init_signal("usbb1_mm_rxrcv",
- OMAP_PIN_INPUT_PULLDOWN);
-
+ pads = port1_4pin_pads;
+ pads_cnt = ARRAY_SIZE(port1_4pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
- omap_mux_init_signal("usbb1_mm_txen",
- OMAP_PIN_INPUT_PULLDOWN);
-
-
+ pads = port1_3pin_pads;
+ pads_cnt = ARRAY_SIZE(port1_3pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
- omap_mux_init_signal("usbb1_mm_txdat",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb1_mm_txse0",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port1_2pin_pads;
+ pads_cnt = ARRAY_SIZE(port1_2pin_pads);
break;
-
case OMAP_USBHS_PORT_MODE_UNUSED:
default:
break;
@@ -476,39 +767,79 @@
case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
- omap_mux_init_signal("usbb2_mm_rxdp",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_mm_rxdm",
- OMAP_PIN_INPUT_PULLDOWN);
-
+ pads = port2_6pin_pads;
+ pads_cnt = ARRAY_SIZE(port2_6pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
- omap_mux_init_signal("usbb2_mm_rxrcv",
- OMAP_PIN_INPUT_PULLDOWN);
-
+ pads = port2_4pin_pads;
+ pads_cnt = ARRAY_SIZE(port2_4pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
- omap_mux_init_signal("usbb2_mm_txen",
- OMAP_PIN_INPUT_PULLDOWN);
-
-
+ pads = port2_3pin_pads;
+ pads_cnt = ARRAY_SIZE(port2_3pin_pads);
+ break;
case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
- omap_mux_init_signal("usbb2_mm_txdat",
- OMAP_PIN_INPUT_PULLDOWN);
- omap_mux_init_signal("usbb2_mm_txse0",
- OMAP_PIN_INPUT_PULLDOWN);
+ pads = port2_2pin_pads;
+ pads_cnt = ARRAY_SIZE(port2_2pin_pads);
break;
-
case OMAP_USBHS_PORT_MODE_UNUSED:
default:
break;
}
+
+ return omap_hwmod_mux_init(pads, pads_cnt);
+}
+
+void usbhs_wakeup(void)
+{
+ int workq = 0;
+
+ if (!usbhs_wake)
+ return;
+
+ if (test_bit(USB_OHCI_LOADED, &usb_hcds_loaded) &&
+ omap_hwmod_pad_get_wakeup_status(usbhs_wake->oh_ohci)) {
+ usbhs_wake->wakeup_ohci = 1;
+ workq = 1;
+ }
+
+ if (test_bit(USB_EHCI_LOADED, &usb_hcds_loaded) &&
+ omap_hwmod_pad_get_wakeup_status(usbhs_wake->oh_ehci)) {
+ usbhs_wake->wakeup_ehci = 1;
+ workq = 1;
+ }
+
+ if (workq)
+ queue_work(pm_wq, &usbhs_wake->wakeup_work);
+}
+
+static void usbhs_resume_work(struct work_struct *work)
+{
+ dev_dbg(usbhs_wake->dev, "USB IO PAD Wakeup event triggered\n");
+
+ if (usbhs_wake->wakeup_ehci) {
+ usbhs_wake->wakeup_ehci = 0;
+ omap_hwmod_disable_ioring_wakeup(usbhs_wake->oh_ehci);
+ }
+
+ if (usbhs_wake->wakeup_ohci) {
+ usbhs_wake->wakeup_ohci = 0;
+ omap_hwmod_disable_ioring_wakeup(usbhs_wake->oh_ohci);
+ }
+
+ pm_runtime_get_sync(usbhs_wake->dev);
+ pm_runtime_put_sync(usbhs_wake->dev);
}
void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
{
- int i;
+ struct omap_hwmod *oh[4];
+ struct omap_device *od;
+ int bus_id = -1;
+ int i;
for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
usbhs_data.port_mode[i] = pdata->port_mode[i];
@@ -516,55 +847,74 @@
ehci_data.port_mode[i] = pdata->port_mode[i];
ehci_data.reset_gpio_port[i] = pdata->reset_gpio_port[i];
ehci_data.regulator[i] = pdata->regulator[i];
+ ehci_data.transceiver_clk[i] = pdata->transceiver_clk[i];
}
ehci_data.phy_reset = pdata->phy_reset;
ohci_data.es2_compatibility = pdata->es2_compatibility;
usbhs_data.ehci_data = &ehci_data;
usbhs_data.ohci_data = &ohci_data;
+ oh[0] = omap_hwmod_lookup(USBHS_UHH_HWMODNAME);
+ if (!oh[0]) {
+ pr_err("Could not look up %s\n", USBHS_UHH_HWMODNAME);
+ return;
+ }
+
+ oh[1] = omap_hwmod_lookup(USBHS_OHCI_HWMODNAME);
+ if (!oh[1]) {
+ pr_err("Could not look up %s\n", USBHS_OHCI_HWMODNAME);
+ return;
+ }
+
+ oh[2] = omap_hwmod_lookup(USBHS_EHCI_HWMODNAME);
+ if (!oh[2]) {
+ pr_err("Could not look up %s\n", USBHS_EHCI_HWMODNAME);
+ return;
+ }
+
+ oh[3] = omap_hwmod_lookup(USBHS_TLL_HWMODNAME);
+ if (!oh[3]) {
+ pr_err("Could not look up %s\n", USBHS_TLL_HWMODNAME);
+ return;
+ }
+
if (cpu_is_omap34xx()) {
- usbhs_resources[0].start = OMAP34XX_UHH_CONFIG_BASE;
- usbhs_resources[0].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
- usbhs_resources[1].start = OMAP34XX_USBTLL_BASE;
- usbhs_resources[1].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1;
- usbhs_resources[2].start = OMAP34XX_EHCI_BASE;
- usbhs_resources[2].end = OMAP34XX_EHCI_BASE + SZ_1K - 1;
- usbhs_resources[3].start = INT_34XX_EHCI_IRQ;
- usbhs_resources[4].start = OMAP34XX_OHCI_BASE;
- usbhs_resources[4].end = OMAP34XX_OHCI_BASE + SZ_1K - 1;
- usbhs_resources[5].start = INT_34XX_OHCI_IRQ;
setup_ehci_io_mux(pdata->port_mode);
setup_ohci_io_mux(pdata->port_mode);
} else if (cpu_is_omap44xx()) {
- usbhs_resources[0].start = OMAP44XX_UHH_CONFIG_BASE;
- usbhs_resources[0].end = OMAP44XX_UHH_CONFIG_BASE + SZ_1K - 1;
- usbhs_resources[1].start = OMAP44XX_USBTLL_BASE;
- usbhs_resources[1].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1;
- usbhs_resources[2].start = OMAP44XX_HSUSB_EHCI_BASE;
- usbhs_resources[2].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
- usbhs_resources[3].start = OMAP44XX_IRQ_EHCI;
- usbhs_resources[4].start = OMAP44XX_HSUSB_OHCI_BASE;
- usbhs_resources[4].end = OMAP44XX_HSUSB_OHCI_BASE + SZ_1K - 1;
- usbhs_resources[5].start = OMAP44XX_IRQ_OHCI;
- setup_4430ehci_io_mux(pdata->port_mode);
- setup_4430ohci_io_mux(pdata->port_mode);
+ oh[2]->mux = setup_4430ehci_io_mux(pdata->port_mode);
+ oh[1]->mux = setup_4430ohci_io_mux(pdata->port_mode);
}
- if (platform_device_add_data(&usbhs_device,
- &usbhs_data, sizeof(usbhs_data)) < 0) {
- printk(KERN_ERR "USBHS platform_device_add_data failed\n");
- goto init_end;
+ od = omap_device_build_ss(OMAP_USBHS_DEVICE, bus_id, oh, 4,
+ (void *)&usbhs_data, sizeof(usbhs_data),
+ omap_uhhtll_latency,
+ ARRAY_SIZE(omap_uhhtll_latency), false);
+
+ if (IS_ERR(od)) {
+ pr_err("Could not build hwmod devices %s, %s\n",
+ USBHS_UHH_HWMODNAME, USBHS_TLL_HWMODNAME);
+ return;
}
- if (platform_device_register(&usbhs_device) < 0)
- printk(KERN_ERR "USBHS platform_device_register failed\n");
+ usbhs_wake = kmalloc(sizeof(*usbhs_wake), GFP_KERNEL);
+ if (!usbhs_wake) {
+ pr_err("Could not allocate usbhs_wake\n");
+ return;
+ }
-init_end:
- return;
+ INIT_WORK(&usbhs_wake->wakeup_work, usbhs_resume_work);
+ usbhs_wake->oh_ehci = oh[2];
+ usbhs_wake->oh_ohci = oh[1];
+ usbhs_wake->dev = &od->pdev.dev;
}
#else
+void usbhs_wakeup(void)
+{
+}
+
void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
{
}
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index c7ed540..bd3b513 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -119,7 +119,7 @@
struct omap_hwmod *oh;
struct omap_device *od;
struct platform_device *pdev;
- struct device *dev;
+ struct device *dev = NULL;
int bus_id = -1;
const char *oh_name, *name;
struct omap_musb_board_data *board_data;
@@ -140,7 +140,7 @@
musb_plat.extvbus = board_data->extvbus;
if (cpu_is_omap44xx())
- omap4430_phy_init(dev);
+ omap4430_phy_init(dev); /* power down the phy */
if (cpu_is_omap3517() || cpu_is_omap3505()) {
oh_name = "am35x_otg_hs";
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
new file mode 100644
index 0000000..42ffc64
--- /dev/null
+++ b/arch/arm/mach-omap2/vc.c
@@ -0,0 +1,636 @@
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <plat/cpu.h>
+
+#include "voltage.h"
+#include "vc.h"
+#include "prm-regbits-34xx.h"
+#include "prm-regbits-44xx.h"
+#include "prm44xx.h"
+
+#define OMAP_VC_I2C_ACK_DELAY 3
+
+/**
+ * struct omap_vc_channel_cfg - describe the cfg_channel bitfield
+ * @sa: bit for slave address
+ * @rav: bit for voltage configuration register
+ * @rac: bit for command configuration register
+ * @racen: enable bit for RAC
+ * @cmd: bit for command value set selection
+ *
+ * Channel configuration bits, common for OMAP3+
+ * OMAP3 register: PRM_VC_CH_CONF
+ * OMAP4 register: PRM_VC_CFG_CHANNEL
+ * OMAP5 register: PRM_VC_SMPS_<voltdm>_CONFIG
+ */
+struct omap_vc_channel_cfg {
+ u8 sa;
+ u8 rav;
+ u8 rac;
+ u8 racen;
+ u8 cmd;
+};
+
+static struct omap_vc_channel_cfg vc_default_channel_cfg = {
+ .sa = BIT(0),
+ .rav = BIT(1),
+ .rac = BIT(2),
+ .racen = BIT(3),
+ .cmd = BIT(4),
+};
+
+/*
+ * On OMAP3+, all VC channels have the above default bitfield
+ * configuration, except the OMAP4 MPU channel. This appears
+ * to be a freak accident as every other VC channel has the
+ * default configuration, thus creating a mutant channel config.
+ */
+static struct omap_vc_channel_cfg vc_mutant_channel_cfg = {
+ .sa = BIT(0),
+ .rav = BIT(2),
+ .rac = BIT(3),
+ .racen = BIT(4),
+ .cmd = BIT(1),
+};
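
A minimal illustration (not part of the patch) of why the per-SoC layout above
matters: selecting the same SA, RAV, RAC and CMD bits yields different
cfg_channel values under the two layouts. The values below follow directly
from the two structs above.

	/* Illustrative only */
	u8 default_sel = BIT(0) | BIT(1) | BIT(2) | BIT(4);	/* 0x17 */
	u8 mutant_sel  = BIT(0) | BIT(2) | BIT(3) | BIT(1);	/* 0x0f */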
+
+static struct omap_vc_channel_cfg *vc_cfg_bits;
+#define CFG_CHANNEL_MASK 0x1f
+
+/**
+ * omap_vc_config_channel - configure VC channel to PMIC mappings
+ * @voltdm: pointer to the voltagedomain defining the desired VC channel
+ *
+ * Configures the VC channel to PMIC mappings for the following
+ * PMIC settings
+ * - i2c slave address (SA)
+ * - voltage configuration address (RAV)
+ * - command configuration address (RAC) and enable bit (RACEN)
+ * - command values for ON, ONLP, RET and OFF (CMD)
+ *
+ * This function currently only allows flexible configuration of the
+ * non-default channel. Starting with OMAP4, there are more than 2
+ * channels, with one defined as the default (on OMAP4, it's MPU.)
+ * Only the non-default channel can be configured.
+ */
+static int omap_vc_config_channel(struct voltagedomain *voltdm)
+{
+ struct omap_vc_channel *vc = voltdm->vc;
+
+ /*
+ * For default channel, the only configurable bit is RACEN.
+ * All others must stay at zero (see function comment above.)
+ */
+ if (vc->flags & OMAP_VC_CHANNEL_DEFAULT)
+ vc->cfg_channel &= vc_cfg_bits->racen;
+
+ voltdm->rmw(CFG_CHANNEL_MASK << vc->cfg_channel_sa_shift,
+ vc->cfg_channel << vc->cfg_channel_sa_shift,
+ vc->common->cfg_channel_reg);
+
+ return 0;
+}
+
+/* Voltage scale and accessory APIs */
+int omap_vc_pre_scale(struct voltagedomain *voltdm,
+ unsigned long target_volt,
+ struct omap_volt_data *target_v,
+ u8 *target_vsel, u8 *current_vsel)
+{
+ struct omap_vc_channel *vc = voltdm->vc;
+ u32 vc_cmdval;
+
+ /* Check if sufficient pmic info is available for this vdd */
+ if (!voltdm->pmic) {
+ pr_err("%s: Insufficient pmic info to scale the vdd_%s\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ if (!voltdm->pmic->uv_to_vsel) {
+ pr_err("%s: PMIC function to convert voltage in uV to"
+ "vsel not registered. Hence unable to scale voltage"
+ "for vdd_%s\n", __func__, voltdm->name);
+ return -ENODATA;
+ }
+
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ *target_vsel = voltdm->pmic->uv_to_vsel(target_volt);
+ *current_vsel = voltdm->read(voltdm->vp->voltage);
+
+ /* Setting the ON voltage to the new target voltage */
+ vc_cmdval = voltdm->read(vc->cmdval_reg);
+ vc_cmdval &= ~vc->common->cmd_on_mask;
+ vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift);
+ voltdm->write(vc_cmdval, vc->cmdval_reg);
+
+ omap_vp_update_errorgain(voltdm, target_v);
+
+ return 0;
+}
+
+/**
+ * omap_vc_set_auto_trans() - set auto transition parameters for a domain
+ * @voltdm: voltage domain we are interested in
+ * @flag: which state should we program this to
+ */
+int omap_vc_set_auto_trans(struct voltagedomain *voltdm, u8 flag)
+{
+ struct omap_vc_channel *vc;
+ const struct omap_vc_auto_trans *auto_trans;
+ u8 val = OMAP_VC_CHANNEL_AUTO_TRANSITION_UNSUPPORTED;
+
+ if (!voltdm) {
+ pr_err("%s: NULL Voltage domain!\n", __func__);
+ return -ENOENT;
+ }
+ vc = voltdm->vc;
+ if (!vc) {
+ pr_err("%s: NULL VC Voltage domain %s!\n", __func__,
+ voltdm->name);
+ return -ENOENT;
+ }
+
+ auto_trans = vc->auto_trans;
+ if (!auto_trans) {
+ pr_debug("%s: No auto trans %s!\n", __func__, voltdm->name);
+ return 0;
+ }
+
+ /* Handle value and masks per silicon data */
+ switch (flag) {
+ case OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE:
+ val = 0x0;
+ break;
+ case OMAP_VC_CHANNEL_AUTO_TRANSITION_SLEEP:
+ val = auto_trans->sleep_val;
+ break;
+ case OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION:
+ val = auto_trans->retention_val;
+ break;
+ case OMAP_VC_CHANNEL_AUTO_TRANSITION_OFF:
+ val = auto_trans->off_val;
+ break;
+ default:
+ pr_err("%s: Voltdm %s invalid flag %d\n", __func__,
+ voltdm->name, flag);
+ return -EINVAL;
+ }
+
+ if (val == OMAP_VC_CHANNEL_AUTO_TRANSITION_UNSUPPORTED) {
+ pr_err("%s: transition to %d on %s is NOT supported\n",
+ __func__, flag, voltdm->name);
+ return -EINVAL;
+ }
+
+ /* All ready - set it and move on.. */
+ voltdm->rmw(vc->auto_trans_mask, val << __ffs(vc->auto_trans_mask),
+ auto_trans->reg);
+ return 0;
+}
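
As a usage sketch (not part of this patch), a PM idle path could enable the
retention auto-transition before entering a low-power state and disable it on
resume. "core" is an assumed voltage domain name; the flags come from vc.h.

	static void example_core_auto_ret(bool enable)
	{
		struct voltagedomain *voltdm = voltdm_lookup("core");

		if (!voltdm)
			return;

		omap_vc_set_auto_trans(voltdm, enable ?
			OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION :
			OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE);
	}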
+
+void omap_vc_post_scale(struct voltagedomain *voltdm,
+ unsigned long target_volt,
+ struct omap_volt_data *target_vdata,
+ u8 target_vsel, u8 current_vsel)
+{
+ struct omap_vc_channel *vc;
+ u32 smps_steps = 0, smps_delay = 0;
+ u8 on_vsel, onlp_vsel;
+ u32 val;
+
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s bad voldm\n", __func__);
+ return;
+ }
+
+ vc = voltdm->vc;
+ if (IS_ERR_OR_NULL(vc)) {
+ pr_err("%s voldm=%s bad vc\n", __func__, voltdm->name);
+ return;
+ }
+
+ smps_steps = abs(target_vsel - current_vsel);
+ /* SMPS slew rate / step size. 2us added as buffer. */
+ smps_delay = ((smps_steps * voltdm->pmic->step_size) /
+ voltdm->pmic->slew_rate) + 2;
+ udelay(smps_delay);
+
+ voltdm->curr_volt = target_vdata;
+
+ /* Set up the on voltage for wakeup from lp and OFF */
+ on_vsel = voltdm->pmic->uv_to_vsel(target_volt);
+ onlp_vsel = voltdm->pmic->uv_to_vsel(target_volt);
+ val = (on_vsel << vc->common->cmd_on_shift) |
+ (onlp_vsel << vc->common->cmd_onlp_shift) |
+ vc->setup_voltage_common;
+ voltdm->write(val, vc->cmdval_reg);
+}
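
For example, with hypothetical PMIC numbers (12500 uV per vsel step and a
slew rate of 4000 uV/us), an 8-step voltage change gives:

	smps_delay = (8 * 12500) / 4000 + 2 = 27 us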
+
+static int omap_vc_bypass_send_value(struct voltagedomain *voltdm,
+ struct omap_vc_channel *vc, u8 sa, u8 reg, u32 data)
+{
+ u32 loop_cnt = 0, retries_cnt = 0;
+ u32 vc_valid, vc_bypass_val_reg, vc_bypass_value;
+
+ if (IS_ERR_OR_NULL(vc->common)) {
+ pr_err("%s voldm=%s bad value for vc->common\n",
+ __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ vc_valid = vc->common->valid;
+ vc_bypass_val_reg = vc->common->bypass_val_reg;
+ vc_bypass_value = (data << vc->common->data_shift) |
+ (reg << vc->common->regaddr_shift) |
+ (sa << vc->common->slaveaddr_shift);
+
+ voltdm->write(vc_bypass_value, vc_bypass_val_reg);
+ voltdm->write(vc_bypass_value | vc_valid, vc_bypass_val_reg);
+
+ vc_bypass_value = voltdm->read(vc_bypass_val_reg);
+ /*
+ * Loop till the bypass command is acknowledged from the SMPS.
+ * NOTE: This is legacy code. The loop count and retry count need
+ * to be revisited.
+ */
+ while (vc_bypass_value & vc_valid) {
+ loop_cnt++;
+
+ if (retries_cnt > 10) {
+ pr_warning("%s: Retry count exceeded\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ if (loop_cnt > 50) {
+ retries_cnt++;
+ loop_cnt = 0;
+ udelay(10);
+ }
+ vc_bypass_value = voltdm->read(vc_bypass_val_reg);
+ }
+
+ return 0;
+
+}
+
+/* vc_bypass_scale_voltage - VC bypass method of voltage scaling */
+int omap_vc_bypass_scale_voltage(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_v)
+{
+ struct omap_vc_channel *vc;
+ u8 target_vsel, current_vsel;
+ int ret;
+ unsigned long target_volt = omap_get_operation_voltage(target_v);
+
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s bad voldm\n", __func__);
+ return -EINVAL;
+ }
+
+ vc = voltdm->vc;
+ if (IS_ERR_OR_NULL(vc)) {
+ pr_err("%s voldm=%s bad vc\n", __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ ret = omap_vc_pre_scale(voltdm, target_volt, target_v, &target_vsel,
+ &current_vsel);
+ if (ret)
+ return ret;
+
+ ret = omap_vc_bypass_send_value(voltdm, vc, vc->i2c_slave_addr,
+ vc->volt_reg_addr, target_vsel);
+ if (ret)
+ return ret;
+
+ omap_vc_post_scale(voltdm, target_volt, target_v, target_vsel,
+ current_vsel);
+ return 0;
+}
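
A hypothetical caller sketch: scale vdd_mpu to a new target voltage via the
bypass method. omap_voltage_get_voltdata() is assumed to return the
omap_volt_data entry for the requested voltage, and "mpu" is an assumed
voltage domain name.

	static int example_scale_mpu(unsigned long volt)
	{
		struct voltagedomain *voltdm = voltdm_lookup("mpu");
		struct omap_volt_data *vdata;

		if (!voltdm)
			return -ENODEV;

		vdata = omap_voltage_get_voltdata(voltdm, volt);
		if (IS_ERR_OR_NULL(vdata))
			return -EINVAL;

		return omap_vc_bypass_scale_voltage(voltdm, vdata);
	}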
+
+/**
+ * omap_vc_bypass_send_i2c_msg() - Function to control PMIC registers over SRI2C
+ * @voltdm: voltage domain
+ * @slave_addr: slave address of the device.
+ * @reg_addr: register address to access
+ * @data: what do we want to write there
+ *
+ * Many simpler PMICs with a single I2C interface still have configuration
+ * registers that may need to be populated, typically slew-rate or thermal
+ * shutdown configuration. When such PMICs are hooked up on I2C_SR, this
+ * function allows those configuration registers to be accessed.
+ *
+ * WARNING: Though this could be used for voltage register configuration over
+ * I2C_SR, DO NOT use it for that purpose; this function bypasses all of the
+ * voltage controller's internal state and must be used judiciously.
+ */
+int omap_vc_bypass_send_i2c_msg(struct voltagedomain *voltdm, u8 slave_addr,
+ u8 reg_addr, u8 data)
+{
+ struct omap_vc_channel *vc;
+
+ if (IS_ERR_OR_NULL(voltdm)) {
+ pr_err("%s bad voldm\n", __func__);
+ return -EINVAL;
+ }
+
+ vc = voltdm->vc;
+ if (IS_ERR_OR_NULL(vc)) {
+ pr_err("%s voldm=%s bad vc\n", __func__, voltdm->name);
+ return -EINVAL;
+ }
+
+ return omap_vc_bypass_send_value(voltdm, vc, slave_addr,
+ reg_addr, data);
+}
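
A hypothetical usage sketch: program a PMIC housekeeping register (for
example a slew-rate control register) over I2C_SR. The slave address 0x12,
register 0x49 and value 0x07 are made-up values for illustration only.

	static int example_pmic_misc_cfg(struct voltagedomain *voltdm)
	{
		return omap_vc_bypass_send_i2c_msg(voltdm, 0x12, 0x49, 0x07);
	}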
+
+static void __init omap3_vfsm_init(struct voltagedomain *voltdm)
+{
+ /*
+ * Voltage Manager FSM parameters init
+ * XXX This data should be passed in from the board file
+ */
+ voltdm->write(OMAP3_CLKSETUP, OMAP3_PRM_CLKSETUP_OFFSET);
+ voltdm->write(OMAP3_VOLTOFFSET, OMAP3_PRM_VOLTOFFSET_OFFSET);
+ voltdm->write(OMAP3_VOLTSETUP2, OMAP3_PRM_VOLTSETUP2_OFFSET);
+}
+
+static void __init omap3_vc_init_channel(struct voltagedomain *voltdm)
+{
+ static bool is_initialized;
+
+ if (is_initialized)
+ return;
+
+ omap3_vfsm_init(voltdm);
+
+ is_initialized = true;
+}
+
+
+/* OMAP4 specific voltage init functions */
+static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
+{
+ static bool is_initialized;
+ struct omap_voltdm_pmic *pmic = voltdm->pmic;
+ u32 vc_val = 0;
+
+ if (is_initialized)
+ return;
+
+ if (pmic->i2c_high_speed) {
+ vc_val |= pmic->i2c_hscll_low << OMAP4430_HSCLL_SHIFT;
+ vc_val |= pmic->i2c_hscll_high << OMAP4430_HSCLH_SHIFT;
+ }
+
+ vc_val |= pmic->i2c_scll_low << OMAP4430_SCLL_SHIFT;
+ vc_val |= pmic->i2c_scll_high << OMAP4430_SCLH_SHIFT;
+
+ if (vc_val)
+ voltdm->write(vc_val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
+
+ is_initialized = true;
+}
+
+/**
+ * omap_vc_i2c_init - initialize I2C interface to PMIC
+ * @voltdm: voltage domain containing VC data
+ *
+ * Use PMIC supplied settings for I2C high-speed mode and
+ * master code (if set) and program the VC I2C configuration
+ * register.
+ *
+ * The VC I2C configuration is common to all VC channels,
+ * so this function only configures I2C for the first VC
+ * channel registers. All other VC channels will use the
+ * same configuration.
+ */
+static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
+{
+ struct omap_vc_channel *vc = voltdm->vc;
+ static bool initialized;
+ static bool i2c_high_speed;
+ u8 mcode;
+
+ if (initialized) {
+ if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
+ pr_warn("%s: I2C config for all channels must match.",
+ __func__);
+ return;
+ }
+
+ i2c_high_speed = voltdm->pmic->i2c_high_speed;
+ if (i2c_high_speed)
+ voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
+ vc->common->i2c_cfg_hsen_mask,
+ vc->common->i2c_cfg_reg);
+
+ mcode = voltdm->pmic->i2c_mcode;
+ if (mcode)
+ voltdm->rmw(vc->common->i2c_mcode_mask,
+ mcode << __ffs(vc->common->i2c_mcode_mask),
+ vc->common->i2c_cfg_reg);
+
+ initialized = true;
+}
+
+/**
+ * omap_vc_setup_lp_time() - configure the voltage ramp time for low states.
+ * @voltdm: voltagedomain we are interested in.
+ * @is_retention: Are we interested in retention or OFF?
+ *
+ * The ramp time is calculated from the worst-case voltage drop: the
+ * difference of on_volt and ret_volt for retention, or the full on_volt
+ * for OFF. It is used to compute the setup duration for the low-power state.
+ */
+static int __init omap_vc_setup_lp_time(struct voltagedomain *voltdm,
+ bool is_retention)
+{
+ u32 volt_drop = 0, volt_ramptime = 0, volt_rampcount;
+ u32 sys_clk_mhz = 0, sysclk_cycles = 0, max_latency_for_prescaler = 0;
+ struct clk *sys_ck;
+ u8 pre_scaler = 0;
+ struct omap_voltdm_pmic *pmic = voltdm->pmic;
+ struct omap_vc_channel *vc = voltdm->vc;
+ const struct setup_time_ramp_params *params;
+
+ params = vc->common->setup_time_params;
+ /* If the VC data does not have params for us, return PMIC's value */
+ if (!params)
+ return pmic->volt_setup_time;
+ if (!params->pre_scaler_to_sysclk_cycles_count)
+ return pmic->volt_setup_time;
+
+ /* No of sys_clk cycles for pre_scaler 0 */
+ sysclk_cycles = params->pre_scaler_to_sysclk_cycles[0];
+
+ sys_ck = clk_get(NULL, "sys_clkin_ck");
+ if (IS_ERR_OR_NULL(sys_ck)) {
+ WARN_ONCE(1, "%s: unable to get sys_clkin_ck (voldm %s)\n",
+ __func__, voltdm->name);
+ return pmic->volt_setup_time;
+ }
+ sys_clk_mhz = clk_get_rate(sys_ck) / 1000000;
+ clk_put(sys_ck);
+
+ /*
+ * If we choose prescaler 0x0, there is a limit on the maximum
+ * latency for which a correct count can be chosen: the count field
+ * is limited to 6 bits (max value 63), and for prescaler 0 the ramp
+ * up/down counter is incremented every 64 system clock cycles.
+ * For example, the maximum latency for prescaler 0 with a 38.4 MHz
+ * sys clock would be 105 = (63 * 64) / 38.4.
+ */
+ max_latency_for_prescaler = (63 * sysclk_cycles) / sys_clk_mhz;
+
+ if (is_retention)
+ volt_drop = pmic->on_volt - pmic->ret_volt;
+ else
+ volt_drop = pmic->on_volt;
+ volt_ramptime = DIV_ROUND_UP(volt_drop, pmic->slew_rate);
+ volt_ramptime += OMAP_VC_I2C_ACK_DELAY;
+
+ /* many PMICs need additional time to switch back on */
+ if (!is_retention)
+ volt_ramptime += pmic->switch_on_time;
+
+ if (volt_ramptime < max_latency_for_prescaler)
+ pre_scaler = 0x0;
+ else
+ pre_scaler = 0x1;
+
+ /*
+ * If the computed values are out of range, fall back to the
+ * PMIC-provided value as a form of recovery.
+ */
+ if (pre_scaler > params->pre_scaler_to_sysclk_cycles_count) {
+ pr_err("%s: prescaler idx %d > available %d on domain %s\n",
+ __func__, pre_scaler,
+ params->pre_scaler_to_sysclk_cycles_count, voltdm->name);
+ return pmic->volt_setup_time;
+ }
+
+ sysclk_cycles = params->pre_scaler_to_sysclk_cycles[pre_scaler];
+
+ volt_rampcount = ((volt_ramptime * sys_clk_mhz) / sysclk_cycles) + 1;
+
+ return (pre_scaler << OMAP4430_RAMP_DOWN_PRESCAL_SHIFT) |
+ (pre_scaler << OMAP4430_RAMP_UP_PRESCAL_SHIFT) |
+ (volt_rampcount << OMAP4430_RAMP_DOWN_COUNT_SHIFT) |
+ (volt_rampcount << OMAP4430_RAMP_UP_COUNT_SHIFT);
+}
+
+void __init omap_vc_init_channel(struct voltagedomain *voltdm)
+{
+ struct omap_vc_channel *vc = voltdm->vc;
+ u8 on_vsel, onlp_vsel, ret_vsel, off_vsel;
+ u32 val;
+
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+ pr_err("%s: PMIC info requried to configure vc for"
+ "vdd_%s not populated.Hence cannot initialize vc\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ vc->cfg_channel = 0;
+ if (vc->flags & OMAP_VC_CHANNEL_CFG_MUTANT)
+ vc_cfg_bits = &vc_mutant_channel_cfg;
+ else
+ vc_cfg_bits = &vc_default_channel_cfg;
+
+ /* get PMIC/board specific settings */
+ vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr;
+ vc->volt_reg_addr = voltdm->pmic->volt_reg_addr;
+ vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr;
+ /* Calculate the RET voltage setup time and update volt_setup_time */
+ vc->setup_time = omap_vc_setup_lp_time(voltdm, true);
+
+ if ((vc->flags & OMAP_VC_CHANNEL_DEFAULT) &&
+ ((vc->i2c_slave_addr == USE_DEFAULT_CHANNEL_I2C_PARAM) ||
+ (vc->cmd_reg_addr == USE_DEFAULT_CHANNEL_I2C_PARAM) ||
+ (vc->volt_reg_addr == USE_DEFAULT_CHANNEL_I2C_PARAM))) {
+ pr_err("%s: voltdm %s: default channel "
+ "bad config-sa=%2x vol=%2x, cmd=%2x?\n", __func__,
+ voltdm->name, vc->i2c_slave_addr, vc->volt_reg_addr,
+ vc->cmd_reg_addr);
+ return;
+ }
+
+ /* Configure the i2c slave address for this VC */
+ if (vc->i2c_slave_addr != USE_DEFAULT_CHANNEL_I2C_PARAM) {
+ voltdm->rmw(vc->smps_sa_mask,
+ vc->i2c_slave_addr << __ffs(vc->smps_sa_mask),
+ vc->common->smps_sa_reg);
+ vc->cfg_channel |= vc_cfg_bits->sa;
+ }
+
+ /*
+ * Configure the PMIC register addresses.
+ */
+ if (vc->volt_reg_addr != USE_DEFAULT_CHANNEL_I2C_PARAM) {
+ voltdm->rmw(vc->smps_volra_mask,
+ vc->volt_reg_addr << __ffs(vc->smps_volra_mask),
+ vc->common->smps_volra_reg);
+ vc->cfg_channel |= vc_cfg_bits->rav;
+ }
+
+ if (vc->cmd_reg_addr != USE_DEFAULT_CHANNEL_I2C_PARAM) {
+ voltdm->rmw(vc->smps_cmdra_mask,
+ vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask),
+ vc->common->smps_cmdra_reg);
+ vc->cfg_channel |= vc_cfg_bits->rac;
+ }
+
+ /* If voltage and cmd regs are same, we can use cmdra register */
+ if (vc->volt_reg_addr == vc->cmd_reg_addr)
+ vc->cfg_channel |= vc_cfg_bits->racen;
+
+ /* Set up the on, inactive, retention and off voltage */
+ on_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->on_volt);
+ onlp_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->onlp_volt);
+ ret_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->ret_volt);
+ off_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->off_volt);
+ vc->setup_voltage_common =
+ (ret_vsel << vc->common->cmd_ret_shift) |
+ (off_vsel << vc->common->cmd_off_shift);
+ val = (on_vsel << vc->common->cmd_on_shift) |
+ (onlp_vsel << vc->common->cmd_onlp_shift) |
+ vc->setup_voltage_common;
+ voltdm->write(val, vc->cmdval_reg);
+ vc->cfg_channel |= vc_cfg_bits->cmd;
+
+ /* Channel configuration */
+ omap_vc_config_channel(voltdm);
+
+ /* Configure the setup times */
+ voltdm->rmw(voltdm->vfsm->voltsetup_mask,
+ vc->setup_time << __ffs(voltdm->vfsm->voltsetup_mask),
+ voltdm->vfsm->voltsetup_reg);
+ voltdm->rmw(voltdm->vfsm->voltsetup_mask,
+ omap_vc_setup_lp_time(voltdm, false) <<
+ __ffs(voltdm->vfsm->voltsetup_mask),
+ voltdm->vfsm->voltsetupoff_reg);
+
+ omap_vc_i2c_init(voltdm);
+
+ if (cpu_is_omap34xx())
+ omap3_vc_init_channel(voltdm);
+ else if (cpu_is_omap44xx())
+ omap4_vc_init_channel(voltdm);
+}
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index e776777..66c6d9a 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -19,8 +19,26 @@
#include <linux/kernel.h>
+struct voltagedomain;
+
/**
- * struct omap_vc_common_data - per-VC register/bitfield data
+ * struct setup_time_ramp_params - ramp time parameters
+ * @pre_scaler_to_sysclk_cycles: array mapping each prescaler value to the
+ * number of system clock cycles after which the ramp up/down counter is
+ * incremented or decremented in the PRM_VOLTSETUP_XXX_RET_SLEEP registers.
+ * This handles variances in the defined values due to conditions such as
+ * "Errata Id: i623: Retention/Sleep Voltage Transitions Ramp Time".
+ * @pre_scaler_to_sysclk_cycles_count: number of entries available
+ *
+ * These parameters allow the ramp time for the device to be computed.
+ */
+struct setup_time_ramp_params {
+ u16 *pre_scaler_to_sysclk_cycles;
+ u8 pre_scaler_to_sysclk_cycles_count;
+};
+
+/**
+ * struct omap_vc_common - per-VC register/bitfield data
* @cmd_on_mask: ON bitmask in PRM_VC_CMD_VAL* register
* @valid: VALID bitmask in PRM_VC_BYPASS_VAL register
* @smps_sa_reg: Offset of PRM_VC_SMPS_SA reg from PRM start
@@ -33,15 +51,20 @@
* @cmd_onlp_shift: ONLP field shift in PRM_VC_CMD_VAL_* register
* @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
* @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
+ * @i2c_cfg_reg: I2C configuration register offset
+ * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
+ * @i2c_mcode_mask: MCODE field mask for I2C config register
+ * @setup_time_params: setup time parameters
*
* XXX One of cmd_on_mask and cmd_on_shift are not needed
* XXX VALID should probably be a shift, not a mask
*/
-struct omap_vc_common_data {
+struct omap_vc_common {
u32 cmd_on_mask;
u32 valid;
u8 smps_sa_reg;
u8 smps_volra_reg;
+ u8 smps_cmdra_reg;
u8 bypass_val_reg;
u8 data_shift;
u8 slaveaddr_shift;
@@ -50,34 +73,92 @@
u8 cmd_onlp_shift;
u8 cmd_ret_shift;
u8 cmd_off_shift;
+ u8 cfg_channel_reg;
+ u8 i2c_cfg_reg;
+ u8 i2c_cfg_hsen_mask;
+ u8 i2c_mcode_mask;
+ struct setup_time_ramp_params *setup_time_params;
};
/**
- * struct omap_vc_instance_data - VC per-instance data
- * @vc_common: pointer to VC common data for this platform
- * @smps_sa_mask: SA* bitmask in the PRM_VC_SMPS_SA register
- * @smps_volra_mask: VOLRA* bitmask in the PRM_VC_VOL_RA register
- * @smps_sa_shift: SA* field shift in the PRM_VC_SMPS_SA register
- * @smps_volra_shift: VOLRA* field shift in the PRM_VC_VOL_RA register
- *
- * XXX It is not necessary to have both a *_mask and a *_shift -
- * remove one
+ * struct omap_vc_auto_trans - describe the auto transition for the domain
+ * @reg: register to modify (usually PRM_VOLTCTRL)
+ * @sleep_val: value to set for enabling sleep transition
+ * @retention_val: value to set for enabling retention transition
+ * @off_val: value to set for enabling off transition
*/
-struct omap_vc_instance_data {
- const struct omap_vc_common_data *vc_common;
- u32 smps_sa_mask;
- u32 smps_volra_mask;
- u8 cmdval_reg;
- u8 smps_sa_shift;
- u8 smps_volra_shift;
+struct omap_vc_auto_trans {
+ u8 reg;
+ u8 sleep_val;
+ u8 retention_val;
+ u8 off_val;
};
-extern struct omap_vc_instance_data omap3_vc1_data;
-extern struct omap_vc_instance_data omap3_vc2_data;
+/* omap_vc_channel.flags values */
+#define OMAP_VC_CHANNEL_DEFAULT BIT(0)
+#define OMAP_VC_CHANNEL_CFG_MUTANT BIT(1)
-extern struct omap_vc_instance_data omap4_vc_mpu_data;
-extern struct omap_vc_instance_data omap4_vc_iva_data;
-extern struct omap_vc_instance_data omap4_vc_core_data;
+/**
+ * struct omap_vc_channel - VC per-instance data
+ * @flags: VC channel-specific flags (optional)
+ * @common: pointer to VC common data for this platform
+ * @smps_sa_mask: i2c slave address bitmask in the PRM_VC_SMPS_SA register
+ * @smps_volra_mask: VOLRA* bitmask in the PRM_VC_VOL_RA register
+ * @auto_trans: Auto transition information
+ * @auto_trans_mask: Auto transition mask for this channel
+ */
+struct omap_vc_channel {
+ u8 flags;
+ /* channel state */
+ u16 i2c_slave_addr;
+ u16 volt_reg_addr;
+ u16 cmd_reg_addr;
+ u8 cfg_channel;
+ u32 setup_time;
+ u32 setup_voltage_common;
+ bool i2c_high_speed;
+
+ /* register access data */
+ const struct omap_vc_common *common;
+ u32 smps_sa_mask;
+ u32 smps_volra_mask;
+ u32 smps_cmdra_mask;
+ u8 cmdval_reg;
+ u8 cfg_channel_sa_shift;
+
+ const struct omap_vc_auto_trans *auto_trans;
+ u32 auto_trans_mask;
+};
+
+extern struct omap_vc_channel omap3_vc_mpu;
+extern struct omap_vc_channel omap3_vc_core;
+
+extern struct omap_vc_channel omap4_vc_mpu;
+extern struct omap_vc_channel omap4_vc_iva;
+extern struct omap_vc_channel omap4_vc_core;
+
+void omap_vc_init_channel(struct voltagedomain *voltdm);
+int omap_vc_pre_scale(struct voltagedomain *voltdm,
+ unsigned long target_volt,
+ struct omap_volt_data *target_vdata,
+ u8 *target_vsel, u8 *current_vsel);
+void omap_vc_post_scale(struct voltagedomain *voltdm,
+ unsigned long target_volt,
+ struct omap_volt_data *target_vdata,
+ u8 target_vsel, u8 current_vsel);
+
+/* Auto transition flags for users */
+#define OMAP_VC_CHANNEL_AUTO_TRANSITION_DISABLE 0
+#define OMAP_VC_CHANNEL_AUTO_TRANSITION_SLEEP 1
+#define OMAP_VC_CHANNEL_AUTO_TRANSITION_RETENTION 2
+#define OMAP_VC_CHANNEL_AUTO_TRANSITION_OFF 3
+/* For silicon data to mark unsupported transition */
+#define OMAP_VC_CHANNEL_AUTO_TRANSITION_UNSUPPORTED 0xff
+int omap_vc_set_auto_trans(struct voltagedomain *voltdm, u8 flag);
+int omap_vc_bypass_scale_voltage(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_volt);
+int omap_vc_bypass_send_i2c_msg(struct voltagedomain *voltdm,
+ u8 slave_addr, u8 reg_addr, u8 data);
#endif
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index f37dc4b..c21d3f5 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -29,9 +29,10 @@
* VC data common to 34xx/36xx chips
* XXX This stuff presumably belongs in the vc3xxx.c or vc.c file.
*/
-static struct omap_vc_common_data omap3_vc_common = {
+static struct omap_vc_common omap3_vc_common = {
.smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET,
.smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET,
+ .smps_cmdra_reg = OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET,
.bypass_val_reg = OMAP3_PRM_VC_BYPASS_VAL_OFFSET,
.data_shift = OMAP3430_DATA_SHIFT,
.slaveaddr_shift = OMAP3430_SLAVEADDR_SHIFT,
@@ -42,22 +43,42 @@
.cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
.cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
.cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
+ .cfg_channel_reg = OMAP3_PRM_VC_CH_CONF_OFFSET,
+ .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
+ .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
+ .i2c_mcode_mask = OMAP3430_MCODE_MASK,
};
-struct omap_vc_instance_data omap3_vc1_data = {
- .vc_common = &omap3_vc_common,
+/*
+ * VC auto transition settings for OMAP3. On OMAP3, we just have a single
+ * device-wide state that is achieved on core, so this data is used
+ * only for the core domain transition.
+ */
+static const struct omap_vc_auto_trans omap3_vc_auto_trans = {
+ .reg = OMAP3_PRM_VOLTCTRL_OFFSET,
+ .sleep_val = OMAP3430_AUTO_SLEEP_MASK,
+ .retention_val = OMAP3430_AUTO_RET_MASK,
+ .off_val = OMAP3430_AUTO_OFF_MASK,
+};
+
+struct omap_vc_channel omap3_vc_mpu = {
+ .common = &omap3_vc_common,
.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_0_OFFSET,
- .smps_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA0_SHIFT,
.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA0_MASK,
- .smps_volra_shift = OMAP3430_VOLRA0_SHIFT,
.smps_volra_mask = OMAP3430_VOLRA0_MASK,
+ .smps_cmdra_mask = OMAP3430_CMDRA0_MASK,
+ .cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA0_SHIFT,
};
-struct omap_vc_instance_data omap3_vc2_data = {
- .vc_common = &omap3_vc_common,
+struct omap_vc_channel omap3_vc_core = {
+ .common = &omap3_vc_common,
.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_1_OFFSET,
- .smps_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT,
.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA1_MASK,
- .smps_volra_shift = OMAP3430_VOLRA1_SHIFT,
.smps_volra_mask = OMAP3430_VOLRA1_MASK,
+ .smps_cmdra_mask = OMAP3430_CMDRA1_MASK,
+ .cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT,
+
+ .auto_trans = &omap3_vc_auto_trans,
+ .auto_trans_mask = OMAP3430_AUTO_OFF_MASK | OMAP3430_AUTO_RET_MASK |
+ OMAP3430_AUTO_SLEEP_MASK,
};
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index a98da8d..08f845b 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -26,13 +26,20 @@
#include "vc.h"
+static u16 pre_scaler_to_sysclk_cycles_44xx[] = {16, 64, 128, 512};
+static struct setup_time_ramp_params omap4_vc_setuptime_params = {
+ .pre_scaler_to_sysclk_cycles = pre_scaler_to_sysclk_cycles_44xx,
+ .pre_scaler_to_sysclk_cycles_count = 4,
+};
+
/*
* VC data common to 44xx chips
* XXX This stuff presumably belongs in the vc3xxx.c or vc.c file.
*/
-static const struct omap_vc_common_data omap4_vc_common = {
+static const struct omap_vc_common omap4_vc_common = {
.smps_sa_reg = OMAP4_PRM_VC_SMPS_SA_OFFSET,
.smps_volra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET,
+ .smps_cmdra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET,
.bypass_val_reg = OMAP4_PRM_VC_VAL_BYPASS_OFFSET,
.data_shift = OMAP4430_DATA_SHIFT,
.slaveaddr_shift = OMAP4430_SLAVEADDR_SHIFT,
@@ -43,33 +50,56 @@
.cmd_onlp_shift = OMAP4430_ONLP_SHIFT,
.cmd_ret_shift = OMAP4430_RET_SHIFT,
.cmd_off_shift = OMAP4430_OFF_SHIFT,
+ .cfg_channel_reg = OMAP4_PRM_VC_CFG_CHANNEL_OFFSET,
+ .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
+ .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
+ .i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
+ .setup_time_params = &omap4_vc_setuptime_params,
+};
+
+/* VC auto transition settings for OMAP4. */
+static const struct omap_vc_auto_trans omap4_vc_auto_trans = {
+ .reg = OMAP4_PRM_VOLTCTRL_OFFSET,
+ .sleep_val = OMAP4430_AUTO_CTRL_VDD_SLEEP_MASK,
+ .retention_val = OMAP4430_AUTO_CTRL_VDD_RET_MASK,
+ .off_val = OMAP_VC_CHANNEL_AUTO_TRANSITION_UNSUPPORTED,
};
/* VC instance data for each controllable voltage line */
-struct omap_vc_instance_data omap4_vc_mpu_data = {
- .vc_common = &omap4_vc_common,
+struct omap_vc_channel omap4_vc_mpu = {
+ .flags = OMAP_VC_CHANNEL_DEFAULT | OMAP_VC_CHANNEL_CFG_MUTANT,
+ .common = &omap4_vc_common,
.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET,
- .smps_sa_shift = OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_SHIFT,
.smps_sa_mask = OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK,
- .smps_volra_shift = OMAP4430_VOLRA_VDD_MPU_L_SHIFT,
.smps_volra_mask = OMAP4430_VOLRA_VDD_MPU_L_MASK,
+ .smps_cmdra_mask = OMAP4430_CMDRA_VDD_MPU_L_MASK,
+ .cfg_channel_sa_shift = OMAP4430_SA_VDD_MPU_L_SHIFT,
+
+ .auto_trans = &omap4_vc_auto_trans,
+ .auto_trans_mask = OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK,
};
-struct omap_vc_instance_data omap4_vc_iva_data = {
- .vc_common = &omap4_vc_common,
+struct omap_vc_channel omap4_vc_iva = {
+ .common = &omap4_vc_common,
.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET,
- .smps_sa_shift = OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_SHIFT,
.smps_sa_mask = OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK,
- .smps_volra_shift = OMAP4430_VOLRA_VDD_IVA_L_SHIFT,
.smps_volra_mask = OMAP4430_VOLRA_VDD_IVA_L_MASK,
+ .smps_cmdra_mask = OMAP4430_CMDRA_VDD_IVA_L_MASK,
+ .cfg_channel_sa_shift = OMAP4430_SA_VDD_IVA_L_SHIFT,
+
+ .auto_trans = &omap4_vc_auto_trans,
+ .auto_trans_mask = OMAP4430_AUTO_CTRL_VDD_IVA_L_MASK,
};
-struct omap_vc_instance_data omap4_vc_core_data = {
- .vc_common = &omap4_vc_common,
+struct omap_vc_channel omap4_vc_core = {
+ .common = &omap4_vc_common,
.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET,
- .smps_sa_shift = OMAP4430_SA_VDD_CORE_L_0_6_SHIFT,
.smps_sa_mask = OMAP4430_SA_VDD_CORE_L_0_6_MASK,
- .smps_volra_shift = OMAP4430_VOLRA_VDD_CORE_L_SHIFT,
.smps_volra_mask = OMAP4430_VOLRA_VDD_CORE_L_MASK,
+ .smps_cmdra_mask = OMAP4430_CMDRA_VDD_CORE_L_MASK,
+ .cfg_channel_sa_shift = OMAP4430_SA_VDD_CORE_L_SHIFT,
+
+ .auto_trans = &omap4_vc_auto_trans,
+ .auto_trans_mask = OMAP4430_AUTO_CTRL_VDD_CORE_L_MASK,
};
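
Note how the reworked channel data keeps only the *_mask fields and drops the matching *_shift fields. A minimal sketch of the idea follows, assuming generic setup code derives each shift from its mask with __ffs(); the function name is hypothetical and the sketch ignores USE_DEFAULT_CHANNEL_I2C_PARAM handling, but voltdm->rmw(), voltdm->pmic and the channel/common fields are the ones introduced in this series.

/* Sketch only - not part of the patch; shows the mask-derived-shift idea. */
#include <linux/bitops.h>
#include "voltage.h"
#include "vc.h"

static void sketch_vc_config_channel_regs(struct voltagedomain *voltdm)
{
        struct omap_vc_channel *vc = voltdm->vc;

        /* I2C slave address of the supply for this voltage domain */
        voltdm->rmw(vc->smps_sa_mask,
                    voltdm->pmic->i2c_slave_addr << __ffs(vc->smps_sa_mask),
                    vc->common->smps_sa_reg);

        /* PMIC register address used for voltage values (VOLRA) ... */
        voltdm->rmw(vc->smps_volra_mask,
                    voltdm->pmic->volt_reg_addr << __ffs(vc->smps_volra_mask),
                    vc->common->smps_volra_reg);

        /* ... and for command values (CMDRA), new in this patch */
        voltdm->rmw(vc->smps_cmdra_mask,
                    voltdm->pmic->cmd_reg_addr << __ffs(vc->smps_cmdra_mask),
                    vc->common->smps_cmdra_reg);
}
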
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index 9ef3789..995c38f 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -36,633 +36,51 @@
#include "control.h"
#include "voltage.h"
+#include "powerdomain.h"
#include "vc.h"
#include "vp.h"
-#define VOLTAGE_DIR_SIZE 16
+static LIST_HEAD(voltdm_list);
-
-static struct omap_vdd_info **vdd_info;
-
-/*
- * Number of scalable voltage domains.
- */
-static int nr_scalable_vdd;
-
-/* XXX document */
-static s16 prm_mod_offs;
-static s16 prm_irqst_ocp_mod_offs;
-
-static struct dentry *voltage_dir;
-
-/* Init function pointers */
-static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd,
- unsigned long target_volt);
-
-static u32 omap3_voltage_read_reg(u16 mod, u8 offset)
+static int __init _config_common_vdd_data(struct voltagedomain *voltdm)
{
- return omap2_prm_read_mod_reg(mod, offset);
-}
-
-static void omap3_voltage_write_reg(u32 val, u16 mod, u8 offset)
-{
- omap2_prm_write_mod_reg(val, mod, offset);
-}
-
-static u32 omap4_voltage_read_reg(u16 mod, u8 offset)
-{
- return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
- mod, offset);
-}
-
-static void omap4_voltage_write_reg(u32 val, u16 mod, u8 offset)
-{
- omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, mod, offset);
-}
-
-static int __init _config_common_vdd_data(struct omap_vdd_info *vdd)
-{
- char *sys_ck_name;
struct clk *sys_ck;
- u32 sys_clk_speed, timeout_val, waittime;
-
- /*
- * XXX Clockfw should handle this, or this should be in a
- * struct record
- */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- sys_ck_name = "sys_ck";
- else if (cpu_is_omap44xx())
- sys_ck_name = "sys_clkin_ck";
- else
- return -EINVAL;
/*
* Sys clk rate is require to calculate vp timeout value and
* smpswaittimemin and smpswaittimemax.
*/
- sys_ck = clk_get(NULL, sys_ck_name);
+ sys_ck = clk_get(NULL, voltdm->sys_clk.name);
if (IS_ERR(sys_ck)) {
pr_warning("%s: Could not get the sys clk to calculate"
- "various vdd_%s params\n", __func__, vdd->voltdm.name);
+ "various vdd_%s params\n", __func__, voltdm->name);
return -EINVAL;
}
- sys_clk_speed = clk_get_rate(sys_ck);
- clk_put(sys_ck);
- /* Divide to avoid overflow */
- sys_clk_speed /= 1000;
+ voltdm->sys_clk.rate = clk_get_rate(sys_ck);
+ WARN_ON(!voltdm->sys_clk.rate);
/* Generic voltage parameters */
- vdd->volt_scale = vp_forceupdate_scale_voltage;
- vdd->vp_enabled = false;
-
- vdd->vp_rt_data.vpconfig_erroroffset =
- (vdd->pmic_info->vp_erroroffset <<
- vdd->vp_data->vp_common->vpconfig_erroroffset_shift);
-
- timeout_val = (sys_clk_speed * vdd->pmic_info->vp_timeout_us) / 1000;
- vdd->vp_rt_data.vlimitto_timeout = timeout_val;
- vdd->vp_rt_data.vlimitto_vddmin = vdd->pmic_info->vp_vddmin;
- vdd->vp_rt_data.vlimitto_vddmax = vdd->pmic_info->vp_vddmax;
-
- waittime = ((vdd->pmic_info->step_size / vdd->pmic_info->slew_rate) *
- sys_clk_speed) / 1000;
- vdd->vp_rt_data.vstepmin_smpswaittimemin = waittime;
- vdd->vp_rt_data.vstepmax_smpswaittimemax = waittime;
- vdd->vp_rt_data.vstepmin_stepmin = vdd->pmic_info->vp_vstepmin;
- vdd->vp_rt_data.vstepmax_stepmax = vdd->pmic_info->vp_vstepmax;
+ voltdm->scale = omap_vp_forceupdate_scale;
return 0;
}
-/* Voltage debugfs support */
-static int vp_volt_debug_get(void *data, u64 *val)
-{
- struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
- u8 vsel;
-
- if (!vdd) {
- pr_warning("Wrong paramater passed\n");
- return -EINVAL;
- }
-
- vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
-
- if (!vdd->pmic_info->vsel_to_uv) {
- pr_warning("PMIC function to convert vsel to voltage"
- "in uV not registerd\n");
- return -EINVAL;
- }
-
- *val = vdd->pmic_info->vsel_to_uv(vsel);
- return 0;
-}
-
-static int nom_volt_debug_get(void *data, u64 *val)
-{
- struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
-
- if (!vdd) {
- pr_warning("Wrong paramater passed\n");
- return -EINVAL;
- }
-
- *val = omap_voltage_get_nom_volt(&vdd->voltdm);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(vp_volt_debug_fops, vp_volt_debug_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(nom_volt_debug_fops, nom_volt_debug_get, NULL,
- "%llu\n");
-static void vp_latch_vsel(struct omap_vdd_info *vdd)
-{
- u32 vpconfig;
- unsigned long uvdc;
- char vsel;
-
- uvdc = omap_voltage_get_nom_volt(&vdd->voltdm);
- if (!uvdc) {
- pr_warning("%s: unable to find current voltage for vdd_%s\n",
- __func__, vdd->voltdm.name);
- return;
- }
-
- if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
- pr_warning("%s: PMIC function to convert voltage in uV to"
- " vsel not registered\n", __func__);
- return;
- }
-
- vsel = vdd->pmic_info->uv_to_vsel(uvdc);
-
- vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
- vpconfig &= ~(vdd->vp_data->vp_common->vpconfig_initvoltage_mask |
- vdd->vp_data->vp_common->vpconfig_initvdd);
- vpconfig |= vsel << vdd->vp_data->vp_common->vpconfig_initvoltage_shift;
-
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- /* Trigger initVDD value copy to voltage processor */
- vdd->write_reg((vpconfig | vdd->vp_data->vp_common->vpconfig_initvdd),
- prm_mod_offs, vdd->vp_data->vpconfig);
-
- /* Clear initVDD copy trigger bit */
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-}
-
-/* Generic voltage init functions */
-static void __init vp_init(struct omap_vdd_info *vdd)
-{
- u32 vp_val;
-
- if (!vdd->read_reg || !vdd->write_reg) {
- pr_err("%s: No read/write API for accessing vdd_%s regs\n",
- __func__, vdd->voltdm.name);
- return;
- }
-
- vp_val = vdd->vp_rt_data.vpconfig_erroroffset |
- (vdd->vp_rt_data.vpconfig_errorgain <<
- vdd->vp_data->vp_common->vpconfig_errorgain_shift) |
- vdd->vp_data->vp_common->vpconfig_timeouten;
- vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vpconfig);
-
- vp_val = ((vdd->vp_rt_data.vstepmin_smpswaittimemin <<
- vdd->vp_data->vp_common->vstepmin_smpswaittimemin_shift) |
- (vdd->vp_rt_data.vstepmin_stepmin <<
- vdd->vp_data->vp_common->vstepmin_stepmin_shift));
- vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vstepmin);
-
- vp_val = ((vdd->vp_rt_data.vstepmax_smpswaittimemax <<
- vdd->vp_data->vp_common->vstepmax_smpswaittimemax_shift) |
- (vdd->vp_rt_data.vstepmax_stepmax <<
- vdd->vp_data->vp_common->vstepmax_stepmax_shift));
- vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vstepmax);
-
- vp_val = ((vdd->vp_rt_data.vlimitto_vddmax <<
- vdd->vp_data->vp_common->vlimitto_vddmax_shift) |
- (vdd->vp_rt_data.vlimitto_vddmin <<
- vdd->vp_data->vp_common->vlimitto_vddmin_shift) |
- (vdd->vp_rt_data.vlimitto_timeout <<
- vdd->vp_data->vp_common->vlimitto_timeout_shift));
- vdd->write_reg(vp_val, prm_mod_offs, vdd->vp_data->vlimitto);
-}
-
-static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
-{
- char *name;
-
- name = kzalloc(VOLTAGE_DIR_SIZE, GFP_KERNEL);
- if (!name) {
- pr_warning("%s: Unable to allocate memory for debugfs"
- " directory name for vdd_%s",
- __func__, vdd->voltdm.name);
- return;
- }
- strcpy(name, "vdd_");
- strcat(name, vdd->voltdm.name);
-
- vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
- kfree(name);
- if (IS_ERR(vdd->debug_dir)) {
- pr_warning("%s: Unable to create debugfs directory for"
- " vdd_%s\n", __func__, vdd->voltdm.name);
- vdd->debug_dir = NULL;
- return;
- }
-
- (void) debugfs_create_x16("vp_errorgain", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vpconfig_errorgain));
- (void) debugfs_create_x16("vp_smpswaittimemin", S_IRUGO,
- vdd->debug_dir,
- &(vdd->vp_rt_data.vstepmin_smpswaittimemin));
- (void) debugfs_create_x8("vp_stepmin", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vstepmin_stepmin));
- (void) debugfs_create_x16("vp_smpswaittimemax", S_IRUGO,
- vdd->debug_dir,
- &(vdd->vp_rt_data.vstepmax_smpswaittimemax));
- (void) debugfs_create_x8("vp_stepmax", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vstepmax_stepmax));
- (void) debugfs_create_x8("vp_vddmax", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vlimitto_vddmax));
- (void) debugfs_create_x8("vp_vddmin", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vlimitto_vddmin));
- (void) debugfs_create_x16("vp_timeout", S_IRUGO, vdd->debug_dir,
- &(vdd->vp_rt_data.vlimitto_timeout));
- (void) debugfs_create_file("curr_vp_volt", S_IRUGO, vdd->debug_dir,
- (void *) vdd, &vp_volt_debug_fops);
- (void) debugfs_create_file("curr_nominal_volt", S_IRUGO,
- vdd->debug_dir, (void *) vdd,
- &nom_volt_debug_fops);
-}
-
-/* Voltage scale and accessory APIs */
-static int _pre_volt_scale(struct omap_vdd_info *vdd,
- unsigned long target_volt, u8 *target_vsel, u8 *current_vsel)
-{
- struct omap_volt_data *volt_data;
- const struct omap_vc_common_data *vc_common;
- const struct omap_vp_common_data *vp_common;
- u32 vc_cmdval, vp_errgain_val;
-
- vc_common = vdd->vc_data->vc_common;
- vp_common = vdd->vp_data->vp_common;
-
- /* Check if suffiecient pmic info is available for this vdd */
- if (!vdd->pmic_info) {
- pr_err("%s: Insufficient pmic info to scale the vdd_%s\n",
- __func__, vdd->voltdm.name);
- return -EINVAL;
- }
-
- if (!vdd->pmic_info->uv_to_vsel) {
- pr_err("%s: PMIC function to convert voltage in uV to"
- "vsel not registered. Hence unable to scale voltage"
- "for vdd_%s\n", __func__, vdd->voltdm.name);
- return -ENODATA;
- }
-
- if (!vdd->read_reg || !vdd->write_reg) {
- pr_err("%s: No read/write API for accessing vdd_%s regs\n",
- __func__, vdd->voltdm.name);
- return -EINVAL;
- }
-
- /* Get volt_data corresponding to target_volt */
- volt_data = omap_voltage_get_voltdata(&vdd->voltdm, target_volt);
- if (IS_ERR(volt_data))
- volt_data = NULL;
-
- *target_vsel = vdd->pmic_info->uv_to_vsel(target_volt);
- *current_vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
-
- /* Setting the ON voltage to the new target voltage */
- vc_cmdval = vdd->read_reg(prm_mod_offs, vdd->vc_data->cmdval_reg);
- vc_cmdval &= ~vc_common->cmd_on_mask;
- vc_cmdval |= (*target_vsel << vc_common->cmd_on_shift);
- vdd->write_reg(vc_cmdval, prm_mod_offs, vdd->vc_data->cmdval_reg);
-
- /* Setting vp errorgain based on the voltage */
- if (volt_data) {
- vp_errgain_val = vdd->read_reg(prm_mod_offs,
- vdd->vp_data->vpconfig);
- vdd->vp_rt_data.vpconfig_errorgain = volt_data->vp_errgain;
- vp_errgain_val &= ~vp_common->vpconfig_errorgain_mask;
- vp_errgain_val |= vdd->vp_rt_data.vpconfig_errorgain <<
- vp_common->vpconfig_errorgain_shift;
- vdd->write_reg(vp_errgain_val, prm_mod_offs,
- vdd->vp_data->vpconfig);
- }
-
- return 0;
-}
-
-static void _post_volt_scale(struct omap_vdd_info *vdd,
- unsigned long target_volt, u8 target_vsel, u8 current_vsel)
-{
- u32 smps_steps = 0, smps_delay = 0;
-
- smps_steps = abs(target_vsel - current_vsel);
- /* SMPS slew rate / step size. 2us added as buffer. */
- smps_delay = ((smps_steps * vdd->pmic_info->step_size) /
- vdd->pmic_info->slew_rate) + 2;
- udelay(smps_delay);
-
- vdd->curr_volt = target_volt;
-}
-
-/* vc_bypass_scale_voltage - VC bypass method of voltage scaling */
-static int vc_bypass_scale_voltage(struct omap_vdd_info *vdd,
- unsigned long target_volt)
-{
- u32 loop_cnt = 0, retries_cnt = 0;
- u32 vc_valid, vc_bypass_val_reg, vc_bypass_value;
- u8 target_vsel, current_vsel;
- int ret;
-
- ret = _pre_volt_scale(vdd, target_volt, &target_vsel, ¤t_vsel);
- if (ret)
- return ret;
-
- vc_valid = vdd->vc_data->vc_common->valid;
- vc_bypass_val_reg = vdd->vc_data->vc_common->bypass_val_reg;
- vc_bypass_value = (target_vsel << vdd->vc_data->vc_common->data_shift) |
- (vdd->pmic_info->pmic_reg <<
- vdd->vc_data->vc_common->regaddr_shift) |
- (vdd->pmic_info->i2c_slave_addr <<
- vdd->vc_data->vc_common->slaveaddr_shift);
-
- vdd->write_reg(vc_bypass_value, prm_mod_offs, vc_bypass_val_reg);
- vdd->write_reg(vc_bypass_value | vc_valid, prm_mod_offs,
- vc_bypass_val_reg);
-
- vc_bypass_value = vdd->read_reg(prm_mod_offs, vc_bypass_val_reg);
- /*
- * Loop till the bypass command is acknowledged from the SMPS.
- * NOTE: This is legacy code. The loop count and retry count needs
- * to be revisited.
- */
- while (!(vc_bypass_value & vc_valid)) {
- loop_cnt++;
-
- if (retries_cnt > 10) {
- pr_warning("%s: Retry count exceeded\n", __func__);
- return -ETIMEDOUT;
- }
-
- if (loop_cnt > 50) {
- retries_cnt++;
- loop_cnt = 0;
- udelay(10);
- }
- vc_bypass_value = vdd->read_reg(prm_mod_offs,
- vc_bypass_val_reg);
- }
-
- _post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
- return 0;
-}
-
-/* VP force update method of voltage scaling */
-static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd,
- unsigned long target_volt)
-{
- u32 vpconfig;
- u8 target_vsel, current_vsel, prm_irqst_reg;
- int ret, timeout = 0;
-
- ret = _pre_volt_scale(vdd, target_volt, &target_vsel, ¤t_vsel);
- if (ret)
- return ret;
-
- prm_irqst_reg = vdd->vp_data->prm_irqst_data->prm_irqst_reg;
-
- /*
- * Clear all pending TransactionDone interrupt/status. Typical latency
- * is <3us
- */
- while (timeout++ < VP_TRANXDONE_TIMEOUT) {
- vdd->write_reg(vdd->vp_data->prm_irqst_data->tranxdone_status,
- prm_irqst_ocp_mod_offs, prm_irqst_reg);
- if (!(vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
- vdd->vp_data->prm_irqst_data->tranxdone_status))
- break;
- udelay(1);
- }
- if (timeout >= VP_TRANXDONE_TIMEOUT) {
- pr_warning("%s: vdd_%s TRANXDONE timeout exceeded."
- "Voltage change aborted", __func__, vdd->voltdm.name);
- return -ETIMEDOUT;
- }
-
- /* Configure for VP-Force Update */
- vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
- vpconfig &= ~(vdd->vp_data->vp_common->vpconfig_initvdd |
- vdd->vp_data->vp_common->vpconfig_forceupdate |
- vdd->vp_data->vp_common->vpconfig_initvoltage_mask);
- vpconfig |= ((target_vsel <<
- vdd->vp_data->vp_common->vpconfig_initvoltage_shift));
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- /* Trigger initVDD value copy to voltage processor */
- vpconfig |= vdd->vp_data->vp_common->vpconfig_initvdd;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- /* Force update of voltage */
- vpconfig |= vdd->vp_data->vp_common->vpconfig_forceupdate;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- /*
- * Wait for TransactionDone. Typical latency is <200us.
- * Depends on SMPSWAITTIMEMIN/MAX and voltage change
- */
- timeout = 0;
- omap_test_timeout((vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
- vdd->vp_data->prm_irqst_data->tranxdone_status),
- VP_TRANXDONE_TIMEOUT, timeout);
- if (timeout >= VP_TRANXDONE_TIMEOUT)
- pr_err("%s: vdd_%s TRANXDONE timeout exceeded."
- "TRANXDONE never got set after the voltage update\n",
- __func__, vdd->voltdm.name);
-
- _post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
-
- /*
- * Disable TransactionDone interrupt , clear all status, clear
- * control registers
- */
- timeout = 0;
- while (timeout++ < VP_TRANXDONE_TIMEOUT) {
- vdd->write_reg(vdd->vp_data->prm_irqst_data->tranxdone_status,
- prm_irqst_ocp_mod_offs, prm_irqst_reg);
- if (!(vdd->read_reg(prm_irqst_ocp_mod_offs, prm_irqst_reg) &
- vdd->vp_data->prm_irqst_data->tranxdone_status))
- break;
- udelay(1);
- }
-
- if (timeout >= VP_TRANXDONE_TIMEOUT)
- pr_warning("%s: vdd_%s TRANXDONE timeout exceeded while trying"
- "to clear the TRANXDONE status\n",
- __func__, vdd->voltdm.name);
-
- vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
- /* Clear initVDD copy trigger bit */
- vpconfig &= ~vdd->vp_data->vp_common->vpconfig_initvdd;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
- /* Clear force bit */
- vpconfig &= ~vdd->vp_data->vp_common->vpconfig_forceupdate;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- return 0;
-}
-
-static void __init omap3_vfsm_init(struct omap_vdd_info *vdd)
-{
- /*
- * Voltage Manager FSM parameters init
- * XXX This data should be passed in from the board file
- */
- vdd->write_reg(OMAP3_CLKSETUP, prm_mod_offs, OMAP3_PRM_CLKSETUP_OFFSET);
- vdd->write_reg(OMAP3_VOLTOFFSET, prm_mod_offs,
- OMAP3_PRM_VOLTOFFSET_OFFSET);
- vdd->write_reg(OMAP3_VOLTSETUP2, prm_mod_offs,
- OMAP3_PRM_VOLTSETUP2_OFFSET);
-}
-
-static void __init omap3_vc_init(struct omap_vdd_info *vdd)
-{
- static bool is_initialized;
- u8 on_vsel, onlp_vsel, ret_vsel, off_vsel;
- u32 vc_val;
-
- if (is_initialized)
- return;
-
- /* Set up the on, inactive, retention and off voltage */
- on_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->on_volt);
- onlp_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->onlp_volt);
- ret_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->ret_volt);
- off_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->off_volt);
- vc_val = ((on_vsel << vdd->vc_data->vc_common->cmd_on_shift) |
- (onlp_vsel << vdd->vc_data->vc_common->cmd_onlp_shift) |
- (ret_vsel << vdd->vc_data->vc_common->cmd_ret_shift) |
- (off_vsel << vdd->vc_data->vc_common->cmd_off_shift));
- vdd->write_reg(vc_val, prm_mod_offs, vdd->vc_data->cmdval_reg);
-
- /*
- * Generic VC parameters init
- * XXX This data should be abstracted out
- */
- vdd->write_reg(OMAP3430_CMD1_MASK | OMAP3430_RAV1_MASK, prm_mod_offs,
- OMAP3_PRM_VC_CH_CONF_OFFSET);
- vdd->write_reg(OMAP3430_MCODE_SHIFT | OMAP3430_HSEN_MASK, prm_mod_offs,
- OMAP3_PRM_VC_I2C_CFG_OFFSET);
-
- omap3_vfsm_init(vdd);
-
- is_initialized = true;
-}
-
-
-/* OMAP4 specific voltage init functions */
-static void __init omap4_vc_init(struct omap_vdd_info *vdd)
-{
- static bool is_initialized;
- u32 vc_val;
-
- if (is_initialized)
- return;
-
- /* TODO: Configure setup times and CMD_VAL values*/
-
- /*
- * Generic VC parameters init
- * XXX This data should be abstracted out
- */
- vc_val = (OMAP4430_RAV_VDD_MPU_L_MASK | OMAP4430_CMD_VDD_MPU_L_MASK |
- OMAP4430_RAV_VDD_IVA_L_MASK | OMAP4430_CMD_VDD_IVA_L_MASK |
- OMAP4430_RAV_VDD_CORE_L_MASK | OMAP4430_CMD_VDD_CORE_L_MASK);
- vdd->write_reg(vc_val, prm_mod_offs, OMAP4_PRM_VC_CFG_CHANNEL_OFFSET);
-
- /* XXX These are magic numbers and do not belong! */
- vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT);
- vdd->write_reg(vc_val, prm_mod_offs, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
-
- is_initialized = true;
-}
-
-static void __init omap_vc_init(struct omap_vdd_info *vdd)
-{
- u32 vc_val;
-
- if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
- pr_err("%s: PMIC info requried to configure vc for"
- "vdd_%s not populated.Hence cannot initialize vc\n",
- __func__, vdd->voltdm.name);
- return;
- }
-
- if (!vdd->read_reg || !vdd->write_reg) {
- pr_err("%s: No read/write API for accessing vdd_%s regs\n",
- __func__, vdd->voltdm.name);
- return;
- }
-
- /* Set up the SMPS_SA(i2c slave address in VC */
- vc_val = vdd->read_reg(prm_mod_offs,
- vdd->vc_data->vc_common->smps_sa_reg);
- vc_val &= ~vdd->vc_data->smps_sa_mask;
- vc_val |= vdd->pmic_info->i2c_slave_addr << vdd->vc_data->smps_sa_shift;
- vdd->write_reg(vc_val, prm_mod_offs,
- vdd->vc_data->vc_common->smps_sa_reg);
-
- /* Setup the VOLRA(pmic reg addr) in VC */
- vc_val = vdd->read_reg(prm_mod_offs,
- vdd->vc_data->vc_common->smps_volra_reg);
- vc_val &= ~vdd->vc_data->smps_volra_mask;
- vc_val |= vdd->pmic_info->pmic_reg << vdd->vc_data->smps_volra_shift;
- vdd->write_reg(vc_val, prm_mod_offs,
- vdd->vc_data->vc_common->smps_volra_reg);
-
- /* Configure the setup times */
- vc_val = vdd->read_reg(prm_mod_offs, vdd->vfsm->voltsetup_reg);
- vc_val &= ~vdd->vfsm->voltsetup_mask;
- vc_val |= vdd->pmic_info->volt_setup_time <<
- vdd->vfsm->voltsetup_shift;
- vdd->write_reg(vc_val, prm_mod_offs, vdd->vfsm->voltsetup_reg);
-
- if (cpu_is_omap34xx())
- omap3_vc_init(vdd);
- else if (cpu_is_omap44xx())
- omap4_vc_init(vdd);
-}
-
-static int __init omap_vdd_data_configure(struct omap_vdd_info *vdd)
+static int __init omap_vdd_data_configure(struct voltagedomain *voltdm)
{
int ret = -EINVAL;
- if (!vdd->pmic_info) {
+ if (!voltdm->pmic) {
pr_err("%s: PMIC info requried to configure vdd_%s not"
"populated.Hence cannot initialize vdd_%s\n",
- __func__, vdd->voltdm.name, vdd->voltdm.name);
+ __func__, voltdm->name, voltdm->name);
goto ovdc_out;
}
- if (IS_ERR_VALUE(_config_common_vdd_data(vdd)))
+ if (IS_ERR_VALUE(_config_common_vdd_data(voltdm)))
goto ovdc_out;
- if (cpu_is_omap34xx()) {
- vdd->read_reg = omap3_voltage_read_reg;
- vdd->write_reg = omap3_voltage_write_reg;
- ret = 0;
- } else if (cpu_is_omap44xx()) {
- vdd->read_reg = omap4_voltage_read_reg;
- vdd->write_reg = omap4_voltage_write_reg;
- ret = 0;
- }
+ ret = 0;
ovdc_out:
return ret;
@@ -670,205 +88,95 @@
/* Public functions */
/**
- * omap_voltage_get_nom_volt() - Gets the current non-auto-compensated voltage
+ * omap_voltage_get_curr_vdata() - Gets the current voltage data
* @voltdm: pointer to the VDD for which current voltage info is needed
*
- * API to get the current non-auto-compensated voltage for a VDD.
- * Returns 0 in case of error else returns the current voltage for the VDD.
+ * API to get the current voltage data pointer for a VDD, returns NULL on error
*/
-unsigned long omap_voltage_get_nom_volt(struct voltagedomain *voltdm)
+struct omap_volt_data *omap_voltage_get_curr_vdata(struct voltagedomain *voltdm)
{
- struct omap_vdd_info *vdd;
-
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
- return 0;
+ return NULL;
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
-
- return vdd->curr_volt;
+ return voltdm->curr_volt;
}
/**
- * omap_vp_get_curr_volt() - API to get the current vp voltage.
- * @voltdm: pointer to the VDD.
- *
- * This API returns the current voltage for the specified voltage processor
- */
-unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm)
-{
- struct omap_vdd_info *vdd;
- u8 curr_vsel;
-
- if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
- return 0;
- }
-
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
- if (!vdd->read_reg) {
- pr_err("%s: No read API for reading vdd_%s regs\n",
- __func__, voltdm->name);
- return 0;
- }
-
- curr_vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
-
- if (!vdd->pmic_info || !vdd->pmic_info->vsel_to_uv) {
- pr_warning("%s: PMIC function to convert vsel to voltage"
- "in uV not registerd\n", __func__);
- return 0;
- }
-
- return vdd->pmic_info->vsel_to_uv(curr_vsel);
-}
-
-/**
- * omap_vp_enable() - API to enable a particular VP
- * @voltdm: pointer to the VDD whose VP is to be enabled.
- *
- * This API enables a particular voltage processor. Needed by the smartreflex
- * class drivers.
- */
-void omap_vp_enable(struct voltagedomain *voltdm)
-{
- struct omap_vdd_info *vdd;
- u32 vpconfig;
-
- if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
- return;
- }
-
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
- if (!vdd->read_reg || !vdd->write_reg) {
- pr_err("%s: No read/write API for accessing vdd_%s regs\n",
- __func__, voltdm->name);
- return;
- }
-
- /* If VP is already enabled, do nothing. Return */
- if (vdd->vp_enabled)
- return;
-
- vp_latch_vsel(vdd);
-
- /* Enable VP */
- vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
- vpconfig |= vdd->vp_data->vp_common->vpconfig_vpenable;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
- vdd->vp_enabled = true;
-}
-
-/**
- * omap_vp_disable() - API to disable a particular VP
- * @voltdm: pointer to the VDD whose VP is to be disabled.
- *
- * This API disables a particular voltage processor. Needed by the smartreflex
- * class drivers.
- */
-void omap_vp_disable(struct voltagedomain *voltdm)
-{
- struct omap_vdd_info *vdd;
- u32 vpconfig;
- int timeout;
-
- if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
- return;
- }
-
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
- if (!vdd->read_reg || !vdd->write_reg) {
- pr_err("%s: No read/write API for accessing vdd_%s regs\n",
- __func__, voltdm->name);
- return;
- }
-
- /* If VP is already disabled, do nothing. Return */
- if (!vdd->vp_enabled) {
- pr_warning("%s: Trying to disable VP for vdd_%s when"
- "it is already disabled\n", __func__, voltdm->name);
- return;
- }
-
- /* Disable VP */
- vpconfig = vdd->read_reg(prm_mod_offs, vdd->vp_data->vpconfig);
- vpconfig &= ~vdd->vp_data->vp_common->vpconfig_vpenable;
- vdd->write_reg(vpconfig, prm_mod_offs, vdd->vp_data->vpconfig);
-
- /*
- * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us
- */
- omap_test_timeout((vdd->read_reg(prm_mod_offs, vdd->vp_data->vstatus)),
- VP_IDLE_TIMEOUT, timeout);
-
- if (timeout >= VP_IDLE_TIMEOUT)
- pr_warning("%s: vdd_%s idle timedout\n",
- __func__, voltdm->name);
-
- vdd->vp_enabled = false;
-
- return;
-}
-
-/**
- * omap_voltage_scale_vdd() - API to scale voltage of a particular
- * voltage domain.
- * @voltdm: pointer to the VDD which is to be scaled.
- * @target_volt: The target voltage of the voltage domain
+ * voltdm_scale() - API to scale voltage of a particular voltage domain.
+ * @voltdm: pointer to the voltage domain which is to be scaled.
+ * @target_volt: The target voltage of the voltage domain
*
* This API should be called by the kernel to do the voltage scaling
- * for a particular voltage domain during dvfs or any other situation.
+ * for a particular voltage domain during DVFS.
*/
-int omap_voltage_scale_vdd(struct voltagedomain *voltdm,
- unsigned long target_volt)
+int voltdm_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_v)
{
- struct omap_vdd_info *vdd;
+ int ret = 0;
+ struct omap_voltage_notifier notify;
+ unsigned long target_volt = omap_get_operation_voltage(target_v);
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return -EINVAL;
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
-
- if (!vdd->volt_scale) {
+ if (!voltdm->scale) {
pr_err("%s: No voltage scale API registered for vdd_%s\n",
__func__, voltdm->name);
return -ENODATA;
}
- return vdd->volt_scale(vdd, target_volt);
+ notify.voltdm = voltdm;
+ notify.target_volt = target_volt;
+
+ srcu_notifier_call_chain(&voltdm->change_notify_list,
+ OMAP_VOLTAGE_PRECHANGE,
+ (void *)¬ify);
+
+ ret = voltdm->scale(voltdm, target_v);
+ if (ret)
+ pr_err("%s: voltage scale failed for vdd%s: %d\n",
+ __func__, voltdm->name, ret);
+
+ notify.op_result = ret;
+ srcu_notifier_call_chain(&voltdm->change_notify_list,
+ OMAP_VOLTAGE_POSTCHANGE,
+ (void *)¬ify);
+
+ return ret;
}
/**
- * omap_voltage_reset() - Resets the voltage of a particular voltage domain
- * to that of the current OPP.
- * @voltdm: pointer to the VDD whose voltage is to be reset.
+ * voltdm_reset() - Resets the voltage of a particular voltage domain
+ * to that of the current OPP.
+ * @voltdm: pointer to the voltage domain whose voltage is to be reset.
*
* This API finds out the correct voltage the voltage domain is supposed
* to be at and resets the voltage to that level. Should be used especially
* while disabling any voltage compensation modules.
+ *
+ * NOTE: appropriate locks should be held for mutual exclusivity.
*/
-void omap_voltage_reset(struct voltagedomain *voltdm)
+void voltdm_reset(struct voltagedomain *voltdm)
{
- unsigned long target_uvdc;
+ struct omap_volt_data *target_volt;
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return;
}
- target_uvdc = omap_voltage_get_nom_volt(voltdm);
- if (!target_uvdc) {
+ target_volt = omap_voltage_get_curr_vdata(voltdm);
+ if (!target_volt) {
pr_err("%s: unable to find current voltage for vdd_%s\n",
__func__, voltdm->name);
return;
}
- omap_voltage_scale_vdd(voltdm, target_uvdc);
+ voltdm_scale(voltdm, target_volt);
}
/**
@@ -893,7 +201,7 @@
return;
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+ vdd = voltdm->vdd;
*volt_data = vdd->volt_data;
}
@@ -924,7 +232,7 @@
return ERR_PTR(-EINVAL);
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+ vdd = voltdm->vdd;
if (!vdd->volt_data) {
pr_warning("%s: voltage table does not exist for vdd_%s\n",
@@ -937,8 +245,8 @@
return &vdd->volt_data[i];
}
- pr_notice("%s: Unable to match the current voltage with the voltage"
- "table for vdd_%s\n", __func__, voltdm->name);
+ pr_notice("%s: Unable to match the current voltage %lu with the voltage"
+ "table for vdd_%s\n", __func__, volt, voltdm->name);
return ERR_PTR(-ENODATA);
}
@@ -947,54 +255,25 @@
* omap_voltage_register_pmic() - API to register PMIC specific data
* @voltdm: pointer to the VDD for which the PMIC specific data is
* to be registered
- * @pmic_info: the structure containing pmic info
+ * @pmic: the structure containing pmic info
*
* This API is to be called by the SOC/PMIC file to specify the
- * pmic specific info as present in omap_volt_pmic_info structure.
+ * pmic specific info as present in omap_voltdm_pmic structure.
*/
int omap_voltage_register_pmic(struct voltagedomain *voltdm,
- struct omap_volt_pmic_info *pmic_info)
+ struct omap_voltdm_pmic *pmic)
{
- struct omap_vdd_info *vdd;
-
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return -EINVAL;
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
-
- vdd->pmic_info = pmic_info;
+ voltdm->pmic = pmic;
return 0;
}
/**
- * omap_voltage_get_dbgdir() - API to get pointer to the debugfs directory
- * corresponding to a voltage domain.
- *
- * @voltdm: pointer to the VDD whose debug directory is required.
- *
- * This API returns pointer to the debugfs directory corresponding
- * to the voltage domain. Should be used by drivers requiring to
- * add any debug entry for a particular voltage domain. Returns NULL
- * in case of error.
- */
-struct dentry *omap_voltage_get_dbgdir(struct voltagedomain *voltdm)
-{
- struct omap_vdd_info *vdd;
-
- if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
- return NULL;
- }
-
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
-
- return vdd->debug_dir;
-}
-
-/**
* omap_change_voltscale_method() - API to change the voltage scaling method.
* @voltdm: pointer to the VDD whose voltage scaling method
* has to be changed.
@@ -1005,23 +284,19 @@
* defined in voltage.h
*/
void omap_change_voltscale_method(struct voltagedomain *voltdm,
- int voltscale_method)
+ int voltscale_method)
{
- struct omap_vdd_info *vdd;
-
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return;
}
- vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
-
switch (voltscale_method) {
case VOLTSCALE_VPFORCEUPDATE:
- vdd->volt_scale = vp_forceupdate_scale_voltage;
+ voltdm->scale = omap_vp_forceupdate_scale;
return;
case VOLTSCALE_VCBYPASS:
- vdd->volt_scale = vc_bypass_scale_voltage;
+ voltdm->scale = omap_vc_bypass_scale_voltage;
return;
default:
pr_warning("%s: Trying to change the method of voltage scaling"
@@ -1029,36 +304,147 @@
}
}
-/**
- * omap_voltage_domain_lookup() - API to get the voltage domain pointer
- * @name: Name of the voltage domain
- *
- * This API looks up in the global vdd_info struct for the
- * existence of voltage domain <name>. If it exists, the API returns
- * a pointer to the voltage domain structure corresponding to the
- * VDD<name>. Else retuns error pointer.
- */
-struct voltagedomain *omap_voltage_domain_lookup(char *name)
+/* Voltage debugfs support */
+static int vp_volt_debug_get(void *data, u64 *val)
{
- int i;
+ struct voltagedomain *voltdm = (struct voltagedomain *)data;
- if (!vdd_info) {
- pr_err("%s: Voltage driver init not yet happened.Faulting!\n",
- __func__);
- return ERR_PTR(-EINVAL);
+ if (!voltdm) {
+ pr_warning("Wrong paramater passed\n");
+ return -EINVAL;
+ }
+ *val = omap_vp_get_curr_volt(voltdm);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(vp_volt_debug_fops, vp_volt_debug_get, NULL, "%llu\n");
+
+static int dyn_volt_debug_get(void *data, u64 *val)
+{
+ struct voltagedomain *voltdm = (struct voltagedomain *)data;
+ struct omap_volt_data *volt_data;
+
+ if (!voltdm) {
+ pr_warning("%s: Wrong paramater passed\n", __func__);
+ return -EINVAL;
}
+ volt_data = omap_voltage_get_curr_vdata(voltdm);
+ if (IS_ERR_OR_NULL(volt_data)) {
+ pr_warning("%s: No voltage/domain?\n", __func__);
+ return -ENODEV;
+ }
+
+ *val = volt_data->volt_dynamic_nominal;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(dyn_volt_debug_fops, dyn_volt_debug_get, NULL,
+ "%llu\n");
+
+static int calib_volt_debug_get(void *data, u64 *val)
+{
+ struct voltagedomain *voltdm = (struct voltagedomain *)data;
+ struct omap_volt_data *volt_data;
+
+ if (!voltdm) {
+ pr_warning("%s: Wrong paramater passed\n", __func__);
+ return -EINVAL;
+ }
+
+ volt_data = omap_voltage_get_curr_vdata(voltdm);
+ if (IS_ERR_OR_NULL(volt_data)) {
+ pr_warning("%s: No voltage/domain?\n", __func__);
+ return -ENODEV;
+ }
+
+ *val = volt_data->volt_calibrated;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(calib_volt_debug_fops, calib_volt_debug_get, NULL,
+ "%llu\n");
+static int margin_volt_debug_get(void *data, u64 *val)
+{
+ struct voltagedomain *voltdm = (struct voltagedomain *) data;
+ struct omap_volt_data *vdata;
+
+ if (!voltdm) {
+ pr_warning("%s: Wrong parameter passed\n", __func__);
+ return -EINVAL;
+ }
+
+ vdata = omap_voltage_get_curr_vdata(voltdm);
+ if (IS_ERR_OR_NULL(vdata)) {
+ pr_warning("%s: unable to get volt for vdd_%s\n",
+ __func__, voltdm->name);
+ return -ENODEV;
+ }
+ *val = vdata->volt_margin;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(margin_volt_debug_fops, margin_volt_debug_get, NULL,
+ "%llu\n");
+
+static int nom_volt_debug_get(void *data, u64 *val)
+{
+ struct voltagedomain *voltdm = (struct voltagedomain *) data;
+ struct omap_volt_data *vdata;
+
+ if (!voltdm) {
+ pr_warning("Wrong paramater passed\n");
+ return -EINVAL;
+ }
+
+ vdata = omap_voltage_get_curr_vdata(voltdm);
+ if (IS_ERR_OR_NULL(vdata)) {
+ pr_warning("%s: unable to get volt for vdd_%s\n",
+ __func__, voltdm->name);
+ return -ENODEV;
+ }
+ *val = vdata->volt_nominal;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(nom_volt_debug_fops, nom_volt_debug_get, NULL,
+ "%llu\n");
+
+static void __init voltdm_debugfs_init(struct dentry *voltage_dir,
+ struct voltagedomain *voltdm)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "vdd_%s", voltdm->name);
if (!name) {
- pr_err("%s: No name to get the votage domain!\n", __func__);
- return ERR_PTR(-EINVAL);
+ pr_warning("%s:vdd_%s: no mem for debugfs\n", __func__,
+ voltdm->name);
+ return;
}
- for (i = 0; i < nr_scalable_vdd; i++) {
- if (!(strcmp(name, vdd_info[i]->voltdm.name)))
- return &vdd_info[i]->voltdm;
+ voltdm->debug_dir = debugfs_create_dir(name, voltage_dir);
+ kfree(name);
+ if (IS_ERR_OR_NULL(voltdm->debug_dir)) {
+ pr_warning("%s: Unable to create debugfs directory for"
+ " vdd_%s\n", __func__, voltdm->name);
+ voltdm->debug_dir = NULL;
+ return;
}
- return ERR_PTR(-EINVAL);
+ (void) debugfs_create_file("curr_vp_volt", S_IRUGO, voltdm->debug_dir,
+ (void *) voltdm, &vp_volt_debug_fops);
+ (void) debugfs_create_file("curr_nominal_volt", S_IRUGO,
+ voltdm->debug_dir, (void *) voltdm,
+ &nom_volt_debug_fops);
+ (void) debugfs_create_file("curr_dyn_nominal_volt", S_IRUGO,
+ voltdm->debug_dir, (void *) voltdm,
+ &dyn_volt_debug_fops);
+ (void) debugfs_create_file("curr_calibrated_volt", S_IRUGO,
+ voltdm->debug_dir, (void *) voltdm,
+ &calib_volt_debug_fops);
+ (void) debugfs_create_file("curr_margin_volt", S_IRUGO,
+ voltdm->debug_dir, (void *) voltdm,
+ &margin_volt_debug_fops);
}
/**
@@ -1070,37 +456,212 @@
*/
int __init omap_voltage_late_init(void)
{
- int i;
+ struct voltagedomain *voltdm;
+ struct dentry *voltage_dir;
- if (!vdd_info) {
+ if (list_empty(&voltdm_list)) {
pr_err("%s: Voltage driver support not added\n",
__func__);
return -EINVAL;
}
voltage_dir = debugfs_create_dir("voltage", NULL);
- if (IS_ERR(voltage_dir))
- pr_err("%s: Unable to create voltage debugfs main dir\n",
- __func__);
- for (i = 0; i < nr_scalable_vdd; i++) {
- if (omap_vdd_data_configure(vdd_info[i]))
+
+ list_for_each_entry(voltdm, &voltdm_list, node) {
+ if (!voltdm->scalable)
continue;
- omap_vc_init(vdd_info[i]);
- vp_init(vdd_info[i]);
- vdd_debugfs_init(vdd_info[i]);
+
+ if (voltdm->vdd) {
+ if (omap_vdd_data_configure(voltdm))
+ continue;
+ omap_vp_init(voltdm);
+ }
+
+ if (voltdm->vc)
+ omap_vc_init_channel(voltdm);
+
+ if (voltdm->abb)
+ omap_ldo_abb_init(voltdm);
+
+ if (voltage_dir)
+ voltdm_debugfs_init(voltage_dir, voltdm);
+
+ srcu_init_notifier_head(&voltdm->change_notify_list);
}
return 0;
}
-/* XXX document */
-int __init omap_voltage_early_init(s16 prm_mod, s16 prm_irqst_ocp_mod,
- struct omap_vdd_info *omap_vdd_array[],
- u8 omap_vdd_count)
+static struct voltagedomain *_voltdm_lookup(const char *name)
{
- prm_mod_offs = prm_mod;
- prm_irqst_ocp_mod_offs = prm_irqst_ocp_mod;
- vdd_info = omap_vdd_array;
- nr_scalable_vdd = omap_vdd_count;
+ struct voltagedomain *voltdm, *temp_voltdm;
+
+ voltdm = NULL;
+
+ list_for_each_entry(temp_voltdm, &voltdm_list, node) {
+ if (!strcmp(name, temp_voltdm->name)) {
+ voltdm = temp_voltdm;
+ break;
+ }
+ }
+
+ return voltdm;
+}
+
+/**
+ * omap_voltage_calib_reset() - reset the calibrated voltage entries
+ * @voltdm: voltage domain to reset the entries for
+ *
+ * when the calibrated entries are no longer valid, this api allows
+ * the calibrated voltages to be reset.
+ *
+ * NOTE: Appropriate locks must be held by calling path to ensure mutual
+ * exclusivity
+ */
+int omap_voltage_calib_reset(struct voltagedomain *voltdm)
+{
+ struct omap_volt_data *volt_data;
+
+ if (!voltdm) {
+ pr_warning("%s: voltdm NULL!\n", __func__);
+ return -EINVAL;
+ }
+
+ volt_data = voltdm->vdd->volt_data;
+
+ /* reset the calibrated voltages as 0 */
+ while (volt_data->volt_nominal) {
+ volt_data->volt_calibrated = 0;
+ volt_data++;
+ }
return 0;
}
+
+/**
+ * voltdm_add_pwrdm - add a powerdomain to a voltagedomain
+ * @voltdm: struct voltagedomain * to add the powerdomain to
+ * @pwrdm: struct powerdomain * to associate with a voltagedomain
+ *
+ * Associate the powerdomain @pwrdm with a voltagedomain @voltdm. This
+ * enables the use of voltdm_for_each_pwrdm(). Returns -EINVAL if
+ * presented with invalid pointers; -ENOMEM if memory could not be allocated;
+ * or 0 upon success.
+ */
+int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm)
+{
+ if (!voltdm || !pwrdm)
+ return -EINVAL;
+
+ pr_debug("voltagedomain: associating powerdomain %s with voltagedomain "
+ "%s\n", pwrdm->name, voltdm->name);
+
+ list_add(&pwrdm->voltdm_node, &voltdm->pwrdm_list);
+
+ return 0;
+}
+
+/**
+ * voltdm_for_each_pwrdm - call function for each pwrdm in a voltdm
+ * @voltdm: struct voltagedomain * to iterate over
+ * @fn: callback function *
+ *
+ * Call the supplied function @fn for each powerdomain in the
+ * voltagedomain @voltdm. Returns -EINVAL if presented with invalid
+ * pointers; or passes along the last return value of the callback
+ * function, which should be 0 for success or anything else to
+ * indicate failure.
+ */
+int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
+ int (*fn)(struct voltagedomain *voltdm,
+ struct powerdomain *pwrdm))
+{
+ struct powerdomain *pwrdm;
+ int ret = 0;
+
+ if (!fn)
+ return -EINVAL;
+
+ list_for_each_entry(pwrdm, &voltdm->pwrdm_list, voltdm_node)
+ ret = (*fn)(voltdm, pwrdm);
+
+ return ret;
+}
+
+/**
+ * voltdm_for_each - call function on each registered voltagedomain
+ * @fn: callback function *
+ * @user: opaque user data passed through to @fn on each call
+ *
+ * Call the supplied function @fn for each registered voltagedomain.
+ * The callback function @fn can return anything but 0 to bail out
+ * early from the iterator. Returns the last return value of the
+ * callback function, which should be 0 for success or anything else
+ * to indicate failure; or -EINVAL if the function pointer is null.
+ */
+int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
+ void *user)
+{
+ struct voltagedomain *temp_voltdm;
+ int ret = 0;
+
+ if (!fn)
+ return -EINVAL;
+
+ list_for_each_entry(temp_voltdm, &voltdm_list, node) {
+ ret = (*fn)(temp_voltdm, user);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int _voltdm_register(struct voltagedomain *voltdm)
+{
+ if (!voltdm || !voltdm->name)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&voltdm->pwrdm_list);
+ list_add(&voltdm->node, &voltdm_list);
+
+ pr_debug("voltagedomain: registered %s\n", voltdm->name);
+
+ return 0;
+}
+
+/**
+ * voltdm_lookup - look up a voltagedomain by name, return a pointer
+ * @name: name of voltagedomain
+ *
+ * Find a registered voltagedomain by its name @name. Returns a pointer
+ * to the struct voltagedomain if found, or NULL otherwise.
+ */
+struct voltagedomain *voltdm_lookup(const char *name)
+{
+ struct voltagedomain *voltdm;
+
+ if (!name)
+ return NULL;
+
+ voltdm = _voltdm_lookup(name);
+
+ return voltdm;
+}
+
+/**
+ * voltdm_init - set up the voltagedomain layer
+ * @voltdm_list: array of struct voltagedomain pointers to register
+ *
+ * Loop through the array of voltagedomains @voltdm_list, registering all
+ * that are available on the current CPU. If voltdm_list is supplied
+ * and not null, all of the referenced voltagedomains will be
+ * registered. No return value.
+ */
+void voltdm_init(struct voltagedomain **voltdms)
+{
+ struct voltagedomain **v;
+
+ if (voltdms) {
+ for (v = voltdms; *v; v++)
+ _voltdm_register(*v);
+ }
+}
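
The rewritten voltage.c replaces omap_voltage_domain_lookup()/omap_voltage_scale_vdd() with voltdm_lookup()/voltdm_scale() and wraps every scale request in PRECHANGE/POSTCHANGE notifications. A hedged usage sketch of the new flow from a hypothetical DVFS caller follows; the sketch_* function and the domain name are illustrative only, the rest is the API added above.

/* Sketch only - assumes the mach-omap2 "voltage.h" from this series. */
#include <linux/err.h>
#include <linux/errno.h>
#include "voltage.h"

static int sketch_dvfs_set_voltage(const char *vdd_name, unsigned long uv)
{
        struct voltagedomain *voltdm;
        struct omap_volt_data *vdata;

        voltdm = voltdm_lookup(vdd_name);       /* e.g. "mpu_iva" or "core" */
        if (!voltdm)
                return -ENODEV;

        /* pick the voltage table entry matching the requested nominal value */
        vdata = omap_voltage_get_voltdata(voltdm, uv);
        if (IS_ERR(vdata))
                return PTR_ERR(vdata);

        /*
         * voltdm_scale() fires OMAP_VOLTAGE_PRECHANGE, calls the registered
         * ->scale() method (VP force-update or VC bypass), then fires
         * OMAP_VOLTAGE_POSTCHANGE with op_result filled in.
         */
        return voltdm_scale(voltdm, vdata);
}
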
diff --git a/arch/arm/mach-omap2/voltage.h b/arch/arm/mach-omap2/voltage.h
index e9f5408..c454129 100644
--- a/arch/arm/mach-omap2/voltage.h
+++ b/arch/arm/mach-omap2/voltage.h
@@ -14,10 +14,16 @@
#ifndef __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
#define __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
+#include <linux/notifier.h>
#include <linux/err.h>
+struct omap_volt_data;
+
#include "vc.h"
#include "vp.h"
+#include "ldo.h"
+
+struct powerdomain;
/* XXX document */
#define VOLTSCALE_VPFORCEUPDATE 1
@@ -31,35 +37,105 @@
#define OMAP3_VOLTOFFSET 0xff
#define OMAP3_VOLTSETUP2 0xff
+struct omap_vdd_info;
+
/**
- * struct omap_vfsm_instance_data - per-voltage manager FSM register/bitfield
+ * struct omap_vfsm_instance - per-voltage manager FSM register/bitfield
* data
- * @voltsetup_mask: SETUP_TIME* bitmask in the PRM_VOLTSETUP* register
- * @voltsetup_reg: register offset of PRM_VOLTSETUP from PRM base
+ * @voltsetup_mask: SETUP_TIME* bitmask of PRM_VOLTSETUP* register(RET/SLEEP)
+ * @voltsetup_reg: register offset of PRM_VOLTSETUP from PRM base(RET/SLEEP)
* @voltsetup_shift: SETUP_TIME* field shift in the PRM_VOLTSETUP* register
+ * @voltsetupoff_reg: register offset of PRM_VOLTSETUP*_OFF from PRM base
*
* XXX What about VOLTOFFSET/VOLTCTRL?
* XXX It is not necessary to have both a _mask and a _shift for the same
* bitfield - remove one!
*/
-struct omap_vfsm_instance_data {
+struct omap_vfsm_instance {
u32 voltsetup_mask;
u8 voltsetup_reg;
u8 voltsetup_shift;
+ u8 voltsetupoff_reg;
};
+/* Dynamic nominal voltage margin common for OMAP3630 and OMAP4 */
+#define OMAP3PLUS_DYNAMIC_NOMINAL_MARGIN_UV 50000
+
/**
* struct voltagedomain - omap voltage domain global structure.
- * @name: Name of the voltage domain which can be used as a unique
- * identifier.
+ * @name: Name of the voltage domain which can be used as a unique identifier.
+ * @scalable: Whether or not this voltage domain is scalable
+ * @node: list_head linking all voltage domains
+ * @pwrdm_list: list_head linking all powerdomains in this voltagedomain
+ * @vdd: to be removed
+ * @scale: function used to scale the voltage of the voltagedomain
+ * @curr_volt: current nominal voltage for this voltage domain
+ * @change_notify_list: notifiers that need to be told on pre and post change
*/
struct voltagedomain {
char *name;
+ bool scalable;
+ struct list_head node;
+ struct list_head pwrdm_list;
+ struct omap_vc_channel *vc;
+ const struct omap_vfsm_instance *vfsm;
+ struct omap_vp_instance *vp;
+ struct omap_voltdm_pmic *pmic;
+ struct omap_ldo_abb_instance *abb;
+
+ /* VC/VP register access functions: SoC specific */
+ u32 (*read) (u8 offset);
+ void (*write) (u32 val, u8 offset);
+ u32 (*rmw)(u32 mask, u32 bits, u8 offset);
+
+ union {
+ const char *name;
+ u32 rate;
+ } sys_clk;
+
+ int (*scale) (struct voltagedomain *voltdm,
+ struct omap_volt_data *target_volt);
+ struct omap_volt_data *curr_volt;
+
+ struct omap_vdd_info *vdd;
+ struct srcu_notifier_head change_notify_list;
+ struct dentry *debug_dir;
};
+/* Notifier values for voltage changes */
+#define OMAP_VOLTAGE_PRECHANGE 1
+#define OMAP_VOLTAGE_POSTCHANGE 2
+
+/**
+ * struct omap_voltage_notifier - notifier data that is passed along
+ * @voltdm: voltage domain for the notification
+ * @target_volt: the voltage (in uV) the domain is transitioning to
+ * @op_result: valid only for POSTCHANGE, tells the result of
+ * the operation.
+ *
+ * This structure is passed to voltage-change notifiers on PRE/POSTCHANGE.
+ */
+struct omap_voltage_notifier {
+ struct voltagedomain *voltdm;
+ unsigned long target_volt;
+ int op_result;
+};
+
+/* Flags for various ABB options */
+#define OMAP_ABB_NONE -1
+#define OMAP_ABB_NOMINAL_OPP 0
+#define OMAP_ABB_FAST_OPP 1
+
/**
* struct omap_volt_data - Omap voltage specific data.
* @voltage_nominal: The possible voltage value in uV
+ * @voltage_calibrated: The Calibrated voltage value in uV
+ * @voltage_dynamic_nominal: The run time optimized nominal voltage for
+ * the device. Dynamic nominal is the nominal voltage
+ * specialized for that OPP on the device in uV.
+ * @volt_margin: Additional software margin in uV to add to OPP calibrated
+ * voltage
* @sr_efuse_offs: The offset of the efuse register(from system
* control module base address) from where to read
* the n-target value for the smartreflex module.
@@ -68,22 +144,69 @@
* with voltage.
* @vp_errorgain: Error gain value for the voltage processor. This
* field also differs according to the voltage/opp.
+ * @abb_type: Either OMAP_ABB_NONE - which implies that there is no
+ * usage of ABB; OMAP_ABB_NOMINAL_OPP - which bypasses ABB
+ * LDO; or OMAP_ABB_FAST_OPP, which enables Forward-Body
+ * Bias.
*/
struct omap_volt_data {
u32 volt_nominal;
+ u32 volt_calibrated;
+ u32 volt_dynamic_nominal;
+ u32 volt_margin;
u32 sr_efuse_offs;
u8 sr_errminlimit;
u8 vp_errgain;
+ int abb_type;
};
+/*
+ * OMAP4 introduces the concept of a default channel: in OMAP4 this
+ * channel is MPU, and all other domains such as IVA/CORE can optionally
+ * link their I2C register configuration to the MPU channel's configuration
+ * if required. To do this, mark the PMIC structure's i2c_slave_addr,
+ * volt_reg_addr and cmd_reg_addr with this macro.
+ */
+#define USE_DEFAULT_CHANNEL_I2C_PARAM 0x8000
+
+/* Min and max voltages from OMAP perspective */
+#define OMAP3430_VP1_VLIMITTO_VDDMIN 850000
+#define OMAP3430_VP1_VLIMITTO_VDDMAX 1425000
+#define OMAP3430_VP2_VLIMITTO_VDDMIN 900000
+#define OMAP3430_VP2_VLIMITTO_VDDMAX 1150000
+
+#define OMAP3630_VP1_VLIMITTO_VDDMIN 900000
+#define OMAP3630_VP1_VLIMITTO_VDDMAX 1350000
+#define OMAP3630_VP2_VLIMITTO_VDDMIN 900000
+#define OMAP3630_VP2_VLIMITTO_VDDMAX 1200000
+
+#define OMAP4_VP_MPU_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_MPU_VLIMITTO_VDDMAX 1410000
+#define OMAP4_VP_IVA_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_IVA_VLIMITTO_VDDMAX 1260000
+#define OMAP4_VP_CORE_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_CORE_VLIMITTO_VDDMAX 1200000
+
+#define OMAP4_VP_CONFIG_ERROROFFSET 0x00
+#define OMAP4_VP_VSTEPMIN_VSTEPMIN 0x01
+#define OMAP4_VP_VSTEPMAX_VSTEPMAX 0x04
+#define OMAP4_VP_VLIMITTO_TIMEOUT_US 200
+
/**
- * struct omap_volt_pmic_info - PMIC specific data required by voltage driver.
+ * struct omap_voltdm_pmic - PMIC specific data required by voltage driver.
* @slew_rate: PMIC slew rate (in uv/us)
* @step_size: PMIC voltage step size (in uv)
+ * @i2c_high_speed: whether VC uses I2C high-speed mode to PMIC
+ * @i2c_mcode: master code value for I2C high-speed preamble transmission
* @vsel_to_uv: PMIC API to convert vsel value to actual voltage in uV.
* @uv_to_vsel: PMIC API to convert voltage in uV to vsel value.
+ * @i2c_hscll_low: PMIC interface speed config for highspeed mode (T low)
+ * @i2c_hscll_high: PMIC interface speed config for highspeed mode (T high)
+ * @i2c_scll_low: PMIC interface speed config for fullspeed mode (T low)
+ * @i2c_scll_high: PMIC interface speed config for fullspeed mode (T high)
+ * @switch_on_time: time taken to switch on the DCDC, in uSec
*/
-struct omap_volt_pmic_info {
+struct omap_voltdm_pmic {
int slew_rate;
int step_size;
u32 on_volt;
@@ -91,81 +214,83 @@
u32 ret_volt;
u32 off_volt;
u16 volt_setup_time;
+ u16 switch_on_time;
u8 vp_erroroffset;
u8 vp_vstepmin;
u8 vp_vstepmax;
- u8 vp_vddmin;
- u8 vp_vddmax;
+ u32 vp_vddmin;
+ u32 vp_vddmax;
u8 vp_timeout_us;
- u8 i2c_slave_addr;
- u8 pmic_reg;
+ u16 i2c_slave_addr;
+ u16 volt_reg_addr;
+ u16 cmd_reg_addr;
+ bool i2c_high_speed;
+ u8 i2c_hscll_low;
+ u8 i2c_hscll_high;
+ u8 i2c_scll_low;
+ u8 i2c_scll_high;
+ u8 i2c_mcode;
unsigned long (*vsel_to_uv) (const u8 vsel);
u8 (*uv_to_vsel) (unsigned long uV);
};
/**
+ * struct omap_vdd_dep_volt - Map table for voltage dependencies
+ * @main_vdd_volt : The main vdd voltage
+ * @dep_vdd_volt : The voltage at which the dependent vdd should be
+ * when the main vdd is at <main_vdd_volt> voltage
+ *
+ * Table containing the parent vdd voltage and the dependent vdd voltage
+ * corresponding to it.
+ */
+struct omap_vdd_dep_volt {
+ u32 main_vdd_volt;
+ u32 dep_vdd_volt;
+};
+
+/**
+ * struct omap_vdd_dep_info - Dependent vdd info
+ * @name : Dependent vdd name
+ * @_dep_voltdm : internal structure meant to prevent multiple lookups
+ * @dep_table : Table containing the dependent vdd voltage
+ * corresponding to every main vdd voltage.
+ * @nr_dep_entries : number of dependency voltage entries
+ */
+struct omap_vdd_dep_info {
+ char *name;
+ struct voltagedomain *_dep_voltdm;
+ struct omap_vdd_dep_volt *dep_table;
+ int nr_dep_entries;
+};
+
+/**
* omap_vdd_info - Per Voltage Domain info
*
- * @volt_data : voltage table having the distinct voltages supported
+ * @volt_data : Array ending with a 0 terminator containing the
+ * voltage table with distinct voltages supported
* by the domain and other associated per voltage data.
- * @pmic_info : pmic specific parameters which should be populted by
- * the pmic drivers.
- * @vp_data : the register values, shifts, masks for various
- * vp registers
- * @vp_rt_data : VP data derived at runtime, not predefined
- * @vc_data : structure containing various various vc registers,
- * shifts, masks etc.
- * @vfsm : voltage manager FSM data
- * @voltdm : pointer to the voltage domain structure
- * @debug_dir : debug directory for this voltage domain.
- * @curr_volt : current voltage for this vdd.
- * @vp_enabled : flag to keep track of whether vp is enabled or not
- * @volt_scale : API to scale the voltage of the vdd.
+ * @dep_vdd_info : Array ending with a 0 terminator for dependency
+ * voltage information.
*/
struct omap_vdd_info {
struct omap_volt_data *volt_data;
- struct omap_volt_pmic_info *pmic_info;
- struct omap_vp_instance_data *vp_data;
- struct omap_vp_runtime_data vp_rt_data;
- struct omap_vc_instance_data *vc_data;
- const struct omap_vfsm_instance_data *vfsm;
- struct voltagedomain voltdm;
- struct dentry *debug_dir;
- u32 curr_volt;
- bool vp_enabled;
- u32 (*read_reg) (u16 mod, u8 offset);
- void (*write_reg) (u32 val, u16 mod, u8 offset);
- int (*volt_scale) (struct omap_vdd_info *vdd,
- unsigned long target_volt);
+ struct omap_vdd_dep_info *dep_vdd_info;
};
-unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm);
-void omap_vp_enable(struct voltagedomain *voltdm);
-void omap_vp_disable(struct voltagedomain *voltdm);
-int omap_voltage_scale_vdd(struct voltagedomain *voltdm,
- unsigned long target_volt);
-void omap_voltage_reset(struct voltagedomain *voltdm);
void omap_voltage_get_volttable(struct voltagedomain *voltdm,
struct omap_volt_data **volt_data);
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt);
-unsigned long omap_voltage_get_nom_volt(struct voltagedomain *voltdm);
-struct dentry *omap_voltage_get_dbgdir(struct voltagedomain *voltdm);
-int __init omap_voltage_early_init(s16 prm_mod, s16 prm_irqst_mod,
- struct omap_vdd_info *omap_vdd_array[],
- u8 omap_vdd_count);
+struct omap_volt_data *omap_voltage_get_curr_vdata(struct voltagedomain *voldm);
#ifdef CONFIG_PM
int omap_voltage_register_pmic(struct voltagedomain *voltdm,
- struct omap_volt_pmic_info *pmic_info);
+ struct omap_voltdm_pmic *pmic);
void omap_change_voltscale_method(struct voltagedomain *voltdm,
int voltscale_method);
-/* API to get the voltagedomain pointer */
-struct voltagedomain *omap_voltage_domain_lookup(char *name);
-
int omap_voltage_late_init(void);
#else
static inline int omap_voltage_register_pmic(struct voltagedomain *voltdm,
- struct omap_volt_pmic_info *pmic_info)
+ struct omap_voltdm_pmic *pmic)
{
return -EINVAL;
}
@@ -175,10 +300,69 @@
{
return -EINVAL;
}
-static inline struct voltagedomain *omap_voltage_domain_lookup(char *name)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
+extern void omap2xxx_voltagedomains_init(void);
+extern void omap3xxx_voltagedomains_init(void);
+extern void omap44xx_voltagedomains_init(void);
+
+struct voltagedomain *voltdm_lookup(const char *name);
+void voltdm_init(struct voltagedomain **voltdm_list);
+int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm);
+int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
+ void *user);
+int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
+ int (*fn)(struct voltagedomain *voltdm,
+ struct powerdomain *pwrdm));
+int voltdm_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_volt);
+void voltdm_reset(struct voltagedomain *voltdm);
+
+static inline int voltdm_register_notifier(struct voltagedomain *voltdm,
+ struct notifier_block *nb)
+{
+ return srcu_notifier_chain_register(&voltdm->change_notify_list, nb);
+}
+
+static inline int voltdm_unregister_notifier(struct voltagedomain *voltdm,
+ struct notifier_block *nb)
+{
+ return srcu_notifier_chain_unregister(&voltdm->change_notify_list, nb);
+}
+
+/* return the voltage to operate at for the given voltage data */
+static inline unsigned long omap_get_operation_voltage(
+ struct omap_volt_data *vdata)
+{
+ if (!vdata)
+ return 0;
+ return (vdata->volt_calibrated) ? vdata->volt_calibrated :
+ (vdata->volt_dynamic_nominal) ? vdata->volt_dynamic_nominal :
+ vdata->volt_nominal;
+}
+
+/* what is my dynamic nominal? */
+static inline unsigned long omap_get_dyn_nominal(struct omap_volt_data *vdata)
+{
+ if (IS_ERR_OR_NULL(vdata))
+ return 0;
+ if (vdata->volt_calibrated) {
+ unsigned long v = vdata->volt_calibrated +
+ OMAP3PLUS_DYNAMIC_NOMINAL_MARGIN_UV;
+ if (v > vdata->volt_nominal)
+ return vdata->volt_nominal;
+ return v;
+ }
+ return vdata->volt_nominal;
+}
+static inline unsigned long omap_get_nominal_voltage(
+ struct omap_volt_data *vdata)
+{
+ if (IS_ERR_OR_NULL(vdata))
+ return 0;
+ return vdata->volt_nominal;
+}
+
+int omap_voltage_calib_reset(struct voltagedomain *voltdm);
+
#endif
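
As a reader aid, here is a minimal, hypothetical sketch of a client of the reworked voltdm_* API declared above: look up a domain by name, hook a change notifier, and read back the current operating voltage. Only the function names and helpers come from voltage.h; the callback body, the "mpu" name and the init hook are illustrative assumptions, not code from this series.

/* Illustrative sketch only -- not part of this series. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

#include "voltage.h"

static int example_voltdm_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	/* event codes and payload layout are defined by the voltage layer */
	pr_debug("voltage domain event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block example_voltdm_nb = {
	.notifier_call = example_voltdm_notify,
};

static int __init example_voltdm_client_init(void)
{
	struct voltagedomain *voltdm;
	struct omap_volt_data *vdata;

	voltdm = voltdm_lookup("mpu");	/* domain name is SoC-specific */
	if (IS_ERR_OR_NULL(voltdm))
		return -ENODEV;

	voltdm_register_notifier(voltdm, &example_voltdm_nb);

	vdata = omap_voltage_get_curr_vdata(voltdm);
	pr_info("%s: operating at %lu uV\n", voltdm->name,
		omap_get_operation_voltage(vdata));

	return 0;
}
late_initcall(example_voltdm_client_init);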
diff --git a/arch/arm/mach-omap2/voltagedomains2xxx_data.c b/arch/arm/mach-omap2/voltagedomains2xxx_data.c
new file mode 100644
index 0000000..69ff261
--- /dev/null
+++ b/arch/arm/mach-omap2/voltagedomains2xxx_data.c
@@ -0,0 +1,32 @@
+/*
+ * OMAP2 voltage domain data
+ *
+ * Copyright (C) 2007, 2010 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "voltage.h"
+
+static struct voltagedomain omap2_voltdm_core = {
+ .name = "core",
+};
+
+static struct voltagedomain omap2_voltdm_wkup = {
+ .name = "wakeup",
+};
+
+static struct voltagedomain *voltagedomains_omap2[] __initdata = {
+ &omap2_voltdm_core,
+ &omap2_voltdm_wkup,
+ NULL,
+};
+
+void __init omap2xxx_voltagedomains_init(void)
+{
+ voltdm_init(voltagedomains_omap2);
+}
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index def230f..65a00ff 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -26,55 +26,69 @@
#include "voltage.h"
#include "vc.h"
#include "vp.h"
+#include "ldo.h"
/*
* VDD data
*/
-static const struct omap_vfsm_instance_data omap3_vdd1_vfsm_data = {
+static const struct omap_vfsm_instance omap3_vdd1_vfsm = {
.voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET,
.voltsetup_shift = OMAP3430_SETUP_TIME1_SHIFT,
.voltsetup_mask = OMAP3430_SETUP_TIME1_MASK,
};
-static struct omap_vdd_info omap3_vdd1_info = {
- .vp_data = &omap3_vp1_data,
- .vc_data = &omap3_vc1_data,
- .vfsm = &omap3_vdd1_vfsm_data,
- .voltdm = {
- .name = "mpu",
- },
-};
+static struct omap_vdd_info omap3_vdd1_info;
-static const struct omap_vfsm_instance_data omap3_vdd2_vfsm_data = {
+static const struct omap_vfsm_instance omap3_vdd2_vfsm = {
.voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET,
.voltsetup_shift = OMAP3430_SETUP_TIME2_SHIFT,
.voltsetup_mask = OMAP3430_SETUP_TIME2_MASK,
};
-static struct omap_vdd_info omap3_vdd2_info = {
- .vp_data = &omap3_vp2_data,
- .vc_data = &omap3_vc2_data,
- .vfsm = &omap3_vdd2_vfsm_data,
- .voltdm = {
- .name = "core",
- },
+static struct omap_vdd_info omap3_vdd2_info;
+
+static struct voltagedomain omap3_voltdm_mpu = {
+ .name = "mpu_iva",
+ .scalable = true,
+ .read = omap3_prm_vcvp_read,
+ .write = omap3_prm_vcvp_write,
+ .rmw = omap3_prm_vcvp_rmw,
+ .vc = &omap3_vc_mpu,
+ .vfsm = &omap3_vdd1_vfsm,
+ .vp = &omap3_vp_mpu,
+ .vdd = &omap3_vdd1_info,
};
-/* OMAP3 VDD structures */
-static struct omap_vdd_info *omap3_vdd_info[] = {
- &omap3_vdd1_info,
- &omap3_vdd2_info,
+static struct voltagedomain omap3_voltdm_core = {
+ .name = "core",
+ .scalable = true,
+ .read = omap3_prm_vcvp_read,
+ .write = omap3_prm_vcvp_write,
+ .rmw = omap3_prm_vcvp_rmw,
+ .vc = &omap3_vc_core,
+ .vfsm = &omap3_vdd2_vfsm,
+ .vp = &omap3_vp_core,
+ .vdd = &omap3_vdd2_info,
};
-/* OMAP3 specific voltage init functions */
-static int __init omap3xxx_voltage_early_init(void)
+static struct voltagedomain omap3_voltdm_wkup = {
+ .name = "wakeup",
+};
+
+static struct voltagedomain *voltagedomains_omap3[] __initdata = {
+ &omap3_voltdm_mpu,
+ &omap3_voltdm_core,
+ &omap3_voltdm_wkup,
+ NULL,
+};
+
+static const char *sys_clk_name __initdata = "sys_ck";
+
+void __init omap3xxx_voltagedomains_init(void)
{
- s16 prm_mod = OMAP3430_GR_MOD;
- s16 prm_irqst_ocp_mod = OCP_MOD;
-
- if (!cpu_is_omap34xx())
- return 0;
+ struct voltagedomain *voltdm;
+ int i;
/*
* XXX Will depend on the process, validation, and binning
@@ -83,13 +97,17 @@
if (cpu_is_omap3630()) {
omap3_vdd1_info.volt_data = omap36xx_vddmpu_volt_data;
omap3_vdd2_info.volt_data = omap36xx_vddcore_volt_data;
+ omap3_vdd1_info.dep_vdd_info = omap36xx_vddmpu_dep_info;
+ omap3_voltdm_mpu.abb = &omap3630_ldo_abb_mpu_instance;
+
} else {
omap3_vdd1_info.volt_data = omap34xx_vddmpu_volt_data;
omap3_vdd2_info.volt_data = omap34xx_vddcore_volt_data;
+ omap3_vdd1_info.dep_vdd_info = omap34xx_vddmpu_dep_info;
}
- return omap_voltage_early_init(prm_mod, prm_irqst_ocp_mod,
- omap3_vdd_info,
- ARRAY_SIZE(omap3_vdd_info));
+ for (i = 0; voltdm = voltagedomains_omap3[i], voltdm; i++)
+ voltdm->sys_clk.name = sys_clk_name;
+
+ voltdm_init(voltagedomains_omap3);
};
-core_initcall(omap3xxx_voltage_early_init);
diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
index cb64996..dc5026e 100644
--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <plat/common.h>
@@ -31,72 +32,135 @@
#include "omap_opp_data.h"
#include "vc.h"
#include "vp.h"
+#include "ldo.h"
-static const struct omap_vfsm_instance_data omap4_vdd_mpu_vfsm_data = {
+static const struct omap_vfsm_instance omap4_vdd_mpu_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET,
+ .voltsetup_mask = OMAP4430_RAMP_DOWN_PRESCAL_MASK |
+ OMAP4430_RAMP_DOWN_COUNT_MASK |
+ OMAP4430_RAMP_UP_PRESCAL_MASK |
+ OMAP4430_RAMP_UP_COUNT_MASK,
+ .voltsetupoff_reg = OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET,
};
-static struct omap_vdd_info omap4_vdd_mpu_info = {
- .vp_data = &omap4_vp_mpu_data,
- .vc_data = &omap4_vc_mpu_data,
- .vfsm = &omap4_vdd_mpu_vfsm_data,
- .voltdm = {
- .name = "mpu",
- },
-};
+static struct omap_vdd_info omap4_vdd_mpu_info;
-static const struct omap_vfsm_instance_data omap4_vdd_iva_vfsm_data = {
+static const struct omap_vfsm_instance omap4_vdd_iva_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET,
+ .voltsetup_mask = OMAP4430_RAMP_DOWN_PRESCAL_MASK |
+ OMAP4430_RAMP_DOWN_COUNT_MASK |
+ OMAP4430_RAMP_UP_PRESCAL_MASK |
+ OMAP4430_RAMP_UP_COUNT_MASK,
+ .voltsetupoff_reg = OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET,
};
-static struct omap_vdd_info omap4_vdd_iva_info = {
- .vp_data = &omap4_vp_iva_data,
- .vc_data = &omap4_vc_iva_data,
- .vfsm = &omap4_vdd_iva_vfsm_data,
- .voltdm = {
- .name = "iva",
- },
-};
+static struct omap_vdd_info omap4_vdd_iva_info;
-static const struct omap_vfsm_instance_data omap4_vdd_core_vfsm_data = {
+static const struct omap_vfsm_instance omap4_vdd_core_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET,
+ .voltsetup_mask = OMAP4430_RAMP_DOWN_PRESCAL_MASK |
+ OMAP4430_RAMP_DOWN_COUNT_MASK |
+ OMAP4430_RAMP_UP_PRESCAL_MASK |
+ OMAP4430_RAMP_UP_COUNT_MASK,
+ .voltsetupoff_reg = OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET,
};
-static struct omap_vdd_info omap4_vdd_core_info = {
- .vp_data = &omap4_vp_core_data,
- .vc_data = &omap4_vc_core_data,
- .vfsm = &omap4_vdd_core_vfsm_data,
- .voltdm = {
- .name = "core",
- },
+static struct omap_vdd_info omap4_vdd_core_info;
+
+static struct voltagedomain omap4_voltdm_mpu = {
+ .name = "mpu",
+ .scalable = true,
+ .read = omap4_prm_vcvp_read,
+ .write = omap4_prm_vcvp_write,
+ .rmw = omap4_prm_vcvp_rmw,
+ .vc = &omap4_vc_mpu,
+ .vfsm = &omap4_vdd_mpu_vfsm,
+ .vp = &omap4_vp_mpu,
+ .vdd = &omap4_vdd_mpu_info,
+ .abb = &omap4_ldo_abb_mpu_instance,
};
-/* OMAP4 VDD structures */
-static struct omap_vdd_info *omap4_vdd_info[] = {
- &omap4_vdd_mpu_info,
- &omap4_vdd_iva_info,
- &omap4_vdd_core_info,
+static struct voltagedomain omap4_voltdm_iva = {
+ .name = "iva",
+ .scalable = true,
+ .read = omap4_prm_vcvp_read,
+ .write = omap4_prm_vcvp_write,
+ .rmw = omap4_prm_vcvp_rmw,
+ .vc = &omap4_vc_iva,
+ .vfsm = &omap4_vdd_iva_vfsm,
+ .vp = &omap4_vp_iva,
+ .vdd = &omap4_vdd_iva_info,
+ .abb = &omap4_ldo_abb_iva_instance,
};
-/* OMAP4 specific voltage init functions */
-static int __init omap44xx_voltage_early_init(void)
+static struct voltagedomain omap4_voltdm_core = {
+ .name = "core",
+ .scalable = true,
+ .read = omap4_prm_vcvp_read,
+ .write = omap4_prm_vcvp_write,
+ .rmw = omap4_prm_vcvp_rmw,
+ .vc = &omap4_vc_core,
+ .vfsm = &omap4_vdd_core_vfsm,
+ .vp = &omap4_vp_core,
+ .vdd = &omap4_vdd_core_info,
+};
+
+static struct voltagedomain omap4_voltdm_wkup = {
+ .name = "wakeup",
+};
+
+static struct voltagedomain *voltagedomains_omap4[] __initdata = {
+ &omap4_voltdm_mpu,
+ &omap4_voltdm_iva,
+ &omap4_voltdm_core,
+ &omap4_voltdm_wkup,
+ NULL,
+};
+
+/*
+ * Handle the non-standard pre-scaler to sysclk cycles map:
+ * due to "Errata Id: i623: Retention/Sleep Voltage Transitions Ramp Time"
+ * on OMAP4430 specifically, the map is 64, 256, 512, 2048 cycles.
+ * Handle this condition dynamically from the version detection logic.
+ */
+static u16 pre_scaler_to_sysclk_cycles_443x[] = {64, 256, 512, 2048};
+
+static const char *sys_clk_name __initdata = "sys_clkin_ck";
+
+void __init omap44xx_voltagedomains_init(void)
{
- s16 prm_mod = OMAP4430_PRM_DEVICE_INST;
- s16 prm_irqst_ocp_mod = OMAP4430_PRM_OCP_SOCKET_INST;
-
- if (!cpu_is_omap44xx())
- return 0;
+ struct voltagedomain *voltdm;
+ int i;
/*
* XXX Will depend on the process, validation, and binning
* for the currently-running IC
*/
- omap4_vdd_mpu_info.volt_data = omap44xx_vdd_mpu_volt_data;
- omap4_vdd_iva_info.volt_data = omap44xx_vdd_iva_volt_data;
- omap4_vdd_core_info.volt_data = omap44xx_vdd_core_volt_data;
+ if (cpu_is_omap443x()) {
+ struct setup_time_ramp_params *params =
+ omap4_vc_core.common->setup_time_params;
- return omap_voltage_early_init(prm_mod, prm_irqst_ocp_mod,
- omap4_vdd_info,
- ARRAY_SIZE(omap4_vdd_info));
+ if (params) {
+ params->pre_scaler_to_sysclk_cycles =
+ pre_scaler_to_sysclk_cycles_443x;
+ }
+ omap4_vdd_mpu_info.volt_data = omap443x_vdd_mpu_volt_data;
+ omap4_vdd_iva_info.volt_data = omap443x_vdd_iva_volt_data;
+ omap4_vdd_core_info.volt_data = omap443x_vdd_core_volt_data;
+ omap4_vdd_mpu_info.dep_vdd_info = omap443x_vddmpu_dep_info;
+ omap4_vdd_iva_info.dep_vdd_info = omap443x_vddiva_dep_info;
+ } else if (cpu_is_omap446x()) {
+ omap4_vdd_mpu_info.volt_data = omap446x_vdd_mpu_volt_data;
+ omap4_vdd_iva_info.volt_data = omap446x_vdd_iva_volt_data;
+ omap4_vdd_core_info.volt_data = omap446x_vdd_core_volt_data;
+ omap4_vdd_mpu_info.dep_vdd_info = omap446x_vddmpu_dep_info;
+ omap4_vdd_iva_info.dep_vdd_info = omap446x_vddiva_dep_info;
+ } else {
+ return;
+ }
+
+ for (i = 0; voltdm = voltagedomains_omap4[i], voltdm; i++)
+ voltdm->sys_clk.name = sys_clk_name;
+
+ voltdm_init(voltagedomains_omap4);
};
-core_initcall(omap44xx_voltage_early_init);
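
A short, hedged illustration of the errata i623 handling above: on OMAP4430 the pre-scaler to sysclk-cycles map is {64, 256, 512, 2048} rather than the usual values, so a ramp expressed as (pre-scaler index, count) converts to cycles as sketched below. The helper and its name are hypothetical; only the table and the setup_time_params hook come from the hunk above.

/* Illustrative only: e.g. count 10 at pre-scaler index 1 -> 10 * 256 = 2560 sysclk cycles */
static u32 example_ramp_cycles(const u16 *prescaler_to_cycles,
			       u8 prescal_idx, u16 count)
{
	return count * prescaler_to_cycles[prescal_idx & 0x3];
}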
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
new file mode 100644
index 0000000..3f16be5
--- /dev/null
+++ b/arch/arm/mach-omap2/vp.c
@@ -0,0 +1,426 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ratelimit.h>
+
+#include <plat/common.h>
+
+#include "voltage.h"
+#include "vp.h"
+#include "prm-regbits-34xx.h"
+#include "prm-regbits-44xx.h"
+#include "prm44xx.h"
+
+static void vp_latch_vsel(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp = voltdm->vp;
+ struct omap_volt_data *v = omap_voltage_get_curr_vdata(voltdm);
+ u32 vpconfig;
+ unsigned long uvdc;
+ char vsel;
+
+ if (IS_ERR_OR_NULL(v)) {
+ pr_warning("%s: unable to get voltage for vdd_%s\n",
+ __func__, voltdm->name);
+ return;
+ }
+ uvdc = omap_get_operation_voltage(v);
+ if (!uvdc) {
+ pr_warning("%s: unable to find current voltage for vdd_%s\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+ pr_warning("%s: PMIC function to convert voltage in uV to"
+ " vsel not registered\n", __func__);
+ return;
+ }
+
+ vsel = voltdm->pmic->uv_to_vsel(uvdc);
+
+ vpconfig = voltdm->read(vp->vpconfig);
+ vpconfig &= ~(vp->common->vpconfig_initvoltage_mask |
+ vp->common->vpconfig_initvdd);
+ vpconfig |= vsel << __ffs(vp->common->vpconfig_initvoltage_mask);
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ /* Trigger initVDD value copy to voltage processor */
+ voltdm->write((vpconfig | vp->common->vpconfig_initvdd),
+ vp->vpconfig);
+
+ /* Clear initVDD copy trigger bit */
+ voltdm->write(vpconfig, vp->vpconfig);
+}
+
+/* Generic voltage init functions */
+void __init omap_vp_init(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp = voltdm->vp;
+ u32 val, sys_clk_rate, timeout, waittime;
+ u32 vddmin, vddmax, vstepmin, vstepmax;
+
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ vp->enabled = false;
+
+ /* Divide to avoid overflow */
+ sys_clk_rate = voltdm->sys_clk.rate / 1000;
+
+ timeout = (sys_clk_rate * voltdm->pmic->vp_timeout_us) / 1000;
+ vddmin = voltdm->pmic->uv_to_vsel(voltdm->pmic->vp_vddmin);
+ vddmax = voltdm->pmic->uv_to_vsel(voltdm->pmic->vp_vddmax);
+
+ waittime = DIV_ROUND_UP(voltdm->pmic->step_size * sys_clk_rate,
+ 1000 * voltdm->pmic->slew_rate);
+ vstepmin = voltdm->pmic->vp_vstepmin;
+ vstepmax = voltdm->pmic->vp_vstepmax;
+
+ /*
+ * VP_CONFIG: error gain is not set here, it will be updated
+ * on each scale, based on OPP.
+ */
+ val = (voltdm->pmic->vp_erroroffset <<
+ __ffs(voltdm->vp->common->vpconfig_erroroffset_mask)) |
+ vp->common->vpconfig_timeouten;
+ voltdm->write(val, vp->vpconfig);
+
+ /* VSTEPMIN */
+ val = (waittime << vp->common->vstepmin_smpswaittimemin_shift) |
+ (vstepmin << vp->common->vstepmin_stepmin_shift);
+ voltdm->write(val, vp->vstepmin);
+
+ /* VSTEPMAX */
+ val = (vstepmax << vp->common->vstepmax_stepmax_shift) |
+ (waittime << vp->common->vstepmax_smpswaittimemax_shift);
+ voltdm->write(val, vp->vstepmax);
+
+ /* VLIMITTO */
+ val = (vddmax << vp->common->vlimitto_vddmax_shift) |
+ (vddmin << vp->common->vlimitto_vddmin_shift) |
+ (timeout << vp->common->vlimitto_timeout_shift);
+ voltdm->write(val, vp->vlimitto);
+}
+
+/**
+ * omap_vp_is_transdone() - is voltage transfer done on vp?
+ * @voltdm: pointer to the VDD which is to be scaled.
+ *
+ * The VP's transdone bit is the only way to ensure that the transfer
+ * of the voltage value has actually been sent over to the PMIC.
+ * This is hence useful for all users of the voltage domain to precisely
+ * identify when the PMIC voltage has been set by the voltage processor.
+ */
+bool omap_vp_is_transdone(struct voltagedomain *voltdm)
+{
+
+ struct omap_vp_instance *vp = voltdm->vp;
+
+ return vp->common->ops->check_txdone(vp->id) ? true : false;
+}
+
+/**
+ * omap_vp_clear_transdone() - clear voltage transfer done status on vp
+ * @voltdm: pointer to the VDD which is to be scaled.
+ */
+void omap_vp_clear_transdone(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp = voltdm->vp;
+
+ vp->common->ops->clear_txdone(vp->id);
+
+ return;
+}
+
+int omap_vp_update_errorgain(struct voltagedomain *voltdm,
+ struct omap_volt_data *volt_data)
+{
+ if (IS_ERR_OR_NULL(volt_data)) {
+ pr_err("%s: vdm %s bad voltage data %p\n", __func__,
+ voltdm->name, volt_data);
+ return -EINVAL;
+ }
+
+ /* Setting vp errorgain based on the voltage */
+ voltdm->rmw(voltdm->vp->common->vpconfig_errorgain_mask,
+ volt_data->vp_errgain <<
+ __ffs(voltdm->vp->common->vpconfig_errorgain_mask),
+ voltdm->vp->vpconfig);
+
+ return 0;
+}
+
+#define _MAX_RETRIES_BEFORE_RECOVER 50
+#define _MAX_COUNT_ERR 10
+static u8 __vp_debug_error_message_count = _MAX_COUNT_ERR;
+static u8 __vp_recover_count = _MAX_RETRIES_BEFORE_RECOVER;
+/* Dump the first few messages with a stack trace, tone down severity for the rest */
+#define _vp_controlled_err(vp, voltdm, ARGS...) \
+{ \
+ if (__vp_debug_error_message_count) { \
+ pr_err(ARGS); \
+ dump_stack(); \
+ __vp_debug_error_message_count--; \
+ } else { \
+ pr_err_ratelimited(ARGS); \
+ } \
+ if ((vp)->common->ops->recover && !(--__vp_recover_count)) { \
+ pr_err("%s:domain %s recovery count triggered\n", \
+ __func__, (voltdm)->name); \
+ (vp)->common->ops->recover((vp)->id); \
+ __vp_recover_count = _MAX_RETRIES_BEFORE_RECOVER; \
+ } \
+}
+
+
+/* VP force update method of voltage scaling */
+int omap_vp_forceupdate_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_v)
+{
+ struct omap_vp_instance *vp = voltdm->vp;
+ u32 vpconfig;
+ u8 target_vsel, current_vsel;
+ int ret, timeout = 0;
+ unsigned long target_volt = omap_get_operation_voltage(target_v);
+
+ /*
+ * Wait for VP idle. Typical latency is <2us. Maximum latency is ~100us.
+ * This is an additional allowance to ensure we are in a proper state
+ * to enter the forceupdate state transition.
+ */
+ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle),
+ VP_IDLE_TIMEOUT, timeout);
+
+ if (timeout >= VP_IDLE_TIMEOUT)
+ _vp_controlled_err(vp, voltdm,
+ "%s:vdd_%s idletimdout forceupdate(v=%ld)\n",
+ __func__, voltdm->name, target_volt);
+
+ ret = omap_vc_pre_scale(voltdm, target_volt, target_v,
+ &target_vsel, ¤t_vsel);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear all pending TransactionDone interrupt/status. Typical latency
+ * is <3us
+ */
+ while (timeout++ < VP_TRANXDONE_TIMEOUT) {
+ vp->common->ops->clear_txdone(vp->id);
+ if (!vp->common->ops->check_txdone(vp->id))
+ break;
+ udelay(1);
+ }
+ if (timeout >= VP_TRANXDONE_TIMEOUT) {
+ _vp_controlled_err(vp, voltdm,
+ "%s: vdd_%s TRANXDONE timeout exceeded."
+ "Voltage change aborted target volt=%ld,"
+ "target vsel=0x%02x, current_vsel=0x%02x\n",
+ __func__, voltdm->name, target_volt,
+ target_vsel, current_vsel);
+ return -ETIMEDOUT;
+ }
+
+ /* Configure for VP-Force Update */
+ vpconfig = voltdm->read(vp->vpconfig);
+ vpconfig &= ~(vp->common->vpconfig_initvdd |
+ vp->common->vpconfig_forceupdate |
+ vp->common->vpconfig_initvoltage_mask);
+ vpconfig |= ((target_vsel <<
+ __ffs(vp->common->vpconfig_initvoltage_mask)));
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ /* Trigger initVDD value copy to voltage processor */
+ vpconfig |= vp->common->vpconfig_initvdd;
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ /* Force update of voltage */
+ vpconfig |= vp->common->vpconfig_forceupdate;
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ /*
+ * Wait for TransactionDone. Typical latency is <200us.
+ * Depends on SMPSWAITTIMEMIN/MAX and voltage change
+ */
+ timeout = 0;
+ omap_test_timeout(vp->common->ops->check_txdone(vp->id),
+ VP_TRANXDONE_TIMEOUT, timeout);
+ if (timeout >= VP_TRANXDONE_TIMEOUT)
+ _vp_controlled_err(vp, voltdm,
+ "%s: vdd_%s TRANXDONE timeout exceeded. "
+ "TRANXDONE never got set after the voltage update. "
+ "target volt=%ld, target vsel=0x%02x, "
+ "current_vsel=0x%02x\n",
+ __func__, voltdm->name, target_volt,
+ target_vsel, current_vsel);
+
+ omap_vc_post_scale(voltdm, target_volt, target_v,
+ target_vsel, current_vsel);
+
+ /*
+ * Disable TransactionDone interrupt , clear all status, clear
+ * control registers
+ */
+ timeout = 0;
+ while (timeout++ < VP_TRANXDONE_TIMEOUT) {
+ vp->common->ops->clear_txdone(vp->id);
+ if (!vp->common->ops->check_txdone(vp->id))
+ break;
+ udelay(1);
+ }
+
+ if (timeout >= VP_TRANXDONE_TIMEOUT)
+ _vp_controlled_err(vp, voltdm,
+ "%s: vdd_%s TRANXDONE timeout exceeded while"
+ "trying to clear the TRANXDONE status. target volt=%ld,"
+ "target vsel=0x%02x, current_vsel=0x%02x\n",
+ __func__, voltdm->name, target_volt,
+ target_vsel, current_vsel);
+
+ vpconfig = voltdm->read(vp->vpconfig);
+ /* Clear initVDD copy trigger bit */
+ vpconfig &= ~vp->common->vpconfig_initvdd;
+ voltdm->write(vpconfig, vp->vpconfig);
+ /* Clear force bit */
+ vpconfig &= ~vp->common->vpconfig_forceupdate;
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ return 0;
+}
+
+/**
+ * omap_vp_get_curr_volt() - API to get the current vp voltage.
+ * @voltdm: pointer to the VDD.
+ *
+ * This API returns the current voltage for the specified voltage processor
+ */
+unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp = voltdm->vp;
+ u8 curr_vsel;
+
+ if (!voltdm || IS_ERR(voltdm)) {
+ pr_warning("%s: VDD specified does not exist!\n", __func__);
+ return 0;
+ }
+
+ if (!voltdm->read) {
+ pr_err("%s: No read API for reading vdd_%s regs\n",
+ __func__, voltdm->name);
+ return 0;
+ }
+
+ curr_vsel = (voltdm->read(vp->voltage) & vp->common->vpvoltage_mask)
+ >> __ffs(vp->common->vpvoltage_mask);
+
+ if (!voltdm->pmic || !voltdm->pmic->vsel_to_uv) {
+ pr_warning("%s: PMIC function to convert vsel to voltage"
+ "in uV not registerd\n", __func__);
+ return 0;
+ }
+
+ return voltdm->pmic->vsel_to_uv(curr_vsel);
+}
+
+/**
+ * omap_vp_enable() - API to enable a particular VP
+ * @voltdm: pointer to the VDD whose VP is to be enabled.
+ *
+ * This API enables a particular voltage processor. Needed by the smartreflex
+ * class drivers.
+ */
+void omap_vp_enable(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp;
+ u32 vpconfig;
+
+ if (!voltdm || IS_ERR(voltdm)) {
+ pr_warning("%s: VDD specified does not exist!\n", __func__);
+ return;
+ }
+
+ vp = voltdm->vp;
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ /* If VP is already enabled, do nothing. Return */
+ if (vp->enabled)
+ return;
+
+ vp_latch_vsel(voltdm);
+
+ /* Enable VP */
+ vpconfig = voltdm->read(vp->vpconfig);
+ vpconfig |= vp->common->vpconfig_vpenable;
+ voltdm->write(vpconfig, vp->vpconfig);
+ vp->enabled = true;
+}
+
+/**
+ * omap_vp_disable() - API to disable a particular VP
+ * @voltdm: pointer to the VDD whose VP is to be disabled.
+ *
+ * This API disables a particular voltage processor. Needed by the smartreflex
+ * class drivers.
+ */
+void omap_vp_disable(struct voltagedomain *voltdm)
+{
+ struct omap_vp_instance *vp;
+ u32 vpconfig;
+ int timeout;
+
+ if (!voltdm || IS_ERR(voltdm)) {
+ pr_warning("%s: VDD specified does not exist!\n", __func__);
+ return;
+ }
+
+ vp = voltdm->vp;
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+ return;
+ }
+
+ /* If VP is already disabled, do nothing. Return */
+ if (!vp->enabled) {
+ pr_warning("%s: Trying to disable VP for vdd_%s when"
+ "it is already disabled\n", __func__, voltdm->name);
+ return;
+ }
+
+ /*
+ * Wait for VP idle. Typical latency is <2us. Maximum latency is ~100us,
+ * depending on whether we catch the VP in the middle of an SR operation.
+ */
+ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle),
+ VP_IDLE_TIMEOUT, timeout);
+
+ if (timeout >= VP_IDLE_TIMEOUT)
+ pr_warning("%s: vdd_%s idle timedout before disable\n",
+ __func__, voltdm->name);
+
+ /* Disable VP */
+ vpconfig = voltdm->read(vp->vpconfig);
+ vpconfig &= ~vp->common->vpconfig_vpenable;
+ voltdm->write(vpconfig, vp->vpconfig);
+
+ /*
+ * Wait for VP idle. Typical latency is <2us. Maximum latency is ~100us.
+ */
+ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle),
+ VP_IDLE_TIMEOUT, timeout);
+
+ if (timeout >= VP_IDLE_TIMEOUT)
+ pr_warning("%s: vdd_%s idle timedout after disable\n",
+ __func__, voltdm->name);
+
+ vp->enabled = false;
+
+ return;
+}
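
For orientation, a hedged sketch of how a voltage-scaling path could be built from the VP helpers above. In this tree the real orchestration lives in the voltage/VC layer; the function below is illustrative only, and its name and error handling are assumptions.

/* Illustrative only -- not the actual call path in this series. */
static int example_vp_scale(struct voltagedomain *voltdm,
			    unsigned long target_uv)
{
	struct omap_volt_data *target_v;
	int ret;

	target_v = omap_voltage_get_voltdata(voltdm, target_uv);
	if (IS_ERR_OR_NULL(target_v))
		return -EINVAL;

	/* program the error gain that matches the new OPP voltage */
	ret = omap_vp_update_errorgain(voltdm, target_v);
	if (ret)
		return ret;

	/* let the VP force the SMPS to the new voltage */
	return omap_vp_forceupdate_scale(voltdm, target_v);
}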
diff --git a/arch/arm/mach-omap2/vp.h b/arch/arm/mach-omap2/vp.h
index 7ce134f..cb3465a 100644
--- a/arch/arm/mach-omap2/vp.h
+++ b/arch/arm/mach-omap2/vp.h
@@ -19,44 +19,53 @@
#include <linux/kernel.h>
+struct voltagedomain;
+
/* XXX document */
-#define VP_IDLE_TIMEOUT 200
+#define VP_IDLE_TIMEOUT 500
#define VP_TRANXDONE_TIMEOUT 300
+/**
+ * struct omap_vp_ops - per-VP operations
+ * @check_txdone: check for VP transaction done
+ * @clear_txdone: clear VP transaction done status
+ */
+struct omap_vp_ops {
+ u32 (*check_txdone)(u8 vp_id);
+ void (*clear_txdone)(u8 vp_id);
+ void (*recover)(u8 vp_id);
+};
/**
- * struct omap_vp_common_data - register data common to all VDDs
+ * struct omap_vp_common - register data common to all VDDs
+ * @vpconfig_erroroffset_mask: ERROROFFSET bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_errorgain_mask: ERRORGAIN bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_initvoltage_mask: INITVOLTAGE bitmask in the PRM_VP*_CONFIG reg
- * @vpconfig_timeouten_mask: TIMEOUT bitmask in the PRM_VP*_CONFIG reg
+ * @vpconfig_timeouten: TIMEOUT bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_initvdd: INITVDD bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_forceupdate: FORCEUPDATE bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_vpenable: VPENABLE bitmask in the PRM_VP*_CONFIG reg
* @vpconfig_erroroffset_shift: ERROROFFSET field shift in PRM_VP*_CONFIG reg
* @vpconfig_errorgain_shift: ERRORGAIN field shift in PRM_VP*_CONFIG reg
* @vpconfig_initvoltage_shift: INITVOLTAGE field shift in PRM_VP*_CONFIG reg
- * @vpconfig_stepmin_shift: VSTEPMIN field shift in the PRM_VP*_VSTEPMIN reg
- * @vpconfig_smpswaittimemin_shift: SMPSWAITTIMEMIN field shift in PRM_VP*_VSTEPMIN reg
- * @vpconfig_stepmax_shift: VSTEPMAX field shift in the PRM_VP*_VSTEPMAX reg
- * @vpconfig_smpswaittimemax_shift: SMPSWAITTIMEMAX field shift in PRM_VP*_VSTEPMAX reg
- * @vpconfig_vlimitto_vddmin_shift: VDDMIN field shift in PRM_VP*_VLIMITTO reg
- * @vpconfig_vlimitto_vddmax_shift: VDDMAX field shift in PRM_VP*_VLIMITTO reg
- * @vpconfig_vlimitto_timeout_shift: TIMEOUT field shift in PRM_VP*_VLIMITTO reg
- *
- * XXX It it not necessary to have both a mask and a shift for the same
- * bitfield - remove one
- * XXX Many of these fields are wrongly named -- e.g., vpconfig_smps* -- fix!
+ * @vstepmin_stepmin_shift: VSTEPMIN field shift in the PRM_VP*_VSTEPMIN reg
+ * @vstepmin_smpswaittimemin_shift: SMPSWAITTIMEMIN field shift in PRM_VP*_VSTEPMIN reg
+ * @vstepmax_stepmax_shift: VSTEPMAX field shift in the PRM_VP*_VSTEPMAX reg
+ * @vstepmax_smpswaittimemax_shift: SMPSWAITTIMEMAX field shift in PRM_VP*_VSTEPMAX reg
+ * @vlimitto_vddmin_shift: VDDMIN field shift in PRM_VP*_VLIMITTO reg
+ * @vlimitto_vddmax_shift: VDDMAX field shift in PRM_VP*_VLIMITTO reg
+ * @vlimitto_timeout_shift: TIMEOUT field shift in PRM_VP*_VLIMITTO reg
+ * @vpvoltage_mask: VPVOLTAGE field mask in PRM_VP*_VOLTAGE reg
*/
-struct omap_vp_common_data {
+struct omap_vp_common {
+ u32 vpconfig_erroroffset_mask;
u32 vpconfig_errorgain_mask;
u32 vpconfig_initvoltage_mask;
- u32 vpconfig_timeouten;
- u32 vpconfig_initvdd;
- u32 vpconfig_forceupdate;
- u32 vpconfig_vpenable;
- u8 vpconfig_erroroffset_shift;
- u8 vpconfig_errorgain_shift;
- u8 vpconfig_initvoltage_shift;
+ u8 vpconfig_timeouten;
+ u8 vpconfig_initvdd;
+ u8 vpconfig_forceupdate;
+ u8 vpconfig_vpenable;
+ u8 vstatus_vpidle;
u8 vstepmin_stepmin_shift;
u8 vstepmin_smpswaittimemin_shift;
u8 vstepmax_stepmax_shift;
@@ -64,80 +73,51 @@
u8 vlimitto_vddmin_shift;
u8 vlimitto_vddmax_shift;
u8 vlimitto_timeout_shift;
+ u8 vpvoltage_mask;
+
+ const struct omap_vp_ops *ops;
};
/**
- * struct omap_vp_prm_irqst_data - PRM_IRQSTATUS_MPU.VP_TRANXDONE_ST data
- * @prm_irqst_reg: reg offset for PRM_IRQSTATUS_MPU from top of PRM
- * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
- *
- * XXX prm_irqst_reg does not belong here
- * XXX Note that on OMAP3, VP_TRANXDONE interrupt may not work due to a
- * hardware bug
- * XXX This structure is probably not needed
- */
-struct omap_vp_prm_irqst_data {
- u8 prm_irqst_reg;
- u32 tranxdone_status;
-};
-
-/**
- * struct omap_vp_instance_data - VP register offsets (per-VDD)
- * @vp_common: pointer to struct omap_vp_common_data * for this SoC
- * @prm_irqst_data: pointer to struct omap_vp_prm_irqst_data for this VDD
+ * struct omap_vp_instance - VP register offsets (per-VDD)
+ * @common: pointer to struct omap_vp_common * for this SoC
* @vpconfig: PRM_VP*_CONFIG reg offset from PRM start
* @vstepmin: PRM_VP*_VSTEPMIN reg offset from PRM start
* @vlimitto: PRM_VP*_VLIMITTO reg offset from PRM start
* @vstatus: PRM_VP*_VSTATUS reg offset from PRM start
* @voltage: PRM_VP*_VOLTAGE reg offset from PRM start
+ * @enabled: flag to keep track of whether vp is enabled or not
*
* XXX vp_common is probably not needed since it is per-SoC
*/
-struct omap_vp_instance_data {
- const struct omap_vp_common_data *vp_common;
- const struct omap_vp_prm_irqst_data *prm_irqst_data;
+struct omap_vp_instance {
+ const struct omap_vp_common *common;
u8 vpconfig;
u8 vstepmin;
u8 vstepmax;
u8 vlimitto;
u8 vstatus;
u8 voltage;
+ u8 id;
+ bool enabled;
};
-/**
- * struct omap_vp_runtime_data - VP data populated at runtime by code
- * @vpconfig_erroroffset: value of ERROROFFSET bitfield in PRM_VP*_CONFIG
- * @vpconfig_errorgain: value of ERRORGAIN bitfield in PRM_VP*_CONFIG
- * @vstepmin_smpswaittimemin: value of SMPSWAITTIMEMIN bitfield in PRM_VP*_VSTEPMIN
- * @vstepmax_smpswaittimemax: value of SMPSWAITTIMEMAX bitfield in PRM_VP*_VSTEPMAX
- * @vlimitto_timeout: value of TIMEOUT bitfield in PRM_VP*_VLIMITTO
- * @vstepmin_stepmin: value of VSTEPMIN bitfield in PRM_VP*_VSTEPMIN
- * @vstepmax_stepmax: value of VSTEPMAX bitfield in PRM_VP*_VSTEPMAX
- * @vlimitto_vddmin: value of VDDMIN bitfield in PRM_VP*_VLIMITTO
- * @vlimitto_vddmax: value of VDDMAX bitfield in PRM_VP*_VLIMITTO
- *
- * XXX Is this structure really needed? Why not just program the
- * device directly? They are in PRM space, therefore in the WKUP
- * powerdomain, so register contents should not be lost in off-mode.
- * XXX Some of these fields are incorrectly named, e.g., vstep*
- */
-struct omap_vp_runtime_data {
- u32 vpconfig_erroroffset;
- u16 vpconfig_errorgain;
- u16 vstepmin_smpswaittimemin;
- u16 vstepmax_smpswaittimemax;
- u16 vlimitto_timeout;
- u8 vstepmin_stepmin;
- u8 vstepmax_stepmax;
- u8 vlimitto_vddmin;
- u8 vlimitto_vddmax;
-};
+extern struct omap_vp_instance omap3_vp_mpu;
+extern struct omap_vp_instance omap3_vp_core;
-extern struct omap_vp_instance_data omap3_vp1_data;
-extern struct omap_vp_instance_data omap3_vp2_data;
+extern struct omap_vp_instance omap4_vp_mpu;
+extern struct omap_vp_instance omap4_vp_iva;
+extern struct omap_vp_instance omap4_vp_core;
-extern struct omap_vp_instance_data omap4_vp_mpu_data;
-extern struct omap_vp_instance_data omap4_vp_iva_data;
-extern struct omap_vp_instance_data omap4_vp_core_data;
+void omap_vp_init(struct voltagedomain *voltdm);
+void omap_vp_enable(struct voltagedomain *voltdm);
+void omap_vp_disable(struct voltagedomain *voltdm);
+unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm);
+int omap_vp_forceupdate_scale(struct voltagedomain *voltdm,
+ struct omap_volt_data *target_v);
+int omap_vp_update_errorgain(struct voltagedomain *voltdm,
+ struct omap_volt_data *volt_data);
+bool omap_vp_is_transdone(struct voltagedomain *voltdm);
+void omap_vp_clear_transdone(struct voltagedomain *voltdm);
#endif
diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
index 6452170..6db2604 100644
--- a/arch/arm/mach-omap2/vp3xxx_data.c
+++ b/arch/arm/mach-omap2/vp3xxx_data.c
@@ -25,21 +25,26 @@
#include "voltage.h"
#include "vp.h"
+#include "prm2xxx_3xxx.h"
+
+static const struct omap_vp_ops omap3_vp_ops = {
+ .check_txdone = omap3_prm_vp_check_txdone,
+ .clear_txdone = omap3_prm_vp_clear_txdone,
+};
/*
* VP data common to 34xx/36xx chips
* XXX This stuff presumably belongs in the vp3xxx.c or vp.c file.
*/
-static const struct omap_vp_common_data omap3_vp_common = {
- .vpconfig_erroroffset_shift = OMAP3430_ERROROFFSET_SHIFT,
+static const struct omap_vp_common omap3_vp_common = {
+ .vpconfig_erroroffset_mask = OMAP3430_ERROROFFSET_MASK,
.vpconfig_errorgain_mask = OMAP3430_ERRORGAIN_MASK,
- .vpconfig_errorgain_shift = OMAP3430_ERRORGAIN_SHIFT,
- .vpconfig_initvoltage_shift = OMAP3430_INITVOLTAGE_SHIFT,
.vpconfig_initvoltage_mask = OMAP3430_INITVOLTAGE_MASK,
.vpconfig_timeouten = OMAP3430_TIMEOUTEN_MASK,
.vpconfig_initvdd = OMAP3430_INITVDD_MASK,
.vpconfig_forceupdate = OMAP3430_FORCEUPDATE_MASK,
.vpconfig_vpenable = OMAP3430_VPENABLE_MASK,
+ .vstatus_vpidle = OMAP3430_VPINIDLE_MASK,
.vstepmin_smpswaittimemin_shift = OMAP3430_SMPSWAITTIMEMIN_SHIFT,
.vstepmax_smpswaittimemax_shift = OMAP3430_SMPSWAITTIMEMAX_SHIFT,
.vstepmin_stepmin_shift = OMAP3430_VSTEPMIN_SHIFT,
@@ -47,36 +52,29 @@
.vlimitto_vddmin_shift = OMAP3430_VDDMIN_SHIFT,
.vlimitto_vddmax_shift = OMAP3430_VDDMAX_SHIFT,
.vlimitto_timeout_shift = OMAP3430_TIMEOUT_SHIFT,
+ .vpvoltage_mask = OMAP3430_VPVOLTAGE_MASK,
+
+ .ops = &omap3_vp_ops,
};
-static const struct omap_vp_prm_irqst_data omap3_vp1_prm_irqst_data = {
- .prm_irqst_reg = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
- .tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK,
-};
-
-struct omap_vp_instance_data omap3_vp1_data = {
- .vp_common = &omap3_vp_common,
+struct omap_vp_instance omap3_vp_mpu = {
+ .id = OMAP3_PRM_IRQ_VDD_MPU_ID,
+ .common = &omap3_vp_common,
.vpconfig = OMAP3_PRM_VP1_CONFIG_OFFSET,
.vstepmin = OMAP3_PRM_VP1_VSTEPMIN_OFFSET,
.vstepmax = OMAP3_PRM_VP1_VSTEPMAX_OFFSET,
.vlimitto = OMAP3_PRM_VP1_VLIMITTO_OFFSET,
.vstatus = OMAP3_PRM_VP1_STATUS_OFFSET,
.voltage = OMAP3_PRM_VP1_VOLTAGE_OFFSET,
- .prm_irqst_data = &omap3_vp1_prm_irqst_data,
};
-static const struct omap_vp_prm_irqst_data omap3_vp2_prm_irqst_data = {
- .prm_irqst_reg = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
- .tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK,
-};
-
-struct omap_vp_instance_data omap3_vp2_data = {
- .vp_common = &omap3_vp_common,
+struct omap_vp_instance omap3_vp_core = {
+ .id = OMAP3_PRM_IRQ_VDD_CORE_ID,
+ .common = &omap3_vp_common,
.vpconfig = OMAP3_PRM_VP2_CONFIG_OFFSET,
.vstepmin = OMAP3_PRM_VP2_VSTEPMIN_OFFSET,
.vstepmax = OMAP3_PRM_VP2_VSTEPMAX_OFFSET,
.vlimitto = OMAP3_PRM_VP2_VLIMITTO_OFFSET,
.vstatus = OMAP3_PRM_VP2_STATUS_OFFSET,
.voltage = OMAP3_PRM_VP2_VOLTAGE_OFFSET,
- .prm_irqst_data = &omap3_vp2_prm_irqst_data,
};
diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
index 65d1ad6..da6fed9 100644
--- a/arch/arm/mach-omap2/vp44xx_data.c
+++ b/arch/arm/mach-omap2/vp44xx_data.c
@@ -21,26 +21,38 @@
#include <plat/common.h>
+#include "pm.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "voltage.h"
#include "vp.h"
+/* OMAP4 is hooked such that only a cold reset will reset VP */
+static void omap4_vp_recover(u8 vp_id)
+{
+ omap4_pm_cold_reset("Voltage Processor Recovery");
+}
+
+static const struct omap_vp_ops omap4_vp_ops = {
+ .check_txdone = omap4_prm_vp_check_txdone,
+ .clear_txdone = omap4_prm_vp_clear_txdone,
+ .recover = omap4_vp_recover,
+};
+
/*
* VP data common to 44xx chips
* XXX This stuff presumably belongs in the vp44xx.c or vp.c file.
*/
-static const struct omap_vp_common_data omap4_vp_common = {
- .vpconfig_erroroffset_shift = OMAP4430_ERROROFFSET_SHIFT,
+static const struct omap_vp_common omap4_vp_common = {
+ .vpconfig_erroroffset_mask = OMAP4430_ERROROFFSET_MASK,
.vpconfig_errorgain_mask = OMAP4430_ERRORGAIN_MASK,
- .vpconfig_errorgain_shift = OMAP4430_ERRORGAIN_SHIFT,
- .vpconfig_initvoltage_shift = OMAP4430_INITVOLTAGE_SHIFT,
.vpconfig_initvoltage_mask = OMAP4430_INITVOLTAGE_MASK,
.vpconfig_timeouten = OMAP4430_TIMEOUTEN_MASK,
.vpconfig_initvdd = OMAP4430_INITVDD_MASK,
.vpconfig_forceupdate = OMAP4430_FORCEUPDATE_MASK,
.vpconfig_vpenable = OMAP4430_VPENABLE_MASK,
+ .vstatus_vpidle = OMAP4430_VPINIDLE_MASK,
.vstepmin_smpswaittimemin_shift = OMAP4430_SMPSWAITTIMEMIN_SHIFT,
.vstepmax_smpswaittimemax_shift = OMAP4430_SMPSWAITTIMEMAX_SHIFT,
.vstepmin_stepmin_shift = OMAP4430_VSTEPMIN_SHIFT,
@@ -48,53 +60,39 @@
.vlimitto_vddmin_shift = OMAP4430_VDDMIN_SHIFT,
.vlimitto_vddmax_shift = OMAP4430_VDDMAX_SHIFT,
.vlimitto_timeout_shift = OMAP4430_TIMEOUT_SHIFT,
+ .vpvoltage_mask = OMAP4430_VPVOLTAGE_MASK,
+ .ops = &omap4_vp_ops,
};
-static const struct omap_vp_prm_irqst_data omap4_vp_mpu_prm_irqst_data = {
- .prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET,
- .tranxdone_status = OMAP4430_VP_MPU_TRANXDONE_ST_MASK,
-};
-
-struct omap_vp_instance_data omap4_vp_mpu_data = {
- .vp_common = &omap4_vp_common,
+struct omap_vp_instance omap4_vp_mpu = {
+ .id = OMAP4_PRM_IRQ_VDD_MPU_ID,
+ .common = &omap4_vp_common,
.vpconfig = OMAP4_PRM_VP_MPU_CONFIG_OFFSET,
.vstepmin = OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET,
.vstepmax = OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET,
.vlimitto = OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET,
.vstatus = OMAP4_PRM_VP_MPU_STATUS_OFFSET,
.voltage = OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET,
- .prm_irqst_data = &omap4_vp_mpu_prm_irqst_data,
};
-static const struct omap_vp_prm_irqst_data omap4_vp_iva_prm_irqst_data = {
- .prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
- .tranxdone_status = OMAP4430_VP_IVA_TRANXDONE_ST_MASK,
-};
-
-struct omap_vp_instance_data omap4_vp_iva_data = {
- .vp_common = &omap4_vp_common,
+struct omap_vp_instance omap4_vp_iva = {
+ .id = OMAP4_PRM_IRQ_VDD_IVA_ID,
+ .common = &omap4_vp_common,
.vpconfig = OMAP4_PRM_VP_IVA_CONFIG_OFFSET,
.vstepmin = OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET,
.vstepmax = OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET,
.vlimitto = OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET,
.vstatus = OMAP4_PRM_VP_IVA_STATUS_OFFSET,
.voltage = OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET,
- .prm_irqst_data = &omap4_vp_iva_prm_irqst_data,
};
-static const struct omap_vp_prm_irqst_data omap4_vp_core_prm_irqst_data = {
- .prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
- .tranxdone_status = OMAP4430_VP_CORE_TRANXDONE_ST_MASK,
-};
-
-struct omap_vp_instance_data omap4_vp_core_data = {
- .vp_common = &omap4_vp_common,
+struct omap_vp_instance omap4_vp_core = {
+ .id = OMAP4_PRM_IRQ_VDD_CORE_ID,
+ .common = &omap4_vp_common,
.vpconfig = OMAP4_PRM_VP_CORE_CONFIG_OFFSET,
.vstepmin = OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET,
.vstepmax = OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET,
.vlimitto = OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET,
.vstatus = OMAP4_PRM_VP_CORE_STATUS_OFFSET,
.voltage = OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET,
- .prm_irqst_data = &omap4_vp_core_prm_irqst_data,
};
-
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba..131f381 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -99,8 +99,7 @@
set_mm_context(mm, asid);
/* set the new ASID */
- asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
- isb();
+ cpu_switch_mm(mm->pgd, mm);
}
#else
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f96d2c7..1a765c8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -357,6 +357,18 @@
}
EXPORT_SYMBOL(dma_alloc_writecombine);
+/*
+ * Allocate a strongly ordered region, in much the same way as
+ * dma_alloc_coherent above.
+ */
+void *dma_alloc_stronglyordered(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ return __dma_alloc(dev, size, handle, gfp,
+ pgprot_stronglyordered(pgprot_kernel));
+}
+EXPORT_SYMBOL(dma_alloc_stronglyordered);
+
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
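
A hedged usage sketch for the new allocator above: it behaves like the other __dma_alloc() wrappers, returning a kernel mapping with strongly-ordered attributes plus a bus handle. The wrapper function below and its error handling are illustrative assumptions.

/* Illustrative only: allocate a strongly-ordered DMA buffer. */
static void *example_alloc_so_buffer(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	void *cpu_addr;

	cpu_addr = dma_alloc_stronglyordered(dev, size, dma_handle,
					     GFP_KERNEL);
	if (!cpu_addr)
		pr_err("strongly-ordered DMA alloc of %zu bytes failed\n",
		       size);
	return cpu_addr;
}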
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 19d9369bd..f23e315 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -273,6 +273,14 @@
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_SO] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_MT_UNCACHED,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
+ PMD_SECT_UNCACHED | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 49a4c75..ef3763c 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -38,6 +38,29 @@
depends on OMAP_DEBUG_DEVICES
default y if LEDS_CLASS
+config OMAP_RPMSG
+ tristate "OMAP Virtio-based remote processor messaging support"
+ depends on ARCH_OMAP4
+ default y
+ select VIRTIO
+ select VIRTIO_RING
+ select OMAP_MBOX_FWK
+ help
+ Say Y if you want to enable OMAP virtio-based remote-processor
+ communication, currently only available with OMAP4. This is required
+ for offloading tasks to the remote on-chip M3s or the C64x+ DSP,
+ usually used by multimedia frameworks to offload CPU-intensive and/or
+ latency-sensitive tasks.
+
+config OMAP_RPMSG_RECOVERY
+ bool "OMAP RPMSG recovery"
+ default y
+ depends on OMAP_RPMSG
+ help
+ Say Y if you want RPMSG to reset the rpmsg channels after a fatal
+ error in the remote processor. This restarts all the channels and
+ the remote processor, resulting in a clean recovery.
+
config OMAP_SMARTREFLEX
bool "SmartReflex support"
depends on (ARCH_OMAP3 || ARCH_OMAP4) && PM
@@ -69,6 +92,26 @@
Class 3 implementation of Smartreflex employs continuous hardware
voltage calibration.
+config OMAP_SMARTREFLEX_CLASS1P5
+ bool "Class 1.5 mode of Smartreflex Implementation"
+ depends on OMAP_SMARTREFLEX
+ help
+ Say Y to enable Class 1.5 implementation of Smartreflex.
+
+ Class 1.5 implementation of Smartreflex employs software controlled
+ hardware voltage calibration.
+
+config OMAP_SR_CLASS1P5_RECALIBRATION_DELAY
+ int "Class 1.5 mode recalibration recalibration delay(ms)"
+ depends on OMAP_SMARTREFLEX_CLASS1P5
+ default 86400000
+ help
+ Set up the recalibration delay in milliseconds.
+
+ Use 0 to never recalibrate (operates in AVS Class 1 mode).
+ Defaults to the recommended recalibration interval of 24 hours.
+ If you do not understand this, use the default.
+
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
depends on ARCH_OMAP
@@ -116,7 +159,7 @@
Buffered Serial Port.
config OMAP_MBOX_FWK
- tristate "Mailbox framework support"
+ bool "Mailbox framework support"
depends on ARCH_OMAP
help
Say Y here if you want to use OMAP Mailbox framework support for
@@ -132,10 +175,10 @@
module parameter).
config OMAP_IOMMU
- tristate
+ bool "IOMMU support for OMAP devices"
config OMAP_IOMMU_DEBUG
- tristate "Export OMAP IOMMU internals in DebugFS"
+ bool "Export OMAP IOMMU internals in DebugFS"
depends on OMAP_IOMMU && DEBUG_FS
help
Select this to see extensive information about
@@ -206,10 +249,29 @@
to data on the serial RX line. This allows you to wake the
system from serial console.
+config OMAP_TEMP_SENSOR
+ bool "OMAP Temp Sensor Support"
+ depends on ARCH_OMAP4
+ default n
+ help
+ Say Y here if you want support for the temperature sensor on
+ OMAP4460. This provides the temperature of the MPU subsystem.
+ Only one instance of the on-die temperature sensor is present.
+
+# this carveout should probably become generic and not omap specific
+config OMAP_REMOTEPROC_MEMPOOL_SIZE
+ hex "Physical carveout memory pool size (Byte)"
+ depends on OMAP_REMOTE_PROC
+ default 0x700000
+ help
+ Allocate specified size of memory at boot time so we can ioremap
+ it safely.
+
choice
prompt "OMAP PM layer selection"
depends on ARCH_OMAP
- default OMAP_PM_NOOP
+ default OMAP_PM
config OMAP_PM_NONE
bool "No PM layer"
@@ -217,6 +279,9 @@
config OMAP_PM_NOOP
bool "No-op/debug PM layer"
+config OMAP_PM
+ depends on PM
+ bool "OMAP PM layer implementation"
endchoice
endmenu
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index f0233e6..476d817 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -16,12 +16,13 @@
obj-$(CONFIG_ARCH_OMAP2) += omap_device.o
obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
+obj-$(CONFIG_REMOTE_PROC) += rproc_user.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
+obj-$(CONFIG_OMAP_RPMSG) += omap_rpmsg.o
obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
-obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
@@ -31,4 +32,5 @@
# OMAP mailbox framework
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox.o
-obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o
+obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-interface.o
+obj-$(CONFIG_OMAP_PM) += omap-pm-interface.o omap-pm-helper.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c9122dd..b327956 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -441,6 +441,8 @@
return 0;
pr_info("clock: disabling unused clocks to save power\n");
+
+ spin_lock_irqsave(&clockfw_lock, flags);
list_for_each_entry(ck, &clocks, node) {
if (ck->ops == &clkops_null)
continue;
@@ -448,10 +450,9 @@
if (ck->usecount > 0 || !ck->enable_reg)
continue;
- spin_lock_irqsave(&clockfw_lock, flags);
arch_clock->clk_disable_unused(ck);
- spin_unlock_irqrestore(&clockfw_lock, flags);
}
+ spin_unlock_irqrestore(&clockfw_lock, flags);
return 0;
}
@@ -475,8 +476,43 @@
/*
* debugfs support to trace clock tree hierarchy and attributes
*/
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
static struct dentry *clk_debugfs_root;
+static int clk_dbg_show_summary(struct seq_file *s, void *unused)
+{
+ struct clk *c;
+ struct clk *pa;
+
+ seq_printf(s, "%-30s %-30s %-10s %s\n",
+ "clock-name", "parent-name", "rate", "use-count");
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(c, &clocks, node) {
+ pa = c->parent;
+ seq_printf(s, "%-30s %-30s %-10lu %d\n",
+ c->name, pa ? pa->name : "none", c->rate, c->usecount);
+ }
+
+ mutex_unlock(&clocks_mutex);
+ return 0;
+}
+
+static int clk_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_dbg_show_summary, inode->i_private);
+}
+
+static const struct file_operations debug_clock_fops = {
+ .open = clk_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int clk_debugfs_register_one(struct clk *c)
{
int err;
@@ -551,6 +587,12 @@
if (err)
goto err_out;
}
+
+ d = debugfs_create_file("summary", S_IRUGO,
+ d, NULL, &debug_clock_fops);
+ if (!d)
+ return -ENOMEM;
+
return 0;
err_out:
debugfs_remove_recursive(clk_debugfs_root);
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index d9f10a3..0c8c8b5 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -20,7 +20,7 @@
#include <plat/board.h>
#include <plat/vram.h>
#include <plat/dsp.h>
-
+#include <plat/remoteproc.h>
#define NO_LENGTH_CHECK 0xffffffff
@@ -65,4 +65,5 @@
omapfb_reserve_sdram_memblock();
omap_vram_reserve_sdram_memblock();
omap_dsp_reserve_sdram_memblock();
+ omap_ipu_reserve_sdram_memblock();
}
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index f7fed60..91b2ec0 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/syscore_ops.h>
#include <asm/sched_clock.h>
@@ -120,9 +121,40 @@
#define SC_MULT 4000000000u
#define SC_SHIFT 17
+static u32 sched_clock_cyc_offset;
+static u32 sched_clock_cyc_suspend;
+static bool sched_clock_suspended;
+
+static int sched_clock_suspend(void)
+{
+ sched_clock_suspended = true;
+ sched_clock_cyc_suspend = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
+
+ return 0;
+}
+
+static void sched_clock_resume(void)
+{
+ sched_clock_cyc_offset = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_suspend;
+ sched_clock_suspended = false;
+}
+
+static struct syscore_ops sched_clock_syscore_ops = {
+ .suspend = sched_clock_suspend,
+ .resume = sched_clock_resume,
+};
+
static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc;
+ if (!sched_clock_suspended)
+ cyc = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
+ else
+ cyc = sched_clock_cyc_suspend;
+
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
@@ -140,7 +172,8 @@
static void notrace omap_update_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc = clocksource_32k.read(&clocksource_32k) -
+ sched_clock_cyc_offset;
update_sched_clock(&cd, cyc, (u32)~0);
}
@@ -152,22 +185,27 @@
* nsecs and adds to a monotonically increasing timespec.
*/
static struct timespec persistent_ts;
-static cycles_t cycles, last_cycles;
+static cycles_t cycles;
+static DEFINE_SPINLOCK(read_persistent_clock_lock);
void read_persistent_clock(struct timespec *ts)
{
unsigned long long nsecs;
- cycles_t delta;
- struct timespec *tsp = &persistent_ts;
+ cycles_t last_cycles;
+ unsigned long flags;
+
+ spin_lock_irqsave(&read_persistent_clock_lock, flags);
last_cycles = cycles;
cycles = clocksource_32k.read(&clocksource_32k);
- delta = cycles - last_cycles;
- nsecs = clocksource_cyc2ns(delta,
+ nsecs = clocksource_cyc2ns(cycles - last_cycles,
clocksource_32k.mult, clocksource_32k.shift);
- timespec_add_ns(tsp, nsecs);
- *ts = *tsp;
+ timespec_add_ns(&persistent_ts, nsecs);
+
+ *ts = persistent_ts;
+
+ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
}
int __init omap_init_clocksource_32k(void)
@@ -202,6 +240,8 @@
init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
32768, SC_MULT, SC_SHIFT);
+
+ register_syscore_ops(&sched_clock_syscore_ops);
}
return 0;
}
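
To make the bookkeeping above concrete with illustrative numbers: if the 32k counter reads 100000 cycles when sched_clock_suspend() runs and 150000 cycles when sched_clock_resume() runs, sched_clock_cyc_offset grows by the 50000 cycles spent suspended, so the cycle value handed to cyc_to_fixed_sched_clock() is identical just before and just after the suspend. sched_clock therefore appears frozen across suspend, while read_persistent_clock() keeps using the raw counter and accumulates the full elapsed time.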
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
deleted file mode 100644
index da4f68d..0000000
--- a/arch/arm/plat-omap/cpu-omap.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/cpu-omap.c
- *
- * CPU frequency scaling for OMAP
- *
- * Copyright (C) 2005 Nokia Corporation
- * Written by Tony Lindgren <tony@atomide.com>
- *
- * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <plat/clock.h>
-#include <asm/system.h>
-
-#define VERY_HI_RATE 900000000
-
-static struct cpufreq_frequency_table *freq_table;
-
-#ifdef CONFIG_ARCH_OMAP1
-#define MPU_CLK "mpu"
-#else
-#define MPU_CLK "virt_prcm_set"
-#endif
-
-static struct clk *mpu_clk;
-
-/* TODO: Add support for SDRAM timing changes */
-
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
-
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
- policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
- policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
-
-static unsigned int omap_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu)
- return 0;
-
- rate = clk_get_rate(mpu_clk) / 1000;
- return rate;
-}
-
-static int omap_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- int ret = 0;
-
- /* Ensure desired rate is within allowed range. Some govenors
- * (ondemand) will just pass target_freq=0 to get the minimum. */
- if (target_freq < policy->min)
- target_freq = policy->min;
- if (target_freq > policy->max)
- target_freq = policy->max;
-
- freqs.old = omap_getspeed(0);
- freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
- freqs.cpu = 0;
-
- if (freqs.old == freqs.new)
- return ret;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-#ifdef CONFIG_CPU_FREQ_DEBUG
- printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
- freqs.old, freqs.new);
-#endif
- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
-{
- int result = 0;
-
- mpu_clk = clk_get(NULL, MPU_CLK);
- if (IS_ERR(mpu_clk))
- return PTR_ERR(mpu_clk);
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->cur = policy->min = policy->max = omap_getspeed(0);
-
- clk_init_cpufreq_table(&freq_table);
- if (freq_table) {
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!result)
- cpufreq_frequency_table_get_attr(freq_table,
- policy->cpu);
- } else {
- policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
- policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
- VERY_HI_RATE) / 1000;
- }
-
- /* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- return 0;
-}
-
-static int omap_cpu_exit(struct cpufreq_policy *policy)
-{
- clk_exit_cpufreq_table(&freq_table);
- clk_put(mpu_clk);
- return 0;
-}
-
-static struct freq_attr *omap_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = omap_verify_speed,
- .target = omap_target,
- .get = omap_getspeed,
- .init = omap_cpu_init,
- .exit = omap_cpu_exit,
- .name = "omap",
- .attr = omap_cpufreq_attr,
-};
-
-static int __init omap_cpufreq_init(void)
-{
- return cpufreq_register_driver(&omap_driver);
-}
-
-arch_initcall(omap_cpufreq_init);
-
-/*
- * if ever we want to remove this, upon cleanup call:
- *
- * cpufreq_unregister_driver()
- * cpufreq_frequency_table_put_attr()
- */
-
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index ea28f98..d8add7e7 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -16,17 +16,21 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
#include <plat/menelaus.h>
#include <plat/mcbsp.h>
+#include <plat/remoteproc.h>
#include <plat/omap44xx.h>
/*-------------------------------------------------------------------------*/
@@ -74,41 +78,6 @@
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
- defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
-
-static struct resource mcpdm_resources[] = {
- {
- .name = "mcpdm_mem",
- .start = OMAP44XX_MCPDM_BASE,
- .end = OMAP44XX_MCPDM_BASE + SZ_4K,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "mcpdm_irq",
- .start = OMAP44XX_IRQ_MCPDM,
- .end = OMAP44XX_IRQ_MCPDM,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device omap_mcpdm_device = {
- .name = "omap-mcpdm",
- .id = -1,
- .num_resources = ARRAY_SIZE(mcpdm_resources),
- .resource = mcpdm_resources,
-};
-
-static void omap_init_mcpdm(void)
-{
- (void) platform_device_register(&omap_mcpdm_device);
-}
-#else
-static inline void omap_init_mcpdm(void) {}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
@@ -190,8 +159,6 @@
static inline void omap_init_rng(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
/* Numbering for the SPI-capable controllers when used for SPI:
* spi = 1
* uwire = 2
@@ -237,6 +204,7 @@
#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
static phys_addr_t omap_dsp_phys_mempool_base;
+static phys_addr_t omap_dsp_phys_mempool_size;
void __init omap_dsp_reserve_sdram_memblock(void)
{
@@ -256,6 +224,7 @@
memblock_remove(paddr, size);
omap_dsp_phys_mempool_base = paddr;
+ omap_dsp_phys_mempool_size = size;
}
phys_addr_t omap_dsp_get_mempool_base(void)
@@ -263,6 +232,73 @@
return omap_dsp_phys_mempool_base;
}
EXPORT_SYMBOL(omap_dsp_get_mempool_base);
+
+phys_addr_t omap_dsp_get_mempool_size(void)
+{
+ return omap_dsp_phys_mempool_size;
+}
+EXPORT_SYMBOL(omap_dsp_get_mempool_size);
+#endif
+
+#if defined(CONFIG_OMAP_REMOTE_PROC)
+static phys_addr_t omap_ipu_phys_mempool_base;
+static u32 omap_ipu_phys_mempool_size;
+static phys_addr_t omap_ipu_phys_st_mempool_base;
+static u32 omap_ipu_phys_st_mempool_size;
+
+void __init omap_ipu_reserve_sdram_memblock(void)
+{
+ /* currently handles only the IPU; the DSP will be handled later */
+ u32 size = CONFIG_OMAP_REMOTEPROC_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %x bytes\n",
+ __func__, size);
+ return;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+
+ omap_ipu_phys_mempool_base = paddr;
+ omap_ipu_phys_mempool_size = size;
+}
+
+void __init omap_ipu_set_static_mempool(u32 start, u32 size)
+{
+ omap_ipu_phys_st_mempool_base = start;
+ omap_ipu_phys_st_mempool_size = size;
+}
+
+phys_addr_t omap_ipu_get_mempool_base(enum omap_rproc_mempool_type type)
+{
+ switch (type) {
+ case OMAP_RPROC_MEMPOOL_STATIC:
+ return omap_ipu_phys_st_mempool_base;
+ case OMAP_RPROC_MEMPOOL_DYNAMIC:
+ return omap_ipu_phys_mempool_base;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(omap_ipu_get_mempool_base);
+
+u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type)
+{
+ switch (type) {
+ case OMAP_RPROC_MEMPOOL_STATIC:
+ return omap_ipu_phys_st_mempool_size;
+ case OMAP_RPROC_MEMPOOL_DYNAMIC:
+ return omap_ipu_phys_mempool_size;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(omap_ipu_get_mempool_size);
#endif
/*
@@ -291,7 +327,6 @@
* in alphabetical order so they're easier to sort through.
*/
omap_init_rng();
- omap_init_mcpdm();
omap_init_uwire();
return 0;
}
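
The remoteproc changes to devices.c above reserve a DMA-safe carveout with memblock during
early boot and expose accessors for its base and size. As a hedged illustration only (the
board hook name and the static-pool address are assumptions, not part of this patch), a
board file could wire the helpers up roughly like this:

	/* sketch only: names and the static-pool address are illustrative */
	static void __init example_board_reserve(void)
	{
		/* dynamic pool, sized by CONFIG_OMAP_REMOTEPROC_MEMPOOL_SIZE */
		omap_ipu_reserve_sdram_memblock();

		/* alternatively, hand the driver a fixed 32 MB region */
		omap_ipu_set_static_mempool(0x9d000000, 32 * SZ_1M);
	}
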
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c22217c..3ec7ec5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1024,12 +1024,26 @@
*/
dma_addr_t omap_get_dma_src_pos(int lch)
{
+ u32 cdac;
dma_addr_t offset = 0;
if (cpu_is_omap15xx())
offset = p->dma_read(CPC, lch);
- else
- offset = p->dma_read(CSAC, lch);
+ else {
+ /*
+ * CDAC != 0 indicates that the DMA transfer on the channel has
+ * been started already.
+ * If CDAC == 0, we cannot trust the CSAC value since it has
+ * not been updated and can contain a random number.
+ * Return the start address in case the DMA has not yet started.
+ * This is valid since in fact the DMA has not yet progressed.
+ */
+ cdac = p->dma_read(CDAC, lch);
+ if (likely(cdac))
+ offset = p->dma_read(CSAC, lch);
+ else
+ offset = p->dma_read(CSSA, lch);
+ }
if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
offset = p->dma_read(CSAC, lch);
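
With this change omap_get_dma_src_pos() no longer returns a stale CSAC value before the
first transfer; until CDAC becomes non-zero it reports the programmed source start address.
A minimal sketch of how a client driver might use it to track progress (the channel and
start address are assumed to come from the caller's own transfer setup):

	/* sketch only: lch comes from omap_request_dma(), src_start from the
	 * caller's transfer setup */
	static unsigned int example_bytes_consumed(int lch, dma_addr_t src_start)
	{
		dma_addr_t pos = omap_get_dma_src_pos(lch);

		/* with the fix above, pos is the programmed start address
		 * until the transfer actually begins */
		return pos - src_start;
	}
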
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index ee9f6eb..4278e3c 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -3,6 +3,12 @@
*
* OMAP Dual-Mode Timers
*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * dmtimer adaptation to platform_driver.
+ *
* Copyright (C) 2005 Nokia Corporation
* OMAP2 support by Juha Yrjola
* API improvements and OMAP2 clock framework support by Timo Teras
@@ -29,17 +35,17 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/module.h>
-#include <mach/hardware.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
#include <plat/dmtimer.h>
-#include <mach/irqs.h>
+#include <plat/common.h>
+#include <plat/omap-pm.h>
/* register offsets */
#define _OMAP_TIMER_ID_OFFSET 0x00
@@ -150,170 +156,146 @@
#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
(_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-struct omap_dm_timer {
- unsigned long phys_base;
- int irq;
-#ifdef CONFIG_ARCH_OMAP2PLUS
- struct clk *iclk, *fclk;
-#endif
- void __iomem *io_base;
- unsigned reserved:1;
- unsigned enabled:1;
- unsigned posted:1;
-};
-
-static int dm_timer_count;
-
-#ifdef CONFIG_ARCH_OMAP1
-static struct omap_dm_timer omap1_dm_timers[] = {
- { .phys_base = 0xfffb1400, .irq = INT_1610_GPTIMER1 },
- { .phys_base = 0xfffb1c00, .irq = INT_1610_GPTIMER2 },
- { .phys_base = 0xfffb2400, .irq = INT_1610_GPTIMER3 },
- { .phys_base = 0xfffb2c00, .irq = INT_1610_GPTIMER4 },
- { .phys_base = 0xfffb3400, .irq = INT_1610_GPTIMER5 },
- { .phys_base = 0xfffb3c00, .irq = INT_1610_GPTIMER6 },
- { .phys_base = 0xfffb7400, .irq = INT_1610_GPTIMER7 },
- { .phys_base = 0xfffbd400, .irq = INT_1610_GPTIMER8 },
-};
-
-static const int omap1_dm_timer_count = ARRAY_SIZE(omap1_dm_timers);
-
-#else
-#define omap1_dm_timers NULL
-#define omap1_dm_timer_count 0
-#endif /* CONFIG_ARCH_OMAP1 */
-
-#ifdef CONFIG_ARCH_OMAP2
-static struct omap_dm_timer omap2_dm_timers[] = {
- { .phys_base = 0x48028000, .irq = INT_24XX_GPTIMER1 },
- { .phys_base = 0x4802a000, .irq = INT_24XX_GPTIMER2 },
- { .phys_base = 0x48078000, .irq = INT_24XX_GPTIMER3 },
- { .phys_base = 0x4807a000, .irq = INT_24XX_GPTIMER4 },
- { .phys_base = 0x4807c000, .irq = INT_24XX_GPTIMER5 },
- { .phys_base = 0x4807e000, .irq = INT_24XX_GPTIMER6 },
- { .phys_base = 0x48080000, .irq = INT_24XX_GPTIMER7 },
- { .phys_base = 0x48082000, .irq = INT_24XX_GPTIMER8 },
- { .phys_base = 0x48084000, .irq = INT_24XX_GPTIMER9 },
- { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
- { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
- { .phys_base = 0x4808a000, .irq = INT_24XX_GPTIMER12 },
-};
-
-static const char *omap2_dm_source_names[] __initdata = {
- "sys_ck",
- "func_32k_ck",
- "alt_ck",
- NULL
-};
-
-static struct clk *omap2_dm_source_clocks[3];
-static const int omap2_dm_timer_count = ARRAY_SIZE(omap2_dm_timers);
-
-#else
-#define omap2_dm_timers NULL
-#define omap2_dm_timer_count 0
-#define omap2_dm_source_names NULL
-#define omap2_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP2 */
-
-#ifdef CONFIG_ARCH_OMAP3
-static struct omap_dm_timer omap3_dm_timers[] = {
- { .phys_base = 0x48318000, .irq = INT_24XX_GPTIMER1 },
- { .phys_base = 0x49032000, .irq = INT_24XX_GPTIMER2 },
- { .phys_base = 0x49034000, .irq = INT_24XX_GPTIMER3 },
- { .phys_base = 0x49036000, .irq = INT_24XX_GPTIMER4 },
- { .phys_base = 0x49038000, .irq = INT_24XX_GPTIMER5 },
- { .phys_base = 0x4903A000, .irq = INT_24XX_GPTIMER6 },
- { .phys_base = 0x4903C000, .irq = INT_24XX_GPTIMER7 },
- { .phys_base = 0x4903E000, .irq = INT_24XX_GPTIMER8 },
- { .phys_base = 0x49040000, .irq = INT_24XX_GPTIMER9 },
- { .phys_base = 0x48086000, .irq = INT_24XX_GPTIMER10 },
- { .phys_base = 0x48088000, .irq = INT_24XX_GPTIMER11 },
- { .phys_base = 0x48304000, .irq = INT_34XX_GPT12_IRQ },
-};
-
-static const char *omap3_dm_source_names[] __initdata = {
- "sys_ck",
- "omap_32k_fck",
- NULL
-};
-
-static struct clk *omap3_dm_source_clocks[2];
-static const int omap3_dm_timer_count = ARRAY_SIZE(omap3_dm_timers);
-
-#else
-#define omap3_dm_timers NULL
-#define omap3_dm_timer_count 0
-#define omap3_dm_source_names NULL
-#define omap3_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP3 */
-
-#ifdef CONFIG_ARCH_OMAP4
-static struct omap_dm_timer omap4_dm_timers[] = {
- { .phys_base = 0x4a318000, .irq = OMAP44XX_IRQ_GPT1 },
- { .phys_base = 0x48032000, .irq = OMAP44XX_IRQ_GPT2 },
- { .phys_base = 0x48034000, .irq = OMAP44XX_IRQ_GPT3 },
- { .phys_base = 0x48036000, .irq = OMAP44XX_IRQ_GPT4 },
- { .phys_base = 0x40138000, .irq = OMAP44XX_IRQ_GPT5 },
- { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT6 },
- { .phys_base = 0x4013a000, .irq = OMAP44XX_IRQ_GPT7 },
- { .phys_base = 0x4013e000, .irq = OMAP44XX_IRQ_GPT8 },
- { .phys_base = 0x4803e000, .irq = OMAP44XX_IRQ_GPT9 },
- { .phys_base = 0x48086000, .irq = OMAP44XX_IRQ_GPT10 },
- { .phys_base = 0x48088000, .irq = OMAP44XX_IRQ_GPT11 },
- { .phys_base = 0x4a320000, .irq = OMAP44XX_IRQ_GPT12 },
-};
-static const char *omap4_dm_source_names[] __initdata = {
- "sys_clkin_ck",
- "sys_32k_ck",
- NULL
-};
-static struct clk *omap4_dm_source_clocks[2];
-static const int omap4_dm_timer_count = ARRAY_SIZE(omap4_dm_timers);
-
-#else
-#define omap4_dm_timers NULL
-#define omap4_dm_timer_count 0
-#define omap4_dm_source_names NULL
-#define omap4_dm_source_clocks NULL
-#endif /* CONFIG_ARCH_OMAP4 */
-
-static struct omap_dm_timer *dm_timers;
-static const char **dm_source_names;
-static struct clk **dm_source_clocks;
-
-static spinlock_t dm_timer_lock;
-
/*
- * Reads timer registers in posted and non-posted mode. The posted mode bit
- * is encoded in reg. Note that in posted mode write pending bit must be
- * checked. Otherwise a read of a non completed write will produce an error.
+ * OMAP4 IP revision has different register offsets
+ * for interrupt registers and functional registers.
+ */
+#define VERSION2_TIMER_WAKEUP_EN_REG_OFFSET 0x14
+#define VERSION2_TIMER_STAT_REG_OFFSET 0x10
+
+#define MAX_WRITE_PEND_WAIT 10000 /* 10ms timeout delay */
+
+static LIST_HEAD(omap_timer_list);
+static DEFINE_MUTEX(dm_timer_mutex);
+
+/**
+ * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
+ * @timer: timer on which the read operation is performed
+ * @reg: lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the
+ * write-pending bit must be checked; otherwise a read of a non-completed
+ * write will produce an error.
*/
static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
+ int i = 0;
+
+ if (reg >= OMAP_TIMER_WAKEUP_EN_REG)
+ reg += timer->func_offset;
+ else if (reg >= OMAP_TIMER_STAT_REG)
+ reg += timer->intr_offset;
+
+ if (timer->posted) {
+ omap_test_timeout(!(readl(timer->io_base +
+ ((OMAP_TIMER_WRITE_PEND_REG +
+ timer->func_offset) & 0xff)) & (reg >> WPSHIFT)),
+ MAX_WRITE_PEND_WAIT, i);
+
+ if (WARN_ON_ONCE(i == MAX_WRITE_PEND_WAIT))
+ dev_err(&timer->pdev->dev, "read timeout.\n");
+ }
+
return readl(timer->io_base + (reg & 0xff));
}
-/*
- * Writes timer registers in posted and non-posted mode. The posted mode bit
- * is encoded in reg. Note that in posted mode the write pending bit must be
- * checked. Otherwise a write on a register which has a pending write will be
- * lost.
+/**
+ * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
+ * @timer: timer on which the write operation is performed
+ * @reg: lowest byte holds the register offset
+ * @value: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the
+ * write-pending bit must be checked; otherwise a write to a register that
+ * has a pending write will be lost.
*/
static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
u32 value)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
+ int i = 0;
+
+ if (reg >= OMAP_TIMER_WAKEUP_EN_REG)
+ reg += timer->func_offset;
+ else if (reg >= OMAP_TIMER_STAT_REG)
+ reg += timer->intr_offset;
+
+ if (timer->posted) {
+ omap_test_timeout(!(readl(timer->io_base +
+ ((OMAP_TIMER_WRITE_PEND_REG +
+ timer->func_offset) & 0xff)) & (reg >> WPSHIFT)),
+ MAX_WRITE_PEND_WAIT, i);
+
+ if (WARN_ON(i == MAX_WRITE_PEND_WAIT))
+ dev_err(&timer->pdev->dev, "write timeout.\n");
+ }
+
writel(value, timer->io_base + (reg & 0xff));
}
+static void omap_timer_save_context(struct omap_dm_timer *timer)
+{
+ timer->context.tiocp_cfg =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
+ timer->context.tistat =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_SYS_STAT_REG);
+ timer->context.tisr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ timer->context.tier =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_INT_EN_REG);
+ timer->context.twer =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tclr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.tcrr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ timer->context.tldr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tsicr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
+}
+
+static void omap_timer_restore_context(struct omap_dm_timer *timer)
+{
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG,
+ timer->context.tiocp_cfg);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_SYS_STAT_REG,
+ timer->context.tistat);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
+ timer->context.tisr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG,
+ timer->context.tier);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
+ timer->context.twer);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
+ timer->context.tclr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
+ timer->context.tcrr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
+ timer->context.tldr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
+ timer->context.tmar);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
+ timer->context.tsicr);
+}
+
+static void __timer_enable(struct omap_dm_timer *timer)
+{
+ if (!timer->enabled) {
+ pm_runtime_get_sync(&timer->pdev->dev);
+ timer->enabled = 1;
+ }
+}
+
+static void __timer_disable(struct omap_dm_timer *timer)
+{
+ if (timer->enabled) {
+ pm_runtime_put_sync_suspend(&timer->pdev->dev);
+ timer->enabled = 0;
+ }
+}
+
static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
{
int c;
@@ -332,58 +314,89 @@
{
u32 l;
- if (!cpu_class_is_omap2() || timer != &dm_timers[0]) {
+ if (!timer->is_early_init)
+ __timer_enable(timer);
+
+ if (timer->pdev->id != 1) {
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
omap_dm_timer_wait_for_reset(timer);
}
- omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
l |= 0x02 << 3; /* Set to smart-idle mode */
l |= 0x2 << 8; /* Set clock activity to perserve f-clock on idle */
-
- /* Enable autoidle on OMAP2 / OMAP3 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- l |= 0x1 << 0;
-
- /*
- * Enable wake-up on OMAP2 CPUs.
- */
- if (cpu_class_is_omap2())
- l |= 1 << 2;
omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+}
+
+static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+ int ret;
+
+ timer->fclk = clk_get(&timer->pdev->dev, "fck");
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
+ timer->fclk = NULL;
+ dev_err(&timer->pdev->dev, ": No fclk handle.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(timer->is_early_init)) {
+ ret = clk_enable(timer->fclk);
+ if (ret) {
+ clk_put(timer->fclk);
+ return -EINVAL;
+ }
+ goto end;
+ }
+
+ if (timer->needs_manual_reset)
+ omap_dm_timer_reset(timer);
+
+ omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+
+end:
+ if (!timer->is_early_init)
+ __timer_enable(timer);
+
/* Match hardware reset default of posted mode */
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
OMAP_TIMER_CTRL_POSTED);
- timer->posted = 1;
-}
-static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
-{
- omap_dm_timer_enable(timer);
- omap_dm_timer_reset(timer);
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+
+ timer->posted = 1;
+ return 0;
}
struct omap_dm_timer *omap_dm_timer_request(void)
{
- struct omap_dm_timer *timer = NULL;
- unsigned long flags;
- int i;
+ struct omap_dm_timer *timer = NULL, *t;
+ int ret;
- spin_lock_irqsave(&dm_timer_lock, flags);
- for (i = 0; i < dm_timer_count; i++) {
- if (dm_timers[i].reserved)
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(t, &omap_timer_list, node) {
+ if (t->reserved)
continue;
- timer = &dm_timers[i];
+ timer = t;
timer->reserved = 1;
+ timer->enabled = 0;
break;
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
+ mutex_unlock(&dm_timer_mutex);
- if (timer != NULL)
- omap_dm_timer_prepare(timer);
+ if (!timer) {
+ pr_debug("%s: free timer not available.\n", __func__);
+ return NULL;
+ }
+ ret = omap_dm_timer_prepare(timer);
+ if (ret) {
+ timer->reserved = 0;
+ return NULL;
+ }
return timer;
}
@@ -391,74 +404,88 @@
struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
- struct omap_dm_timer *timer;
- unsigned long flags;
+ struct omap_dm_timer *timer = NULL, *t;
+ int ret;
- spin_lock_irqsave(&dm_timer_lock, flags);
- if (id <= 0 || id > dm_timer_count || dm_timers[id-1].reserved) {
- spin_unlock_irqrestore(&dm_timer_lock, flags);
- printk("BUG: warning at %s:%d/%s(): unable to get timer %d\n",
- __FILE__, __LINE__, __func__, id);
- dump_stack();
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(t, &omap_timer_list, node) {
+ if (t->pdev->id == id && !t->reserved) {
+ timer = t;
+ timer->reserved = 1;
+ timer->enabled = 0;
+ break;
+ }
+ }
+ mutex_unlock(&dm_timer_mutex);
+
+ if (!timer) {
+ pr_debug("%s: timer%d not available.\n", __func__, id);
return NULL;
}
-
- timer = &dm_timers[id-1];
- timer->reserved = 1;
- spin_unlock_irqrestore(&dm_timer_lock, flags);
-
- omap_dm_timer_prepare(timer);
+ ret = omap_dm_timer_prepare(timer);
+ if (ret) {
+ timer->reserved = 0;
+ return NULL;
+ }
return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);
-void omap_dm_timer_free(struct omap_dm_timer *timer)
+int omap_dm_timer_free(struct omap_dm_timer *timer)
{
- omap_dm_timer_enable(timer);
- omap_dm_timer_reset(timer);
- omap_dm_timer_disable(timer);
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
- WARN_ON(!timer->reserved);
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->reserved) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
+ }
+
+ __timer_disable(timer);
+ clk_put(timer->fclk);
+
timer->reserved = 0;
+ timer->context_saved = false;
+
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_free);
-void omap_dm_timer_enable(struct omap_dm_timer *timer)
+int omap_dm_timer_enable(struct omap_dm_timer *timer)
{
- if (timer->enabled)
- return;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- clk_enable(timer->fclk);
- clk_enable(timer->iclk);
- }
-#endif
-
- timer->enabled = 1;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
-void omap_dm_timer_disable(struct omap_dm_timer *timer)
+int omap_dm_timer_disable(struct omap_dm_timer *timer)
{
- if (!timer->enabled)
- return;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- clk_disable(timer->iclk);
- clk_disable(timer->fclk);
- }
-#endif
-
- timer->enabled = 0;
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
{
- return timer->irq;
+ if (timer)
+ return timer->irq;
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);
@@ -470,24 +497,29 @@
*/
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
- int i;
+ int i = 0;
+ struct omap_dm_timer *timer = NULL;
/* If ARMXOR cannot be idled this function call is unnecessary */
if (!(inputmask & (1 << 1)))
return inputmask;
/* If any active timer is using ARMXOR return modified mask */
- for (i = 0; i < dm_timer_count; i++) {
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(timer, &omap_timer_list, node) {
+
u32 l;
- l = omap_dm_timer_read_reg(&dm_timers[i], OMAP_TIMER_CTRL_REG);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
else
inputmask &= ~(1 << 2);
}
+ i++;
}
+ mutex_unlock(&dm_timer_mutex);
return inputmask;
}
@@ -497,7 +529,9 @@
struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
- return timer->fclk;
+ if (timer)
+ return timer->fclk;
+ return NULL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);
@@ -511,75 +545,116 @@
#endif
-void omap_dm_timer_trigger(struct omap_dm_timer *timer)
+int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);
-void omap_dm_timer_start(struct omap_dm_timer *timer)
+int omap_dm_timer_start(struct omap_dm_timer *timer)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->loses_context) {
+ __timer_enable(timer);
+ if (omap_pm_was_context_lost(&timer->pdev->dev) &&
+ timer->context_saved) {
+ omap_timer_restore_context(timer);
+ timer->context_saved = false;
+ }
+ }
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_start);
-void omap_dm_timer_stop(struct omap_dm_timer *timer)
+int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
u32 l;
+ struct dmtimer_platform_data *pdata;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->enabled) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
+ }
+
+ pdata = timer->pdev->dev.platform_data;
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
-#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / clk_get_rate(timer->fclk) + 1);
-#endif
+
+ if (!pdata->needs_manual_reset) {
+ /* Readback to make sure write has completed */
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ /*
+ * Wait for functional clock period x 3.5 to make
+ * sure that timer is stopped
+ */
+ udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+ }
}
/* Ack possibly pending interrupt */
omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
OMAP_TIMER_INT_OVERFLOW);
+
+ if (timer->loses_context) {
+ omap_timer_save_context(timer);
+ timer->context_saved = true;
+ __timer_disable(timer);
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
-#ifdef CONFIG_ARCH_OMAP1
-
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
- int n = (timer - dm_timers) << 1;
- u32 l;
+ int ret;
+ struct dmtimer_platform_data *pdata;
+ unsigned long flags;
- l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
- l |= source << n;
- omap_writel(l, MOD_CONF_CTRL_1);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
-
-#else
-
-int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
-{
- int ret = -EINVAL;
+ if (!timer)
+ return -EINVAL;
if (source < 0 || source >= 3)
return -EINVAL;
- clk_disable(timer->fclk);
- ret = clk_set_parent(timer->fclk, dm_source_clocks[source]);
- clk_enable(timer->fclk);
+ spin_lock_irqsave(&timer->lock, flags);
+ pdata = timer->pdev->dev.platform_data;
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+
+ /* change the timer clock source */
+ ret = pdata->set_timer_src(timer->pdev, source);
/*
* When the functional clock disappears, too quick writes seem
@@ -591,13 +666,17 @@
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
-#endif
-
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
@@ -607,14 +686,30 @@
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);
-/* Optimized set_load which removes costly spin wait in timer_start */
-void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
unsigned int load)
{
u32 l;
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->loses_context) {
+ __timer_enable(timer);
+ if (omap_pm_was_context_lost(&timer->pdev->dev) &&
+ timer->context_saved) {
+ omap_timer_restore_context(timer);
+ timer->context_saved = false;
+ }
+ }
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (autoreload) {
@@ -627,14 +722,22 @@
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load);
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
u32 l;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
@@ -642,14 +745,23 @@
l &= ~OMAP_TIMER_CTRL_CE;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);
-void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger)
{
u32 l;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10));
@@ -659,13 +771,22 @@
l |= OMAP_TIMER_CTRL_PT;
l |= trigger << 10;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
-void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
+int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
u32 l;
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ __timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0x00 && prescaler <= 0x07) {
@@ -673,58 +794,115 @@
l |= prescaler << 2;
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
unsigned int value)
{
+ unsigned long flags;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (!timer->is_early_init)
+ __timer_enable(timer);
+
omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
+
+ if (!timer->is_early_init)
+ __timer_disable(timer);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
- unsigned int l;
+ unsigned long flags;
+ unsigned int ret;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ if (WARN_ON(!timer))
+ return -EINVAL;
- return l;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ ret = omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return ret;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ WARN_ON(!timer->enabled);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
- unsigned int l;
+ unsigned long flags;
+ unsigned int ret;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ if (WARN_ON(!timer))
+ return -EINVAL;
- return l;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ ret = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return ret;
+ }
+
+ spin_unlock_irqrestore(&timer->lock, flags);
+ WARN_ON(!timer->enabled);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
-void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ unsigned long flags;
+
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->is_early_init || timer->enabled) {
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);
int omap_dm_timers_active(void)
{
- int i;
+ struct omap_dm_timer *timer;
- for (i = 0; i < dm_timer_count; i++) {
- struct omap_dm_timer *timer;
-
- timer = &dm_timers[i];
-
+ list_for_each_entry(timer, &omap_timer_list, node) {
if (!timer->enabled)
continue;
@@ -737,61 +915,146 @@
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);
-int __init omap_dm_timer_init(void)
+/**
+ * omap_dm_timer_probe - probe function called for every registered device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by the driver framework at the end of device registration for all
+ * timer devices.
+ */
+static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
+ int ret;
struct omap_dm_timer *timer;
- int i, map_size = SZ_8K; /* Module 4KB + L4 4KB except on omap1 */
+ struct resource *mem, *irq, *ioarea;
+ struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
- if (!(cpu_is_omap16xx() || cpu_class_is_omap2()))
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
return -ENODEV;
-
- spin_lock_init(&dm_timer_lock);
-
- if (cpu_class_is_omap1()) {
- dm_timers = omap1_dm_timers;
- dm_timer_count = omap1_dm_timer_count;
- map_size = SZ_2K;
- } else if (cpu_is_omap24xx()) {
- dm_timers = omap2_dm_timers;
- dm_timer_count = omap2_dm_timer_count;
- dm_source_names = omap2_dm_source_names;
- dm_source_clocks = omap2_dm_source_clocks;
- } else if (cpu_is_omap34xx()) {
- dm_timers = omap3_dm_timers;
- dm_timer_count = omap3_dm_timer_count;
- dm_source_names = omap3_dm_source_names;
- dm_source_clocks = omap3_dm_source_clocks;
- } else if (cpu_is_omap44xx()) {
- dm_timers = omap4_dm_timers;
- dm_timer_count = omap4_dm_timer_count;
- dm_source_names = omap4_dm_source_names;
- dm_source_clocks = omap4_dm_source_clocks;
}
- if (cpu_class_is_omap2())
- for (i = 0; dm_source_names[i] != NULL; i++)
- dm_source_clocks[i] = clk_get(NULL, dm_source_names[i]);
-
- if (cpu_is_omap243x())
- dm_timers[0].phys_base = 0x49018000;
-
- for (i = 0; i < dm_timer_count; i++) {
- timer = &dm_timers[i];
-
- /* Static mapping, never released */
- timer->io_base = ioremap(timer->phys_base, map_size);
- BUG_ON(!timer->io_base);
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if (cpu_class_is_omap2()) {
- char clk_name[16];
- sprintf(clk_name, "gpt%d_ick", i + 1);
- timer->iclk = clk_get(NULL, clk_name);
- sprintf(clk_name, "gpt%d_fck", i + 1);
- timer->fclk = clk_get(NULL, clk_name);
- }
-#endif
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(!irq)) {
+ dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
+ return -ENODEV;
}
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!mem)) {
+ dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
+ return -ENODEV;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
+ return -EBUSY;
+ }
+
+ timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
+ if (!timer) {
+ dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_release_ioregion;
+ }
+
+ timer->io_base = ioremap(mem->start, resource_size(mem));
+ if (!timer->io_base) {
+ dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ if (pdata->timer_ip_type == OMAP_TIMER_IP_VERSION_2) {
+ timer->func_offset = VERSION2_TIMER_WAKEUP_EN_REG_OFFSET;
+ timer->intr_offset = VERSION2_TIMER_STAT_REG_OFFSET;
+ }
+
+ timer->irq = irq->start;
+ timer->pdev = pdev;
+ timer->is_early_init = pdata->is_early_init;
+ timer->needs_manual_reset = pdata->needs_manual_reset;
+ timer->loses_context = pdata->loses_context;
+
+ spin_lock_init(&timer->lock);
+ /* Skip pm_runtime_enable during early boot and for OMAP1 */
+ if (!pdata->is_early_init && !pdata->needs_manual_reset) {
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+ }
+
+ /* add the timer element to the list */
+ mutex_lock(&dm_timer_mutex);
+ list_add_tail(&timer->node, &omap_timer_list);
+ mutex_unlock(&dm_timer_mutex);
+
+ dev_dbg(&pdev->dev, "Device Probed.\n");
+
return 0;
+
+err_free_mem:
+ kfree(timer);
+
+err_release_ioregion:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return ret;
}
+
+/**
+ * omap_dm_timer_remove - cleanup a registered timer device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by the driver framework whenever a timer device is unregistered.
+ * In addition to freeing platform resources it also deletes the timer
+ * entry from the local list.
+ */
+static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
+{
+ struct omap_dm_timer *timer;
+ int ret = -EINVAL;
+
+ mutex_lock(&dm_timer_mutex);
+ list_for_each_entry(timer, &omap_timer_list, node) {
+ if (timer->pdev->id == pdev->id) {
+ list_del(&timer->node);
+ kfree(timer);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&dm_timer_mutex);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver omap_dm_timer_driver = {
+ .probe = omap_dm_timer_probe,
+ .remove = omap_dm_timer_remove,
+ .driver = {
+ .name = "omap_timer",
+ },
+};
+
+static int __init omap_dm_timer_driver_init(void)
+{
+ return platform_driver_register(&omap_dm_timer_driver);
+}
+
+static void __exit omap_dm_timer_driver_exit(void)
+{
+ platform_driver_unregister(&omap_dm_timer_driver);
+}
+
+early_platform_init("earlytimer", &omap_dm_timer_driver);
+module_init(omap_dm_timer_driver_init);
+module_exit(omap_dm_timer_driver_exit);
+
+MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
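
The rework above turns dmtimer into a platform driver and makes the public calls return
error codes instead of void. A minimal usage sketch under those assumptions (the load
value is arbitrary, chosen only for illustration):

	/* sketch only: error-checked use of the reworked dmtimer API */
	static struct omap_dm_timer *example_timer;

	static int example_timer_start(void)
	{
		int ret;

		example_timer = omap_dm_timer_request();
		if (!example_timer)
			return -ENODEV;

		ret = omap_dm_timer_set_source(example_timer, OMAP_TIMER_SRC_32_KHZ);
		if (!ret)
			ret = omap_dm_timer_set_load_start(example_timer, 1,
							   0xffff0000);
		if (ret)
			omap_dm_timer_free(example_timer);
		return ret;
	}
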
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 3341ca4..f8dc82f 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -113,16 +113,6 @@
#ifdef CONFIG_ARCH_OMAP2PLUS
-/*
- * XXX This function is a temporary compatibility wrapper - only
- * needed until the I2C driver can be converted to call
- * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
- */
-static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
-{
- omap_pm_set_max_mpu_wakeup_lat(dev, t);
-}
-
static struct omap_device_pm_latency omap_i2c_latency[] = {
[0] = {
.deactivate_func = omap_device_idle_hwmods,
@@ -158,8 +148,8 @@
* completes.
* Only omap3 has support for constraints
*/
- if (cpu_is_omap34xx())
- pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ pdata->needs_wakeup_latency = true;
od = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
omap_i2c_latency, ARRAY_SIZE(omap_i2c_latency), 0);
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index f1899a3e..324446b 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -39,11 +39,13 @@
#define CK_36XX (1 << 10) /* 36xx/37xx-specific clocks */
#define CK_443X (1 << 11)
#define CK_TI816X (1 << 12)
+#define CK_446X (1 << 13)
#define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
#define CK_AM35XX (CK_3505 | CK_3517) /* all Sitara AM35xx */
#define CK_3XXX (CK_34XX | CK_AM35XX | CK_36XX)
+#define CK_44XX (CK_443X | CK_446X)
#endif
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 006e599..12a9ced 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -56,12 +56,14 @@
#define RATE_IN_3430ES1 (1 << 2) /* 3430ES1 rates only */
#define RATE_IN_3430ES2PLUS (1 << 3) /* 3430 ES >= 2 rates only */
#define RATE_IN_36XX (1 << 4)
-#define RATE_IN_4430 (1 << 5)
+#define RATE_IN_443X (1 << 5)
#define RATE_IN_TI816X (1 << 6)
+#define RATE_IN_446X (1 << 7)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
#define RATE_IN_34XX (RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
#define RATE_IN_3XXX (RATE_IN_34XX | RATE_IN_36XX)
+#define RATE_IN_44XX (RATE_IN_443X | RATE_IN_446X)
/* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */
#define RATE_IN_3430ES2PLUS_36XX (RATE_IN_3430ES2PLUS | RATE_IN_36XX)
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 5288130..c8a65ba 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -53,6 +53,7 @@
unsigned long sms; /* SDRAM Memory Scheduler */
unsigned long ctrl; /* System Control Module */
unsigned long ctrl_pad; /* PAD Control Module */
+ unsigned long ctrl_wk_pad; /* PAD Control WakeUp Module */
unsigned long prm; /* Power and Reset Management */
unsigned long cm; /* Clock Management */
unsigned long cm2;
@@ -96,5 +97,6 @@
extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
extern struct device *omap4_get_dsp_device(void);
+extern struct device *omap4_get_fdif_device(void);
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 8198bb6..6c9fc01 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -45,7 +45,7 @@
int omap_type(void);
struct omap_chip_id {
- u16 oc;
+ u32 oc;
u8 type;
};
@@ -88,6 +88,7 @@
* cpu_is_omap243x(): True for OMAP2430
* cpu_is_omap343x(): True for OMAP3430
* cpu_is_omap443x(): True for OMAP4430
+ * cpu_is_omap446x(): True for OMAP4460
*/
#define GET_OMAP_CLASS (omap_rev() & 0xff)
@@ -123,6 +124,7 @@
IS_OMAP_SUBCLASS(343x, 0x343)
IS_OMAP_SUBCLASS(363x, 0x363)
IS_OMAP_SUBCLASS(443x, 0x443)
+IS_OMAP_SUBCLASS(446x, 0x446)
IS_TI_SUBCLASS(816x, 0x816)
@@ -137,6 +139,7 @@
#define cpu_is_ti816x() 0
#define cpu_is_omap44xx() 0
#define cpu_is_omap443x() 0
+#define cpu_is_omap446x() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
@@ -361,8 +364,10 @@
# if defined(CONFIG_ARCH_OMAP4)
# undef cpu_is_omap44xx
# undef cpu_is_omap443x
+# undef cpu_is_omap446x
# define cpu_is_omap44xx() is_omap44xx()
# define cpu_is_omap443x() is_omap443x()
+# define cpu_is_omap446x() is_omap446x()
# endif
/* Macros to detect if we have OMAP1 or OMAP2 */
@@ -410,6 +415,10 @@
#define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
#define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
+#define OMAP446X_CLASS 0x44600044
+#define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8))
+#define OMAP4460_REV_ES1_1 (OMAP446X_CLASS | (0x11 << 8))
+
/*
* omap_chip bits
*
@@ -439,14 +448,21 @@
#define CHIP_IS_OMAP4430ES2_1 (1 << 12)
#define CHIP_IS_OMAP4430ES2_2 (1 << 13)
#define CHIP_IS_TI816X (1 << 14)
+#define CHIP_IS_OMAP4460ES1_0 (1 << 15)
+#define CHIP_IS_OMAP4460ES1_1 (1 << 16)
#define CHIP_IS_OMAP24XX (CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430)
-#define CHIP_IS_OMAP4430 (CHIP_IS_OMAP4430ES1 | \
+#define CHIP_IS_OMAP443X (CHIP_IS_OMAP4430ES1 | \
CHIP_IS_OMAP4430ES2 | \
CHIP_IS_OMAP4430ES2_1 | \
CHIP_IS_OMAP4430ES2_2)
+#define CHIP_IS_OMAP446X (CHIP_IS_OMAP4460ES1_0 | \
+ CHIP_IS_OMAP4460ES1_1)
+
+#define CHIP_IS_OMAP44XX (CHIP_IS_OMAP443X | CHIP_IS_OMAP446X)
+
/*
* "GE" here represents "greater than or equal to" in terms of ES
* levels. So CHIP_GE_OMAP3430ES2 is intended to match all OMAP3430
@@ -494,4 +510,23 @@
OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
OMAP3_HAS_FEATURE(sdrc, SDRC)
+/*
+ * Runtime detection of OMAP4 features
+ */
+extern u32 omap4_features;
+
+#define OMAP4_HAS_MPU_1GHZ BIT(0)
+#define OMAP4_HAS_MPU_1_2GHZ BIT(1)
+#define OMAP4_HAS_MPU_1_5GHZ BIT(2)
+
+#define OMAP4_HAS_FEATURE(feat, flag) \
+static inline unsigned int omap4_has_ ##feat(void) \
+{ \
+ return omap4_features & OMAP4_HAS_ ##flag; \
+} \
+
+OMAP4_HAS_FEATURE(mpu_1ghz, MPU_1GHZ)
+OMAP4_HAS_FEATURE(mpu_1_2ghz, MPU_1_2GHZ)
+OMAP4_HAS_FEATURE(mpu_1_5ghz, MPU_1_5GHZ)
+
#endif
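
OMAP4_HAS_FEATURE() generates inline predicates such as omap4_has_mpu_1_5ghz(). As a
hedged sketch only (the opp_enable() call and device pointer are assumptions for the
example, not part of this patch), rate-dependent code could key off them like this:

	/* sketch only: enable the fastest MPU OPP the silicon supports */
	static void __init example_enable_fast_opp(struct device *mpu_dev)
	{
		if (omap4_has_mpu_1_5ghz())
			opp_enable(mpu_dev, 1500000000);
		else if (omap4_has_mpu_1_2ghz())
			opp_enable(mpu_dev, 1200000000);
	}
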
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index d6c70d2..aaa676f 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -35,6 +35,7 @@
#ifndef __ASM_ARCH_DMTIMER_H
#define __ASM_ARCH_DMTIMER_H
+#include <linux/spinlock_types.h>
/* clock sources */
#define OMAP_TIMER_SRC_SYS_CLK 0x00
#define OMAP_TIMER_SRC_32_KHZ 0x01
@@ -55,41 +56,103 @@
* in OMAP4 can be distinguished.
*/
#define OMAP_TIMER_IP_VERSION_1 0x1
-struct omap_dm_timer;
+#define OMAP_TIMER_IP_VERSION_2 0x2
+
+struct omap_secure_timer_dev_attr {
+ bool is_secure_timer;
+};
+
+struct timer_regs {
+ u32 tidr;
+ u32 tiocp_cfg;
+ u32 tistat;
+ u32 tisr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct omap_dm_timer {
+ int irq;
+ struct clk *fclk;
+ void __iomem *io_base;
+ unsigned reserved:1;
+ unsigned enabled:1;
+ unsigned posted:1;
+ unsigned is_early_init:1;
+ unsigned needs_manual_reset:1;
+ spinlock_t lock;
+ u8 func_offset;
+ u8 intr_offset;
+ bool loses_context;
+ bool context_saved;
+ u32 ctx_loss_count;
+ struct timer_regs context;
+ struct platform_device *pdev;
+ struct list_head node;
+
+};
+
extern struct omap_dm_timer *gptimer_wakeup;
extern struct sys_timer omap_timer;
struct clk;
-int omap_dm_timer_init(void);
+struct dmtimer_platform_data {
+ int (*set_timer_src)(struct platform_device *pdev, int source);
+ int timer_ip_type;
+ u32 is_early_init:1;
+ u32 needs_manual_reset:1;
+ bool loses_context;
+
+};
struct omap_dm_timer *omap_dm_timer_request(void);
struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
-void omap_dm_timer_free(struct omap_dm_timer *timer);
-void omap_dm_timer_enable(struct omap_dm_timer *timer);
-void omap_dm_timer_disable(struct omap_dm_timer *timer);
+int omap_dm_timer_free(struct omap_dm_timer *timer);
+int omap_dm_timer_enable(struct omap_dm_timer *timer);
+int omap_dm_timer_disable(struct omap_dm_timer *timer);
int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer);
-void omap_dm_timer_trigger(struct omap_dm_timer *timer);
-void omap_dm_timer_start(struct omap_dm_timer *timer);
-void omap_dm_timer_stop(struct omap_dm_timer *timer);
+int omap_dm_timer_trigger(struct omap_dm_timer *timer);
+int omap_dm_timer_start(struct omap_dm_timer *timer);
+int omap_dm_timer_stop(struct omap_dm_timer *timer);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source);
-void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int value);
-void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int value);
-void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match);
-void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger);
-void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler);
+int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+ unsigned int value);
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
+ int autoreload, unsigned int value);
+int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+ unsigned int match);
+int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger);
+int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler);
-void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+ unsigned int value);
unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer);
-void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer);
-void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value);
+int omap_dm_timer_write_counter(struct omap_dm_timer *timer,
+ unsigned int value);
int omap_dm_timers_active(void);
diff --git a/arch/arm/plat-omap/include/plat/dsp.h b/arch/arm/plat-omap/include/plat/dsp.h
index 9c604b3..14f1228 100644
--- a/arch/arm/plat-omap/include/plat/dsp.h
+++ b/arch/arm/plat-omap/include/plat/dsp.h
@@ -24,8 +24,12 @@
#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
extern void omap_dsp_reserve_sdram_memblock(void);
+phys_addr_t omap_dsp_get_mempool_size(void);
+phys_addr_t omap_dsp_get_mempool_base(void);
#else
static inline void omap_dsp_reserve_sdram_memblock(void) { }
+static inline phys_addr_t omap_dsp_get_mempool_size(void) { return 0; }
+static inline phys_addr_t omap_dsp_get_mempool_base(void) { return 0; }
#endif
#endif
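
With both accessors available, the dspbridge driver can size its mapping from platform
code instead of a hard-coded constant. A hedged sketch, with error handling reduced to
the minimum:

	/* sketch only: map the reserved DSP carveout */
	static void __iomem *example_map_dsp_pool(void)
	{
		phys_addr_t base = omap_dsp_get_mempool_base();
		phys_addr_t size = omap_dsp_get_mempool_size();

		if (!base || !size)
			return NULL;
		return ioremap(base, size);
	}
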
diff --git a/arch/arm/plat-omap/include/plat/dsscomp.h b/arch/arm/plat-omap/include/plat/dsscomp.h
new file mode 100644
index 0000000..d41b73a
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/dsscomp.h
@@ -0,0 +1,25 @@
+#ifndef _ARCH_ARM_PLAT_OMAP_DSSCOMP_H
+#define _ARCH_ARM_PLAT_OMAP_DSSCOMP_H
+
+#include <video/omapdss.h>
+
+/* queuing operations */
+typedef struct dsscomp_data *dsscomp_t; /* handle */
+
+dsscomp_t dsscomp_new(struct omap_overlay_manager *mgr);
+u32 dsscomp_get_ovls(dsscomp_t comp);
+int dsscomp_set_ovl(dsscomp_t comp, struct dss2_ovl_info *ovl);
+int dsscomp_get_ovl(dsscomp_t comp, u32 ix, struct dss2_ovl_info *ovl);
+int dsscomp_set_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr);
+int dsscomp_get_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr);
+int dsscomp_setup(dsscomp_t comp, enum dsscomp_setup_mode mode,
+ struct dss2_rect_t win);
+int dsscomp_delayed_apply(dsscomp_t comp);
+void dsscomp_drop(dsscomp_t c);
+
+struct tiler_pa_info;
+int dsscomp_gralloc_queue(struct dsscomp_setup_dispc_data *d,
+ struct tiler_pa_info **pas,
+ bool early_callback,
+ void (*cb_fn)(void *, int), void *cb_arg);
+#endif
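
The header declares an opaque composition handle plus set/apply/drop operations. A hedged
usage sketch (the failure convention of dsscomp_new() is an assumption here, checked
defensively with IS_ERR_OR_NULL):

	/* sketch only: queue one overlay onto a manager */
	static int example_queue_frame(struct omap_overlay_manager *mgr,
				       struct dss2_ovl_info *ovl)
	{
		dsscomp_t comp = dsscomp_new(mgr);
		int ret;

		if (IS_ERR_OR_NULL(comp))
			return -ENOMEM;

		ret = dsscomp_set_ovl(comp, ovl);
		if (!ret)
			ret = dsscomp_delayed_apply(comp);
		if (ret)
			dsscomp_drop(comp);
		return ret;
	}
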
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00..90eae5c 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -162,35 +162,61 @@
IH_MPUIO_BASE + ((nr) & 0x0f) : \
IH_GPIO_BASE + (nr))
-#define METHOD_MPUIO 0
-#define METHOD_GPIO_1510 1
-#define METHOD_GPIO_1610 2
-#define METHOD_GPIO_7XX 3
-#define METHOD_GPIO_24XX 5
-#define METHOD_GPIO_44XX 6
-
struct omap_gpio_dev_attr {
int bank_width; /* GPIO bank width */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
};
-struct omap_gpio_platform_data {
- u16 virtual_irq_start;
- int bank_type;
- int bank_width; /* GPIO bank width */
- int bank_stride; /* Only needed for omap1 MPUIO */
- bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+struct omap_gpio_reg_offs {
+ u16 revision;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+ u16 set_dataout;
+ u16 clr_dataout;
+ u16 irqstatus;
+ u16 irqstatus2;
+ u16 irqenable;
+ u16 irqenable2;
+ u16 set_irqenable;
+ u16 clr_irqenable;
+ u16 debounce;
+ u16 debounce_en;
+ u16 ctrl;
+ u16 wkup_status;
+ u16 wkup_clear;
+ u16 wkup_set;
+ u16 leveldetect0;
+ u16 leveldetect1;
+ u16 risingdetect;
+ u16 fallingdetect;
+ u16 irqctrl;
+ u16 edgectrl1;
+ u16 edgectrl2;
+	/* Not applicable for OMAP2+ as the hwmod layer takes care of sysconfig */
+ u16 sysconfig;
+ u16 pinctrl;
+
+ bool irqenable_inv;
};
-/* TODO: Analyze removing gpio_bank_count usage from driver code */
-extern int gpio_bank_count;
+struct omap_gpio_platform_data {
+ u16 virtual_irq_start;
+ int bank_width; /* GPIO bank width */
+ int bank_stride; /* Only needed for omap1 MPUIO */
+ bool suspend_support; /* If Bank supports suspend/resume operations */
+ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+ bool loses_context; /* whether the bank would ever lose context */
+ bool is_mpuio; /* whether the bank is of type MPUIO */
+ u32 non_wakeup_gpios;
-extern void omap2_gpio_prepare_for_idle(int off_mode);
-extern void omap2_gpio_resume_after_idle(void);
+ struct omap_gpio_reg_offs *regs;
+};
+
+extern int omap2_gpio_prepare_for_idle(int off_mode, bool suspend);
+extern void omap2_gpio_resume_after_idle(int off_mode);
extern void omap_set_gpio_debounce(int gpio, int enable);
extern void omap_set_gpio_debounce_time(int gpio, int enable);
-extern void omap_gpio_save_context(void);
-extern void omap_gpio_restore_context(void);
/*-------------------------------------------------------------------------*/
/* Wrappers for "new style" GPIO calls, using the new infrastructure
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 1527929..d668790 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -148,8 +148,8 @@
extern int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write);
extern int gpmc_prefetch_reset(int cs);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
+extern void omap_gpmc_save_context(void);
+extern void omap_gpmc_restore_context(void);
extern int gpmc_read_status(int cmd);
extern int gpmc_cs_configure(int cs, int cmd, int wval);
extern int gpmc_nand_read(int cs, int cmd);
diff --git a/arch/arm/plat-omap/include/plat/gpu.h b/arch/arm/plat-omap/include/plat/gpu.h
new file mode 100644
index 0000000..0a6313b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/gpu.h
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/plat-omap/include/plat/gpu.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef OMAP_GPU_H
+#define OMAP_GPU_H
+
+#include <plat/omap-pm.h>
+#include <linux/platform_device.h>
+
+struct gpu_platform_data {
+
+ /* Number of overdrive frequencies */
+ unsigned int ovfreqs;
+
+ void (*set_min_bus_tput)(struct device *dev, u8 agent_id,
+ unsigned long r);
+ int (*device_scale) (struct device *req_dev, struct device *target_dev,
+ unsigned long rate);
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ int (*device_idle) (struct platform_device *pdev);
+};
+
+#endif
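
gpu_platform_data lets the board hand the GPU driver its runtime-PM and scaling hooks.
A hedged sketch of one plausible wiring using the generic omap_device helpers from
plat/omap_device.h (the pairing is an assumption, not mandated by this header):

	/* sketch only: hooks filled with the generic omap_device helpers */
	static struct gpu_platform_data example_gpu_pdata = {
		.ovfreqs	 = 1,
		.device_enable	 = omap_device_enable,
		.device_idle	 = omap_device_idle,
		.device_shutdown = omap_device_shutdown,
	};
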
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index d72ec85..a2f7d31 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -228,12 +228,12 @@
#define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE
/* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF2_PHYS + OMAP4_L3_PER_IO_OFFSET)
+#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + SZ_1M)
#define OMAP44XX_EMIF2_SIZE SZ_1M
#define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE
/* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_VIRT (OMAP44XX_DMM_PHYS + OMAP4_L3_PER_IO_OFFSET)
+#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + SZ_1M)
#define OMAP44XX_DMM_SIZE SZ_1M
/*
* ----------------------------------------------------------------------------
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9..ed33ddf 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -13,6 +13,8 @@
#ifndef __MACH_IOMMU_H
#define __MACH_IOMMU_H
+#include <linux/pm_qos_params.h>
+
struct iotlb_entry {
u32 da;
u32 pa;
@@ -28,7 +30,6 @@
struct iommu {
const char *name;
struct module *owner;
- struct clk *clk;
void __iomem *regbase;
struct device *dev;
void *isr_priv;
@@ -53,6 +54,10 @@
void *ctx; /* iommu context: registres saved area */
u32 da_start;
u32 da_end;
+ struct platform_device *pdev;
+ struct pm_qos_request_list *qos_request;
+ void *secure_ttb;
+ bool secure_mode;
};
struct cr_regs {
@@ -104,10 +109,12 @@
struct iommu_platform_data {
const char *name;
- const char *clk_name;
+ const char *oh_name;
const int nr_tlb_entries;
u32 da_start;
u32 da_end;
+ int irq;
+ void __iomem *io_base;
};
/* IOMMU errors */
@@ -174,6 +181,8 @@
void *priv),
void *isr_priv);
+extern int iommu_set_secure(const char *name, bool enable, void *data);
+
extern void iommu_save_ctx(struct iommu *obj);
extern void iommu_restore_ctx(struct iommu *obj);
diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h
index 10ad05f..45b2e36 100644
--- a/arch/arm/plat-omap/include/plat/iommu2.h
+++ b/arch/arm/plat-omap/include/plat/iommu2.h
@@ -36,6 +36,7 @@
#define MMU_READ_CAM 0x68
#define MMU_READ_RAM 0x6c
#define MMU_EMU_FAULT_AD 0x70
+#define MMU_GP_REG 0x88
#define MMU_REG_SIZE 256
diff --git a/arch/arm/plat-omap/include/plat/irqs-44xx.h b/arch/arm/plat-omap/include/plat/irqs-44xx.h
index 518322c..78839f1 100644
--- a/arch/arm/plat-omap/include/plat/irqs-44xx.h
+++ b/arch/arm/plat-omap/include/plat/irqs-44xx.h
@@ -141,4 +141,13 @@
#define OMAP44XX_IRQ_KBD_CTL (120 + OMAP44XX_IRQ_GIC_START)
#define OMAP44XX_IRQ_UNIPRO1 (124 + OMAP44XX_IRQ_GIC_START)
+/*
+ * GIC interrupts 54, 55, 60, 105, 106, 121, 122, 123, 125, and 127 are tied
+ * low, and can be repurposed as SW triggered IRQs
+ */
+#define OMAP44XX_IRQ_FIQ_DEBUGGER (54 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_THERMAL_PROXY (55 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_CPUIDLE_POKE0 (60 + OMAP44XX_IRQ_GIC_START)
+#define OMAP44XX_IRQ_CPUIDLE_POKE1 (105 + OMAP44XX_IRQ_GIC_START)
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 5a25098..2cfba51 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -407,11 +407,19 @@
#endif
#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS)
+#define TWL6040_CODEC_IRQ_BASE TWL6030_IRQ_END
+#ifdef CONFIG_TWL6040_CODEC
+#define TWL6040_CODEC_NR_IRQS 6
+#else
+#define TWL6040_CODEC_NR_IRQS 0
+#endif
+#define TWL6040_CODEC_IRQ_END (TWL6040_CODEC_IRQ_BASE + TWL6040_CODEC_NR_IRQS)
+
/* Total number of interrupts depends on the enabled blocks above */
-#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END)
+#if (TWL4030_GPIO_IRQ_END > TWL6040_CODEC_IRQ_END)
#define TWL_IRQ_END TWL4030_GPIO_IRQ_END
#else
-#define TWL_IRQ_END TWL6030_IRQ_END
+#define TWL_IRQ_END TWL6040_CODEC_IRQ_END
#endif
/* GPMC related */
diff --git a/arch/arm/plat-omap/include/plat/mcasp.h b/arch/arm/plat-omap/include/plat/mcasp.h
new file mode 100644
index 0000000..aead0a0
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/mcasp.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_PLAT_MCASP_H__
+#define __OMAP_PLAT_MCASP_H__
+
+#include <linux/platform_device.h>
+
+/* The SPDIF bit clock is derived from the McASP functional clock.
+ * The McASP has two programmable clock dividers (aclkxdiv and
+ * ahclkxdiv) that are configured via the registers MCASP_ACLKXCTL
+ * and MCASP_AHCLKXCTL. For SPDIF the bit clock frequency should be
+ * 128 * the sample rate. Therefore:
+ *
+ * McASP functional clock = aclkxdiv * ahclkxdiv * 128 * sample rate
+ *
+ * For each sample rate supported the user must define the aclkxdiv
+ * and ahclkxdiv values that are passed to the McASP driver via the
+ * following structure. The McASP functional clock frequency can also be
+ * configured; it is passed to the McASP driver via the
+ * omap_mcasp_platform_data structure below.
+ */
+struct omap_mcasp_configs {
+ unsigned int sampling_rate;
+ u16 aclkxdiv;
+ u16 ahclkxdiv;
+};
+
+struct omap_mcasp_platform_data {
+ unsigned long mcasp_fclk_rate;
+ struct omap_mcasp_configs *mcasp_configs;
+ unsigned int num_configs;
+};
+
+void omap_init_mcasp(struct omap_mcasp_platform_data *pdata);
+
+#endif
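A worked example of the formula above: with a 24.576 MHz functional clock and a 48 kHz stream, aclkxdiv * ahclkxdiv must equal 24576000 / (128 * 48000) = 4. The board-code sketch below is illustrative only; the clock rate and the 2 x 2 divider split are assumptions, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/init.h>
#include <plat/mcasp.h>

/* 24576000 / (128 * 48000) = 4, split here as aclkxdiv = 2, ahclkxdiv = 2 */
static struct omap_mcasp_configs board_mcasp_configs[] = {
	{
		.sampling_rate	= 48000,
		.aclkxdiv	= 2,
		.ahclkxdiv	= 2,
	},
};

static struct omap_mcasp_platform_data board_mcasp_data = {
	.mcasp_fclk_rate = 24576000,
	.mcasp_configs	 = board_mcasp_configs,
	.num_configs	 = ARRAY_SIZE(board_mcasp_configs),
};

static void __init board_spdif_init(void)
{
	omap_init_mcasp(&board_mcasp_data);
}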
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index f8f690a..ddada0b 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -403,6 +403,8 @@
#endif
u16 buffer_size;
unsigned int mcbsp_config_type;
+ char clks_pad_src[30];
+ char clks_prcm_src[30];
};
struct omap_mcbsp_st_data {
@@ -448,6 +450,8 @@
struct clk *fclk;
#ifdef CONFIG_ARCH_OMAP3
struct omap_mcbsp_st_data *st_data;
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
int dma_op_mode;
u16 max_tx_thres;
u16 max_rx_thres;
@@ -474,7 +478,7 @@
void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
struct omap_mcbsp_platform_data *config, int size);
void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
diff --git a/arch/arm/plat-omap/include/plat/mcpdm.h b/arch/arm/plat-omap/include/plat/mcpdm.h
new file mode 100644
index 0000000..19ae03b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/mcpdm.h
@@ -0,0 +1,28 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_PLAT_MCPDM_H__
+#define __OMAP_PLAT_MCPDM_H__
+
+#include <linux/platform_device.h>
+
+struct omap_mcpdm_platform_data {
+ bool (*was_context_lost)(struct device *dev);
+};
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h
index 3d51b18..091caa1 100644
--- a/arch/arm/plat-omap/include/plat/mcspi.h
+++ b/arch/arm/plat-omap/include/plat/mcspi.h
@@ -21,6 +21,8 @@
/* Do we want one channel enabled at the same time? */
unsigned single_channel:1;
+ /* Swap data lines */
+ unsigned swap_datalines;
};
#endif
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index c7b8741..b95aabd 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <asm/mach/mmc.h>
#include <plat/board.h>
#define OMAP15XX_NR_MMC 1
@@ -60,9 +61,6 @@
int (*suspend)(struct device *dev, int slot);
int (*resume)(struct device *dev, int slot);
- /* Return context loss count due to PM states changing */
- int (*get_context_loss_count)(struct device *dev);
-
u64 dma_mask;
/* Integrating attributes from the omap_hwmod layer */
@@ -108,8 +106,9 @@
unsigned vcc_aux_disable_is_sleep:1;
/* we can put the features above into this variable */
-#define HSMMC_HAS_PBIAS (1 << 0)
-#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_PBIAS (1 << 0)
+#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_48MHZ_MASTER_CLK (1 << 2)
unsigned features;
int switch_pin; /* gpio (card detect) */
@@ -146,6 +145,9 @@
int card_detect_irq;
int (*card_detect)(struct device *dev, int slot);
+ /* Additional mmc configuration */
+ struct mmc_platform_data mmc_data;
+
unsigned int ban_openended:1;
} slots[OMAP_MMC_MAX_SLOTS];
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index c0a7520..2efbff5 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -17,7 +17,9 @@
#include <linux/device.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
+#include <linux/pm_qos_params.h>
#include <linux/opp.h>
/*
* agent_id values for use with omap_pm_set_min_bus_tput():
@@ -73,7 +75,8 @@
/**
* omap_pm_set_max_mpu_wakeup_lat - set the maximum MPU wakeup latency
- * @dev: struct device * requesting the constraint
+ * @qos_request: handle for the constraint. The pointer should be
+ * initialized to NULL
* @t: maximum MPU wakeup latency in microseconds
*
* Request that the maximum interrupt latency for the MPU to be no
@@ -105,7 +108,8 @@
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t);
+int omap_pm_set_max_mpu_wakeup_lat(struct pm_qos_request_list **qos_request,
+ long t);
/**
@@ -132,12 +136,12 @@
*
* Multiple calls to omap_pm_set_min_bus_tput() will replace the
* previous rate value for this device. To remove the interconnect
- * throughput restriction for this device, call with r = 0.
+ * throughput restriction for this device, call with r = -1.
*
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r);
+int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, long r);
/**
@@ -172,7 +176,8 @@
/**
* omap_pm_set_max_sdma_lat - set the maximum system DMA transfer start latency
- * @dev: struct device *
+ * @qos_request: handle for the constraint. The pointer should be
+ * initialized to NULL
* @t: maximum DMA transfer start latency in microseconds
*
* Request that the maximum system DMA transfer start latency for this
@@ -197,7 +202,8 @@
* Returns -EINVAL for an invalid argument, -ERANGE if the constraint
* is not satisfiable, or 0 upon success.
*/
-int omap_pm_set_max_sdma_lat(struct device *dev, long t);
+int omap_pm_set_max_sdma_lat(struct pm_qos_request_list **qos_request,
+ long t);
/**
@@ -337,24 +343,26 @@
*/
/**
- * omap_pm_get_dev_context_loss_count - return count of times dev has lost ctx
- * @dev: struct device *
+ * omap_pm_was_context_lost - return true if a device lost hw context
*
- * This function returns the number of times that the device @dev has
- * lost its internal context. This generally occurs on a powerdomain
- * transition to OFF. Drivers use this as an optimization to avoid restoring
- * context if the device hasn't lost it. To use, drivers should initially
- * call this in their context save functions and store the result. Early in
- * the driver's context restore function, the driver should call this function
- * again, and compare the result to the stored counter. If they differ, the
- * driver must restore device context. If the number of context losses
- * exceeds the maximum positive integer, the function will wrap to 0 and
- * continue counting. Returns the number of context losses for this device,
- * or zero upon error.
+ * This function returns a bool value indicating whether a device has lost
+ * its context. Depending on the HW implementation of the device, context
+ * can be lost in OFF or OSWR. This function reads and *CLEARS* the context
+ * lost registers for the device.
*/
-u32 omap_pm_get_dev_context_loss_count(struct device *dev);
+bool omap_pm_was_context_lost(struct device *dev);
+
+/**
+ * omap_pm_set_min_mpu_freq - set the minimum frequency the MPU is allowed
+ * to run at. The function works with a granularity of 1000000; any requested
+ * frequency is rounded up to the closest matching frequency available.
+ * To release the constraint, pass -1 as the f parameter.
+ */
+int omap_pm_set_min_mpu_freq(struct device *dev, unsigned long f);
void omap_pm_enable_off_mode(void);
void omap_pm_disable_off_mode(void);
+extern bool off_mode_enabled;
+
#endif
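Taken together, the reworked prototypes change the driver-side idiom: latency constraints are now tracked through a caller-owned pm_qos handle that starts out NULL, throughput and minimum-MPU-frequency constraints are released by passing -1, and context-loss tracking becomes a boolean query. A hedged sketch of that pattern (the callbacks, the foo_restore_context() helper and the numeric values are illustrative; releasing the wakeup-latency constraint with -1 follows the header's existing convention):

#include <plat/omap-pm.h>

static struct pm_qos_request_list *foo_mpu_lat;	/* must start out NULL */

static void foo_restore_context(struct device *dev)
{
	/* reprogram the device's registers here */
}

static int foo_runtime_resume(struct device *dev)
{
	/* Cap MPU wakeup latency at 10 us while the device is active */
	omap_pm_set_max_mpu_wakeup_lat(&foo_mpu_lat, 10);

	/* Restore registers only if the hardware really lost them */
	if (omap_pm_was_context_lost(dev))
		foo_restore_context(dev);

	/* Ask for at least 300 MHz on the MPU while streaming */
	omap_pm_set_min_mpu_freq(dev, 300000000);
	return 0;
}

static int foo_runtime_suspend(struct device *dev)
{
	/* -1 releases the constraints */
	omap_pm_set_max_mpu_wakeup_lat(&foo_mpu_lat, -1);
	omap_pm_set_min_mpu_freq(dev, -1);
	return 0;
}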
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
index 2682043..2fbb5f0 100644
--- a/arch/arm/plat-omap/include/plat/omap-serial.h
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -36,13 +36,14 @@
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
-#define OMAP_UART_WER_MOD_WKUP 0X7F
+#define OMAP2_UART_WER_MOD_WKUP 0X7F
+#define OMAP4_UART_WER_MOD_WKUP 0XFF
/* Enable XON/XOFF flow control on output */
-#define OMAP_UART_SW_TX 0x04
+#define OMAP_UART_SW_TX 0x8
/* Enable XON/XOFF flow control on input */
-#define OMAP_UART_SW_RX 0x04
+#define OMAP_UART_SW_RX 0x2
#define OMAP_UART_SYSC_RESET 0X07
#define OMAP_UART_TCR_TRIG 0X0F
@@ -51,18 +52,51 @@
#define OMAP_UART_DMA_CH_FREE -1
-#define RX_TIMEOUT (3 * HZ)
+#define RX_TIMEOUT (3 * HZ) /* RX DMA timeout (jiffies) */
+
+#define DEFAULT_RXDMA_TIMEOUT (3 * HZ) /* RX DMA timeout (jiffies) */
+#define DEFAULT_RXDMA_POLLRATE 1 /* RX DMA polling rate (us) */
+#define DEFAULT_RXDMA_BUFSIZE 4096 /* RX DMA buffer size */
+#define DEFAULT_AUTOSUSPEND_DELAY 3000 /* Runtime autosuspend (msecs)*/
+
+/*
+ * (Errata i659) - From OMAP4430 ES 2.0 onwards set
+ * tx_threshold while using UART in DMA Mode
+ * and ensure tx_threshold + tx_trigger <= 63
+ */
+#define UART_MDR3 0x20
+#define UART_TX_DMA_THRESHOLD 0x21
+#define SET_DMA_TX_THRESHOLD BIT(2)
+/* Setting TX Threshold Level to 62 */
+#define TX_FIFO_THR_LVL 0x3E
+
#define OMAP_MAX_HSUART_PORTS 4
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
+#define OMAP4_UART_ERRATA_i659_TX_THR BIT(1)
+
struct omap_uart_port_info {
- bool dma_enabled; /* To specify DMA Mode */
+ int dma_rx_buf_size; /* DMA RX buffer size */
+ int dma_rx_timeout; /* DMA RX timeout */
+ unsigned int idle_timeout; /* OMAP UART idle timeout */
+ int use_dma; /* DMA Enable / Disable */
unsigned int uartclk; /* UART clock rate */
- void __iomem *membase; /* ioremap cookie or NULL */
- resource_size_t mapbase; /* resource base */
- unsigned long irqflags; /* request_irq flags */
upf_t flags; /* UPF_* flags */
+ unsigned int errata;
+ unsigned int console_uart;
+ u16 wer; /* Module Wakeup register */
+ unsigned int dma_rx_poll_rate; /* DMA RX poll_rate */
+ unsigned int auto_sus_timeout; /* Auto_suspend timeout */
+ unsigned rts_mux_driver_control:1;
+
+ void (*enable_wakeup)(struct platform_device *, bool);
+ bool (*chk_wakeup)(struct platform_device *);
+ void (*wake_peer)(struct uart_port *);
+ void __iomem *wk_st;
+ void __iomem *wk_en;
+ u32 wk_mask;
};
struct uart_omap_dma {
@@ -86,8 +120,9 @@
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
- int rx_buf_size;
- int rx_timeout;
+ unsigned int rx_buf_size;
+ unsigned int rx_poll_rate;
+ unsigned int rx_timeout;
};
struct uart_omap_port {
@@ -100,8 +135,13 @@
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
+ unsigned char dll;
+ unsigned char dlh;
+ unsigned char mdr1;
+ unsigned char wer;
int use_dma;
+ bool suspended;
/*
* Some bits in registers are cleared on a read, so they must
* be saved whenever the register is read but the bits will not
@@ -110,7 +150,16 @@
unsigned int lsr_break_flag;
unsigned char msr_saved_flags;
char name[20];
+ unsigned int console_lock;
unsigned long port_activity;
-};
+ int context_loss_cnt;
+ /* RTS control via driver */
+ unsigned rts_mux_driver_control:1;
+ unsigned rts_pullup_in_suspend:1;
+ unsigned int errata;
+ void (*enable_wakeup)(struct platform_device *, bool);
+ bool (*chk_wakeup)(struct platform_device *);
+ void (*wake_peer)(struct uart_port *);
+};
#endif /* __OMAP_SERIAL_H__ */
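With the expanded omap_uart_port_info, DMA sizing, wakeup behaviour and errata flags come from board code rather than being hard-coded in the driver. A sketch using the defaults defined above; the field values are illustrative, and whether omap_serial_board_init() expects a single entry or an array with one entry per port is an assumption here.

#include <linux/init.h>
#include <linux/serial_core.h>
#include <plat/omap-serial.h>
#include <plat/serial.h>

static struct omap_uart_port_info board_uart_info __initdata = {
	.use_dma		= 0,
	.dma_rx_buf_size	= DEFAULT_RXDMA_BUFSIZE,
	.dma_rx_poll_rate	= DEFAULT_RXDMA_POLLRATE,
	.dma_rx_timeout		= DEFAULT_RXDMA_TIMEOUT,
	.auto_sus_timeout	= DEFAULT_AUTOSUSPEND_DELAY,
	.wer			= OMAP4_UART_WER_MOD_WKUP,
	.flags			= UPF_BOOT_AUTOCONF,
};

static void __init board_serial_init(void)
{
	omap_serial_board_init(&board_uart_info);
}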
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index ea2b8a6..f6da497 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -22,6 +22,9 @@
#define L4_PER_44XX_BASE 0x48000000
#define L4_EMU_44XX_BASE 0x54000000
#define L3_44XX_BASE 0x44000000
+#define L3_44XX_BASE_CLK1 L3_44XX_BASE
+#define L3_44XX_BASE_CLK2 0x44800000
+#define L3_44XX_BASE_CLK3 0x45000000
#define OMAP44XX_EMIF1_BASE 0x4c000000
#define OMAP44XX_EMIF2_BASE 0x4d000000
#define OMAP44XX_DMM_BASE 0x4e000000
@@ -34,6 +37,7 @@
#define OMAP44XX_GPMC_BASE 0x50000000
#define OMAP443X_SCM_BASE 0x4a002000
#define OMAP443X_CTRL_BASE 0x4a100000
+#define OMAP443X_CTRL_WK_BASE 0x4a31e000
#define OMAP44XX_IC_BASE 0x48200000
#define OMAP44XX_IVA_INTC_BASE 0x40000000
#define IRQ_SIR_IRQ 0x0040
@@ -45,6 +49,7 @@
#define OMAP44XX_WKUPGEN_BASE 0x48281000
#define OMAP44XX_MCPDM_BASE 0x40132000
#define OMAP44XX_MCPDM_L3_BASE 0x49032000
+#define OMAP44XX_SAR_RAM_BASE 0x4a326000
#define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
#define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
@@ -57,5 +62,7 @@
#define OMAP44XX_HSUSB_OHCI_BASE (L4_44XX_BASE + 0x64800)
#define OMAP44XX_HSUSB_EHCI_BASE (L4_44XX_BASE + 0x64C00)
+#define OMAP44XX_C2C_BASE 0x5c000000
+
#endif /* __ASM_ARCH_OMAP44XX_H */
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349f..70d31d0 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -107,7 +107,7 @@
int omap_device_align_pm_lat(struct platform_device *pdev,
u32 new_wakeup_lat_limit);
struct powerdomain *omap_device_get_pwrdm(struct omap_device *od);
-u32 omap_device_get_context_loss_count(struct platform_device *pdev);
+int omap_device_get_context_loss_count(struct platform_device *pdev);
/* Other */
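The context-loss counter now returns an int rather than a u32, presumably so callers can detect errors. A hedged sketch of the usual compare-and-restore pattern under that assumption (foo_restore_context() is a hypothetical helper):

#include <plat/omap_device.h>

static int foo_loss_cnt;

static void foo_restore_context(struct platform_device *pdev)
{
	/* reprogram the device's registers here */
}

static void foo_maybe_restore(struct platform_device *pdev)
{
	int cnt = omap_device_get_context_loss_count(pdev);

	/* Assumption: a negative count signals an error; treat it as "lost" */
	if (cnt < 0 || cnt != foo_loss_cnt)
		foo_restore_context(pdev);
	if (cnt >= 0)
		foo_loss_cnt = cnt;
}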
diff --git a/arch/arm/plat-omap/include/plat/omap_hsi.h b/arch/arm/plat-omap/include/plat/omap_hsi.h
new file mode 100644
index 0000000..1a75ed4
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap_hsi.h
@@ -0,0 +1,494 @@
+/*
+ * /mach/omap_hsi.h
+ *
+ * Hardware definitions for HSI and SSI.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* NOTE: This file defines the register address offsets for both the
+ * SSI and HSI devices. Most of the registers share the same offset between
+ * these devices.
+ * Constants that are common or HSI-only are named HSI_*; the SSI-specific
+ * constants are named HSI_SSI_*.
+ */
+
+#ifndef __OMAP_HSI_H__
+#define __OMAP_HSI_H__
+
+/* Set the HSI Functional Clock to 96MHz.
+ * This is to ensure HSI will function even at OPP50. */
+#define HSI_DEFAULT_FCLK 96000000 /* 96 MHz */
+
+
+#define HSI_PORT_OFFSET 0x1000
+
+/*
+ * GDD base addr : 0x48059000 (SSI)
+ * GDD base addr : 0x4A059000 (HSI)
+ */
+#define HSI_GDD_OFFSET 0x1000
+#define HSI_GDD_BASE HSI_GDD_OFFSET /* 0x9000 */
+
+/*
+ * HST base addr:
+ * port 1: 0x4805a000 (SSI) - 0x4A05a000 (HSI)
+ * port 2: 0x4805b000 (SSI) - 0x4a05b000 (HSI)
+ */
+#define HSI_HST_OFFSET 0x2000
+#define HSI_HST_BASE(port) (HSI_HST_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+ /*
+ * HSR base addr:
+ * port 1: 0x4805a800 (SSI) - 0x4A05a800 (HSI)
+ * port 2: 0x4805b800 (SSI) - 0x4A05b800 (HSI)
+ */
+#define HSI_HSR_OFFSET 0x2800
+#define HSI_HSR_BASE(port) (HSI_HSR_OFFSET + (((port) - 1) *\
+ (HSI_PORT_OFFSET)))
+/*
+ * HSI SYS registers
+ */
+#define HSI_SYS_REVISION_REG 0x0000
+#define HSI_SSI_REV_MASK 0x000000ff
+#define HSI_SSI_REV_MAJOR 0xf0
+#define HSI_SSI_REV_MINOR 0x0f
+
+#define HSI_SYS_SYSCONFIG_REG 0x0010
+#define HSI_AUTOIDLE (1 << 0)
+#define HSI_SOFTRESET (1 << 1)
+#define HSI_FREE_EMU (1 << 2) /* Only for HSI */
+#define HSI_SIDLEMODE_FORCE 0
+#define HSI_SIDLEMODE_NO (1 << 3)
+#define HSI_SIDLEMODE_SMART (1 << 4)
+#define HSI_SIDLEMODE_SMART_WAKEUP (3 << 3)
+#define HSI_SIDLEMODE_MASK 0x00000018
+#define HSI_MIDLEMODE_FORCE 0
+#define HSI_MIDLEMODE_NO (1 << 12)
+#define HSI_MIDLEMODE_SMART (1 << 13)
+#define HSI_MIDLEMODE_SMART_WAKEUP (3 << 12)
+#define HSI_MIDLEMODE_MASK 0x00003000
+
+#define HSI_SYS_SYSSTATUS_REG 0x0014
+#define HSI_RESETDONE 1
+
+#define HSI_SYS_MPU_STATUS_BASE 0x0808
+#define HSI_SYS_MPU_STATUS_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_STATUS_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+#define HSI_SYS_MPU_ENABLE_BASE 0x080c
+#define HSI_SYS_MPU_ENABLE_PORT_OFFSET 0x10
+#define HSI_SYS_MPU_ENABLE_IRQ_OFFSET 8
+
+#define HSI_SYS_MPU_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+#define HSI_HST_DATAACCEPT(channel) (((channel) < 8) ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+#define HSI_HSR_DATAAVAILABLE(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 8)) : \
+ (1 << ((channel) - 8 + 8)))
+#define HSI_HSR_DATAOVERRUN(channel) ((channel) < 8 ? \
+ (1 << ((channel) + 16)) : \
+ (1 << ((channel) - 8 + 16)))
+
+#define HSI_ERROROCCURED (1 << 24)
+#define HSI_BREAKDETECTED (1 << 25)
+#define HSI_CAWAKEDETECTED (1 << 26)
+
+#define HSI_SYS_GDD_MPU_IRQ_STATUS_REG 0x0800
+#define HSI_SYS_GDD_MPU_IRQ_ENABLE_REG 0x0804
+#define HSI_GDD_LCH(channel) (1 << (channel))
+
+
+#define HSI_SYS_WAKE_OFFSET 0x10
+#define HSI_SYS_WAKE_BASE 0x0c00
+#define HSI_SYS_WAKE_REG(port) (HSI_SYS_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_CLEAR_WAKE_BASE 0x0c04
+#define HSI_SYS_CLEAR_WAKE_REG(port) (HSI_SYS_CLEAR_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SYS_SET_WAKE_BASE 0x0c08
+#define HSI_SYS_SET_WAKE_REG(port) (HSI_SYS_SET_WAKE_BASE +\
+ (((port) - 1) * HSI_SYS_WAKE_OFFSET))
+
+#define HSI_SSI_WAKE_MASK 0xff /* for SSI */
+#define HSI_WAKE_MASK 0xffff /* for HSI */
+#define HSI_SET_WAKE_4_WIRES (0 << 16)
+#define HSI_SET_WAKE_READY_LVL_0 (0 << 17)
+#define HSI_SET_WAKE(channel) (1 << (channel) |\
+ HSI_SET_WAKE_4_WIRES |\
+ HSI_SET_WAKE_READY_LVL_0)
+#define HSI_CLEAR_WAKE(channel) (1 << (channel))
+#define HSI_WAKE(channel) (1 << (channel))
+
+#define HSI_SYS_HWINFO_REG 0x0004 /* only for HSI */
+
+/* Additional registers definitions (for channels 8 .. 15) for HSI */
+#define HSI_SYS_MPU_U_STATUS_BASE 0x0408
+#define HSI_SYS_MPU_U_STATUS_REG(port, irq) \
+ (HSI_SYS_MPU_U_STATUS_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_STATUS_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_STATUS_IRQ_OFFSET)))
+
+#define HSI_SYS_MPU_U_ENABLE_BASE 0x040c
+#define HSI_SYS_MPU_U_ENABLE_REG(port, irq) \
+ (HSI_SYS_MPU_U_ENABLE_BASE + \
+ ((((port) - 1) * HSI_SYS_MPU_ENABLE_PORT_OFFSET) +\
+ ((irq) * HSI_SYS_MPU_ENABLE_IRQ_OFFSET)))
+
+/*
+ * HSI HST registers
+ */
+#define HSI_HST_ID_REG(port) (HSI_HST_BASE(port) + 0x0000)
+
+#define HSI_HST_MODE_REG(port) (HSI_HST_BASE(port) + 0x0004)
+#define HSI_MODE_VAL_MASK 3
+#define HSI_MODE_SLEEP 0
+#define HSI_MODE_STREAM 1
+#define HSI_MODE_FRAME 2
+#define HSI_SSI_MODE_MULTIPOINTS 3 /* SSI only */
+#define HSI_FLOW_OFFSET 2 /* HSI only */
+#define HSI_FLOW_VAL_MASK 3 /* HSI only */
+#define HSI_FLOW_SYNCHRONIZED 0 /* HSI only */
+#define HSI_FLOW_PIPELINED 1 /* HSI only */
+#define HSI_FLOW_REAL_TIME 2 /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_AUTO (1 << 4) /* HSI only */
+#define HSI_HST_MODE_WAKE_CTRL_SW (0 << 4) /* HSI only */
+
+#define HSI_HST_FRAMESIZE_REG(port) (HSI_HST_BASE(port) + 0x0008)
+#define HSI_FRAMESIZE_DEFAULT 31
+#define HSI_FRAMESIZE_MAX 0x1f
+
+#define HSI_HST_TXSTATE_REG(port) (HSI_HST_BASE(port) + 0x000c)
+#define HSI_HST_TXSTATE_VAL_MASK 0x07
+#define HSI_HST_TXSTATE_IDLE 0
+
+#define HSI_HST_BUFSTATE_REG(port) (HSI_HST_BASE(port) + 0x0010)
+#define HSI_HST_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HST_BUFSTATE_REG(1) : \
+ HSI_HST_BUFSTATE_REG(2))
+#define HSI_BUFSTATE_CHANNEL(channel) ((channel) < 8 ? \
+ (1 << (channel)) : \
+ (1 << ((channel) - 8)))
+
+#define HSI_HST_DIVISOR_REG(port) (HSI_HST_BASE(port) + 0x0018)
+#define HSI_DIVISOR_DEFAULT 1
+#define HSI_SSI_MAX_TX_DIVISOR 0x7f /* for SSI */
+#define HSI_MAX_TX_DIVISOR 0xff /* for HSI */
+
+#define HSI_HST_BREAK_REG(port) (HSI_HST_BASE(port) + 0x0020)
+#define HSI_HST_CHANNELS_REG(port) (HSI_HST_BASE(port) + 0x0024)
+#define HSI_CHANNELS_DEFAULT 4
+#define HSI_SSI_CHANNELS_MAX 8 /* for SSI */
+#define HSI_CHANNELS_MAX 16 /* for HSI */
+
+#define HSI_HST_ARBMODE_REG(port) (HSI_HST_BASE(port) + 0x0028)
+#define HSI_ARBMODE_ROUNDROBIN 0
+#define HSI_ARBMODE_PRIORITY 1
+
+#define HSI_HST_BUFFER_BASE(port) (HSI_HST_BASE(port) + 0x0080)
+#define HSI_HST_BUFFER_CH_REG(port, channel) (HSI_HST_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HST_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HST_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HST_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HST_SWAPBUF_BASE(port) (HSI_HST_BASE(port) + 0x00c0)
+#define HSI_HST_SWAPBUF_CH_REG(port, channel) (HSI_HST_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+
+/* Additional registers for HSI */
+#define HSI_HST_FIFO_COUNT 16
+#define HSI_HST_FIFO_SIZE 8
+#define HSI_HST_MAPPING_FIFO_REG(fifo) (HSI_HST_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_MAPPING_ENABLE 1
+#define HSI_MAPPING_CH_NUMBER_OFFSET 1
+#define HSI_MAPPING_PORT_NUMBER_OFFSET 7
+#define HSI_HST_MAPPING_THRESH_OFFSET 10
+#define HSI_HST_MAPPING_THRESH_VALUE (0x0 << HSI_HST_MAPPING_THRESH_OFFSET)
+
+/*
+ * HSI HSR registers
+ */
+#define HSI_HSR_ID_REG(port) (HSI_HSR_BASE(port) + 0x0000)
+
+#define HSI_HSR_MODE_REG(port) (HSI_HSR_BASE(port) + 0x0004)
+
+#define HSI_HSR_MODE_MODE_VAL_MASK (3 << 0) /* HSI only */
+#define HSI_HSR_MODE_FLOW_VAL_MASK (3 << 2) /* HSI only */
+#define HSI_HSR_MODE_WAKE_STATUS (1 << 4) /* HSI only */
+#define HSI_HSR_MODE_MODE_VAL_SLEEP 0xFFFFFFFC /* HSI only */
+
+#define HSI_HSR_FRAMESIZE_REG(port) (HSI_HSR_BASE(port) + 0x0008)
+
+#define HSI_HSR_RXSTATE_REG(port) (HSI_HSR_BASE(port) + 0x000c)
+
+#define HSI_HSR_BUFSTATE_REG(port) (HSI_HSR_BASE(port) + 0x0010)
+#define HSI_HSR_BUFSTATE_FIFO_REG(fifo) (((fifo) < 8) ? \
+ HSI_HSR_BUFSTATE_REG(1) : \
+ HSI_HSR_BUFSTATE_REG(2))
+
+#define HSI_HSR_BREAK_REG(port) (HSI_HSR_BASE(port) + 0x001c)
+
+#define HSI_HSR_ERROR_REG(port) (HSI_HSR_BASE(port) + 0x0020)
+#define HSI_HSR_ERROR_SIG 1
+#define HSI_HSR_ERROR_FTE (1 << 1) /* HSI only */
+#define HSI_HSR_ERROR_TBE (1 << 4) /* HSI only */
+#define HSI_HSR_ERROR_RME (1 << 7) /* HSI only */
+#define HSI_HSR_ERROR_TME (1 << 11) /* HSI only */
+
+#define HSI_HSR_ERRORACK_REG(port) (HSI_HSR_BASE(port) + 0x0024)
+
+#define HSI_HSR_CHANNELS_REG(port) (HSI_HSR_BASE(port) + 0x0028)
+
+#define HSI_HSR_OVERRUN_REG(port) (HSI_HSR_BASE(port) + 0x002c)
+
+#define HSI_HSR_OVERRUNACK_REG(port) (HSI_HSR_BASE(port) + 0x0030)
+
+#define HSI_HSR_COUNTERS_REG(port) (HSI_HSR_BASE(port) + 0x0034)
+#define SSI_TIMEOUT_REG(port) (HSI_HSR_COUNTERS_REG(port))
+#define HSI_TIMEOUT_DEFAULT 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_OFFSET 0 /* SSI only */
+#define HSI_SSI_RX_TIMEOUT_MASK 0x1ff /* SSI only */
+#define HSI_COUNTERS_FT_MASK 0x000fffff /* HSI only */
+#define HSI_COUNTERS_TB_MASK 0x00f00000 /* HSI only */
+#define HSI_COUNTERS_FB_MASK 0xff000000 /* HSI only */
+#define HSI_COUNTERS_FT_OFFSET 0 /* HSI only */
+#define HSI_COUNTERS_TB_OFFSET 20 /* HSI only */
+#define HSI_COUNTERS_FB_OFFSET 24 /* HSI only */
+/* Default FT value: 2 x max_bits_per_frame + 20% margin */
+#define HSI_COUNTERS_FT_DEFAULT (90 << HSI_COUNTERS_FT_OFFSET)
+#define HSI_COUNTERS_TB_DEFAULT (6 << HSI_COUNTERS_TB_OFFSET)
+#define HSI_COUNTERS_FB_DEFAULT (8 << HSI_COUNTERS_FB_OFFSET)
+#define HSI_HSR_COMBINE_COUNTERS(FB, TB, FT) \
+ (((FB << HSI_COUNTERS_FB_OFFSET) & HSI_COUNTERS_FB_MASK) | \
+ ((TB << HSI_COUNTERS_TB_OFFSET) & HSI_COUNTERS_TB_MASK) | \
+ ((FT << HSI_COUNTERS_FT_OFFSET) & HSI_COUNTERS_FT_MASK))
+#define SSI_SSR_COMBINE_COUNTERS(FT) \
+ ((FT << HSI_SSI_RX_TIMEOUT_OFFSET) & HSI_SSI_RX_TIMEOUT_MASK)
+
+#define HSI_HSR_BUFFER_BASE(port) (HSI_HSR_BASE(port) + 0x0080)
+#define HSI_HSR_BUFFER_CH_REG(port, channel) (HSI_HSR_BUFFER_BASE(port) +\
+ ((channel) * 4))
+#define HSI_HSR_BUFFER_FIFO_REG(fifo) (((fifo) < 8) ? \
+ (HSI_HSR_BUFFER_CH_REG(1, (fifo))) : \
+ (HSI_HSR_BUFFER_CH_REG(2, (fifo) - 8)))
+
+#define HSI_HSR_SWAPBUF_BASE(port) (HSI_HSR_BASE(port) + 0x00c0)
+#define HSI_HSR_SWAPBUF_CH_REG(port, channel) (HSI_HSR_SWAPBUF_BASE(port) +\
+ ((channel) * 4))
+
+/* Additional registers for HSI */
+#define HSI_HSR_FIFO_COUNT 16
+#define HSI_HSR_FIFO_SIZE 8
+#define HSI_HSR_MAPPING_FIFO_REG(fifo) (HSI_HSR_BASE(1) + 0x0100 +\
+ ((fifo) * 4))
+#define HSI_HSR_MAPPING_WORDS_MASK (0xf << 10)
+
+#define HSI_HSR_DLL_REG (HSI_HSR_BASE(1) + 0x0144)
+#define HSI_HSR_DLL_COCHRE 1
+#define HSI_HSR_DLL_COCHGR (1 << 4)
+#define HSI_HSR_DLL_INCO_MASK 0x0003ff00
+#define HSI_HSR_DLL_INCO_OFFSET 8
+
+#define HSI_HSR_DIVISOR_REG(port) (HSI_HSR_BASE(port) + 0x014C)
+#define HSI_HSR_DIVISOR_MASK 0xff
+#define HSI_MAX_RX_DIVISOR 0xff
+
+/*
+ * HSI GDD registers
+ */
+#define HSI_SSI_DMA_CHANNEL_MAX 8
+#define HSI_HSI_DMA_CHANNEL_MAX 16
+
+#define HSI_SSI_GDD_HW_ID_REG (HSI_GDD_BASE + 0x0000)
+
+#define HSI_SSI_GDD_PPORT_ID_REG (HSI_GDD_BASE + 0x0010)
+
+#define HSI_SSI_GDD_MPORT_ID_REG (HSI_GDD_BASE + 0x0014)
+
+#define HSI_SSI_GDD_PPORT_SR_REG (HSI_GDD_BASE + 0x0020)
+#define HSI_PPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_GDD_MPORT_SR_REG (HSI_GDD_BASE + 0x0024)
+#define HSI_SSI_MPORT_ACTIVE_LCH_NUMBER_MASK 0xff
+
+#define HSI_SSI_GDD_TEST_REG (HSI_GDD_BASE + 0x0040)
+#define HSI_SSI_TEST 1
+
+#define HSI_GDD_GCR_REG (HSI_GDD_BASE + 0x0100)
+#define HSI_CLK_AUTOGATING_ON (1 << 3)
+#define HSI_SWITCH_OFF (1 << 0)
+
+#define HSI_GDD_GRST_REG (HSI_GDD_BASE + 0x0200)
+#define HSI_GDD_GRST_SWRESET 1
+
+#define HSI_GDD_CSDP_BASE (HSI_GDD_BASE + 0x0800)
+#define HSI_GDD_CSDP_OFFSET 0x40
+#define HSI_GDD_CSDP_REG(channel) (HSI_GDD_CSDP_BASE +\
+ ((channel) * HSI_GDD_CSDP_OFFSET))
+
+#define HSI_DST_BURST_EN_MASK 0xc000
+#define HSI_DST_SINGLE_ACCESS0 0
+#define HSI_DST_SINGLE_ACCESS (1 << 14)
+#define HSI_DST_BURST_4X32_BIT (2 << 14)
+#define HSI_DST_BURST_8x32_BIT (3 << 14)
+
+#define HSI_DST_MASK 0x1e00
+#define HSI_DST_MEMORY_PORT (8 << 9)
+#define HSI_DST_PERIPHERAL_PORT (9 << 9)
+
+#define HSI_SRC_BURST_EN_MASK 0x0180
+#define HSI_SRC_SINGLE_ACCESS0 0
+#define HSI_SRC_SINGLE_ACCESS (1 << 7)
+#define HSI_SRC_BURST_4x32_BIT (2 << 7)
+#define HSI_SRC_BURST_8x32_BIT (3 << 7)
+
+#define HSI_SRC_MASK 0x003c
+#define HSI_SRC_MEMORY_PORT (8 << 2)
+#define HSI_SRC_PERIPHERAL_PORT (9 << 2)
+
+#define HSI_DATA_TYPE_MASK 3
+#define HSI_DATA_TYPE_S32 2
+
+#define HSI_GDD_CCR_BASE (HSI_GDD_BASE + 0x0802)
+#define HSI_GDD_CCR_OFFSET 0x40
+#define HSI_GDD_CCR_REG(channel) (HSI_GDD_CCR_BASE +\
+ ((channel) * HSI_GDD_CCR_OFFSET))
+#define HSI_DST_AMODE_MASK (3 << 14)
+#define HSI_DST_AMODE_CONST 0
+#define HSI_DST_AMODE_POSTINC (1 << 14)
+
+#define HSI_SRC_AMODE_MASK (3 << 12)
+#define HSI_SRC_AMODE_CONST 0
+#define HSI_SRC_AMODE_POSTINC (1 << 12)
+
+#define HSI_CCR_ENABLE (1 << 7)
+
+#define HSI_CCR_SYNC_MASK 0x001f /* only for SSI */
+
+#define HSI_GDD_CCIR_BASE (HSI_GDD_BASE + 0x0804)
+#define HSI_GDD_CCIR_OFFSET 0x40
+#define HSI_GDD_CCIR_REG(channel) (HSI_GDD_CCIR_BASE +\
+ ((channel) * HSI_GDD_CCIR_OFFSET))
+
+#define HSI_BLOCK_IE (1 << 5)
+#define HSI_HALF_IE (1 << 2)
+#define HSI_TOUT_IE (1 << 0)
+
+#define HSI_GDD_CSR_BASE (HSI_GDD_BASE + 0x0806)
+#define HSI_GDD_CSR_OFFSET 0x40
+#define HSI_GDD_CSR_REG(channel) (HSI_GDD_CSR_BASE +\
+ ((channel) * HSI_GDD_CSR_OFFSET))
+
+#define HSI_CSR_SYNC (1 << 6)
+#define HSI_CSR_BLOCK (1 << 5) /* Full block is transferred */
+#define HSI_CSR_HALF (1 << 2) /* Half block is transferred */
+#define HSI_CSR_TOUT (1 << 0) /* Time-out overflow occurs */
+
+#define HSI_GDD_CSSA_BASE (HSI_GDD_BASE + 0x0808)
+#define HSI_GDD_CSSA_OFFSET 0x40
+#define HSI_GDD_CSSA_REG(channel) (HSI_GDD_CSSA_BASE +\
+ ((channel) * HSI_GDD_CSSA_OFFSET))
+
+
+#define HSI_GDD_CDSA_BASE (HSI_GDD_BASE + 0x080c)
+#define HSI_GDD_CDSA_OFFSET 0x40
+#define HSI_GDD_CDSA_REG(channel) (HSI_GDD_CDSA_BASE +\
+ ((channel) * HSI_GDD_CDSA_OFFSET))
+
+#define HSI_GDD_CEN_BASE (HSI_GDD_BASE + 0x0810)
+#define HSI_GDD_CEN_OFFSET 0x40
+#define HSI_GDD_CEN_REG(channel) (HSI_GDD_CEN_BASE +\
+ ((channel) * HSI_GDD_CEN_OFFSET))
+
+
+#define HSI_GDD_CSAC_BASE (HSI_GDD_BASE + 0x0818)
+#define HSI_GDD_CSAC_OFFSET 0x40
+#define HSI_GDD_CSAC_REG(channel) (HSI_GDD_CSAC_BASE +\
+ ((channel) * HSI_GDD_CSAC_OFFSET))
+
+#define HSI_GDD_CDAC_BASE (HSI_GDD_BASE + 0x081a)
+#define HSI_GDD_CDAC_OFFSET 0x40
+#define HSI_GDD_CDAC_REG(channel) (HSI_GDD_CDAC_BASE +\
+ ((channel) * HSI_GDD_CDAC_OFFSET))
+
+#define HSI_SSI_GDD_CLNK_CTRL_BASE (HSI_GDD_BASE + 0x0828)
+#define HSI_SSI_GDD_CLNK_CTRL_OFFSET 0x40
+#define HSI_SSI_GDD_CLNK_CTRL_REG(channel) (HSI_SSI_GDD_CLNK_CTRL_BASE +\
+ (channel * HSI_SSI_GDD_CLNK_CTRL_OFFSET))
+
+#define HSI_SSI_ENABLE_LNK (1 << 15)
+#define HSI_SSI_STOP_LNK (1 << 14)
+#define HSI_SSI_NEXT_CH_ID_MASK 0xf
+
+/*
+ * HSI Helpers
+ */
+#define HSI_SYS_MPU_ENABLE_CH_REG(port, irq, channel) \
+ (((channel) < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_ENABLE_REG(port, irq) : \
+ HSI_SYS_MPU_U_ENABLE_REG(port, irq))
+
+#define HSI_SYS_MPU_STATUS_CH_REG(port, irq, channel) \
+ ((channel < HSI_SSI_CHANNELS_MAX) ? \
+ HSI_SYS_MPU_STATUS_REG(port, irq) : \
+ HSI_SYS_MPU_U_STATUS_REG(port, irq))
+/**
+ * struct omap_ssi_board_config - SSI board configuration
+ * @num_ports: Number of ports in use
+ * @cawake_gpio: Array of CAWAKE GPIO lines
+ */
+struct omap_ssi_board_config {
+ unsigned int num_ports;
+ int cawake_gpio[2];
+};
+extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
+
+/**
+ * struct omap_hsi_board_config - HSI board configuration
+ * @num_ports: Number of ports in use
+ */
+struct omap_hsi_board_config {
+ unsigned int num_ports;
+};
+extern int omap_hsi_config(struct omap_hsi_board_config *hsi_config);
+
+#ifdef CONFIG_OMAP_HSI
+extern int omap_hsi_prepare_suspend(int hsi_port, bool dev_may_wakeup);
+extern int omap_hsi_prepare_idle(void);
+extern int omap_hsi_wakeup(int hsi_port);
+extern int omap_hsi_is_io_wakeup_from_hsi(void);
+#else
+static inline int omap_hsi_prepare_suspend(int hsi_port,
+ bool dev_may_wakeup) { return -ENOSYS; }
+static inline int omap_hsi_prepare_idle(void) { return -ENOSYS; }
+static inline int omap_hsi_wakeup(int hsi_port) { return -ENOSYS; }
+static inline int omap_hsi_is_io_wakeup_from_hsi(void) { return -ENOSYS; }
+
+#endif
+
+#endif /* __OMAP_HSI_H__ */
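The port/FIFO macros above encode a simple layout: each port occupies a 4 KB window, FIFOs 0-7 belong to port 1 and FIFOs 8-15 to port 2. A compile-time sketch that spells out the arithmetic (illustrative only, not part of the patch):

#include <linux/kernel.h>
#include <plat/omap_hsi.h>

static void __maybe_unused hsi_offset_selftest(void)
{
	/* Port 1 transmitter block starts at offset 0x2000, port 2 at 0x3000 */
	BUILD_BUG_ON(HSI_HST_BASE(1) != 0x2000);
	BUILD_BUG_ON(HSI_HST_BASE(2) != 0x3000);

	/* Channel 2 TX buffer on port 1: 0x2000 + 0x80 + 2 * 4 */
	BUILD_BUG_ON(HSI_HST_BUFFER_CH_REG(1, 2) != 0x2088);

	/* FIFOs 8..15 map onto port 2: FIFO 10 is channel 2 of port 2 */
	BUILD_BUG_ON(HSI_HST_BUFFER_FIFO_REG(10) != HSI_HST_BUFFER_CH_REG(2, 2));
}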
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 1adea9c..2f279ad 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -40,6 +40,7 @@
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1;
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
+extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
/*
* OCP SYSCONFIG bit shifts/masks TYPE1. These are for IPs compliant
@@ -69,6 +70,13 @@
#define SYSC_TYPE2_MIDLEMODE_SHIFT 4
#define SYSC_TYPE2_MIDLEMODE_MASK (0x3 << SYSC_TYPE2_MIDLEMODE_SHIFT)
+/*
+ * OCP SYSCONFIG bit shifts/masks TYPE3. These are for IPs that only
+ * implement the sidle feature.
+ */
+#define SYSC_TYPE3_SIDLEMODE_SHIFT 0
+#define SYSC_TYPE3_SIDLEMODE_MASK (0x3 << SYSC_TYPE3_SIDLEMODE_SHIFT)
+
/* OCP SYSSTATUS bit shifts/masks */
#define SYSS_RESETDONE_SHIFT 0
#define SYSS_RESETDONE_MASK (1 << SYSS_RESETDONE_SHIFT)
@@ -77,7 +85,6 @@
#define HWMOD_IDLEMODE_FORCE (1 << 0)
#define HWMOD_IDLEMODE_NO (1 << 1)
#define HWMOD_IDLEMODE_SMART (1 << 2)
-/* Slave idle mode flag only */
#define HWMOD_IDLEMODE_SMART_WKUP (1 << 3)
/**
@@ -258,6 +265,7 @@
#define MSTANDBY_FORCE (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
#define MSTANDBY_NO (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
#define MSTANDBY_SMART (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
+#define MSTANDBY_SMART_WKUP (HWMOD_IDLEMODE_SMART_WKUP << MASTER_STANDBY_SHIFT)
/* omap_hwmod_sysconfig.sysc_flags capability flags */
#define SYSC_HAS_AUTOIDLE (1 << 0)
@@ -300,6 +308,7 @@
* @rev_offs: IP block revision register offset (from module base addr)
* @sysc_offs: OCP_SYSCONFIG register offset (from module base addr)
* @syss_offs: OCP_SYSSTATUS register offset (from module base addr)
+ * @srst_udelay: Delay needed after doing a softreset in usecs
* @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART}
* @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported
* @clockact: the default value of the module CLOCKACTIVITY bits
@@ -325,6 +334,7 @@
u16 sysc_offs;
u16 syss_offs;
u16 sysc_flags;
+ u16 srst_udelay;
u8 idlemodes;
u8 clockact;
struct omap_hwmod_sysc_fields *sysc_fields;
@@ -359,11 +369,15 @@
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
* @clkctrl_reg: PRCM address of the clock control register
* @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
+ * @context_reg: address of the context register
+ * @ctx_restore_trig: indicates whether an RFF loss, a DFF loss, or both
+ * should trigger a context restore.
* @submodule_wkdep_bit: bit shift of the WKDEP range
*/
struct omap_hwmod_omap4_prcm {
void __iomem *clkctrl_reg;
void __iomem *rstctrl_reg;
+ void __iomem *context_reg;
u8 submodule_wkdep_bit;
};
@@ -519,8 +533,6 @@
const char *main_clk;
struct clk *_clk;
struct omap_hwmod_opt_clk *opt_clks;
- char *vdd_name;
- struct voltagedomain *voltdm;
struct omap_hwmod_ocp_if **masters; /* connect to *_IA */
struct omap_hwmod_ocp_if **slaves; /* connect to *_TA */
void *dev_attr;
@@ -598,10 +610,14 @@
void *user);
int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
-u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
+int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
+int omap_hwmod_pad_get_wakeup_status(struct omap_hwmod *oh);
+
+int omap_hwmod_disable_ioring_wakeup(struct omap_hwmod *oh);
+int omap_hwmod_enable_ioring_wakeup(struct omap_hwmod *oh);
/*
* Chip variant-specific hwmod init routines - XXX should be converted
* to use initcalls once the initial boot ordering is straightened out
@@ -611,4 +627,6 @@
extern int omap3xxx_hwmod_init(void);
extern int omap44xx_hwmod_init(void);
+extern struct device *omap_hwmod_name_get_dev(const char *oh_name);
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/remoteproc.h b/arch/arm/plat-omap/include/plat/remoteproc.h
new file mode 100644
index 0000000..3de2a38
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/remoteproc.h
@@ -0,0 +1,151 @@
+/*
+ * Remote Processor - omap-specific bits
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PLAT_REMOTEPROC_H
+#define _PLAT_REMOTEPROC_H
+
+#include <linux/remoteproc.h>
+#include <plat/omap_device.h>
+
+/*
+ * struct omap_rproc_timers_info - optional timers for the omap rproc
+ *
+ * @id: timer id to use by the remoteproc
+ * @odt: timer pointer
+ */
+struct omap_rproc_timers_info {
+ int id;
+ struct omap_dm_timer *odt;
+};
+
+/*
+ * struct omap_rproc_pdata - platform data for the omap rproc implementation
+ *
+ * @name: human readable name of the rproc, cannot exceed RPROC_MAN_NAME bytes
+ * @iommu_name: iommu device this rproc sits behind
+ * @oh_name: omap hwmod device
+ * @oh_name_opt: optional, secondary omap hwmod device
+ * @firmware: name of firmware file to be loaded
+ * @clkdm_name: name of clock domain in which this device is located
+ * @clkdm: clock domain in which this device is located
+ * @ops: platform-specific start/stop rproc handlers
+ * @memory_maps: table of da-to-pa iommu memory maps
+ * @memory_pool: platform-specific pool data
+ * @timers: optional timer(s) the rproc can use
+ */
+struct omap_rproc_pdata {
+ const char *name;
+ const char *iommu_name;
+ const char *oh_name;
+ const char *oh_name_opt;
+ const char *firmware;
+ const char *clkdm_name;
+ struct clockdomain *clkdm;
+ const struct rproc_ops *ops;
+ struct rproc_mem_pool *memory_pool;
+ struct omap_rproc_timers_info *timers;
+ u32 idle_addr;
+ u32 idle_mask;
+ u32 suspend_addr;
+ u32 suspend_mask;
+ unsigned sus_timeout;
+ char *sus_mbox_name;
+ u8 timers_cnt;
+};
+
+enum omap_rproc_mempool_type {
+ OMAP_RPROC_MEMPOOL_STATIC,
+ OMAP_RPROC_MEMPOOL_DYNAMIC
+};
+
+#if defined(CONFIG_OMAP_REMOTE_PROC)
+void omap_ipu_reserve_sdram_memblock(void);
+u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type);
+phys_addr_t omap_ipu_get_mempool_base(enum omap_rproc_mempool_type type);
+void omap_ipu_set_static_mempool(u32 start, u32 size);
+#else
+static inline void omap_ipu_reserve_sdram_memblock(void) { }
+static inline u32 omap_ipu_get_mempool_size(enum omap_rproc_mempool_type type)
+{
+ return 0;
+}
+static inline phys_addr_t omap_ipu_get_mempool_base(
+ enum omap_rproc_mempool_type type)
+{
+ return 0;
+}
+static inline void omap_ipu_set_static_mempool(u32 start, u32 size) { }
+#endif
+
+int omap_rproc_deactivate(struct omap_device *od);
+int omap_rproc_activate(struct omap_device *od);
+#define OMAP_RPROC_DEFAULT_PM_LATENCY \
+ .deactivate_func = omap_rproc_deactivate, \
+ .activate_func = omap_rproc_activate, \
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST
+
+struct exc_regs {
+ u32 r0;
+ u32 r1;
+ u32 r2;
+ u32 r3;
+ u32 r4;
+ u32 r5;
+ u32 r6;
+ u32 r7;
+ u32 r8;
+ u32 r9;
+ u32 r10;
+ u32 r11;
+ u32 r12;
+ u32 sp;
+ u32 lr;
+ u32 pc;
+ u32 psr;
+ u32 ICSR; /* NVIC registers */
+ u32 MMFSR;
+ u32 BFSR;
+ u32 UFSR;
+ u32 HFSR;
+ u32 DFSR;
+ u32 MMAR;
+ u32 BFAR;
+ u32 AFSR;
+};
+
+static inline void remoteproc_fill_pt_regs(struct pt_regs *regs,
+ struct exc_regs *xregs)
+{
+ regs->ARM_r0 = xregs->r0;
+ regs->ARM_ORIG_r0 = xregs->r0;
+ regs->ARM_r1 = xregs->r1;
+ regs->ARM_r2 = xregs->r2;
+ regs->ARM_r3 = xregs->r3;
+ regs->ARM_r4 = xregs->r4;
+ regs->ARM_r5 = xregs->r5;
+ regs->ARM_r6 = xregs->r6;
+ regs->ARM_r7 = xregs->r7;
+ regs->ARM_r8 = xregs->r8;
+ regs->ARM_r9 = xregs->r9;
+ regs->ARM_r10 = xregs->r10;
+ regs->ARM_fp = xregs->r11;
+ regs->ARM_ip = xregs->r12;
+ regs->ARM_sp = xregs->sp;
+ regs->ARM_lr = xregs->lr;
+ regs->ARM_pc = xregs->pc;
+ regs->ARM_cpsr = xregs->psr;
+}
+
+#endif /* _PLAT_REMOTEPROC_H */
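Board code decides where the IPU carveout lives: either pinned at a fixed address via omap_ipu_set_static_mempool() or reserved from memblock via omap_ipu_reserve_sdram_memblock(). A sketch with hypothetical addresses and call sites:

#include <linux/init.h>
#include <plat/remoteproc.h>

/* Board init: pin the IPU pool at a fixed, board-chosen address */
static void __init board_ipu_mempool_init(void)
{
	omap_ipu_set_static_mempool(0x97000000, 0x3000000);
}

/* Machine .reserve callback: memblock-based reservation instead */
static void __init board_reserve(void)
{
	omap_ipu_reserve_sdram_memblock();
}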
diff --git a/arch/arm/plat-omap/include/plat/rpmsg.h b/arch/arm/plat-omap/include/plat/rpmsg.h
new file mode 100644
index 0000000..c78b9d2
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/rpmsg.h
@@ -0,0 +1,68 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PLAT_RPMSG_H
+#define _PLAT_RPMSG_H
+
+/*
+ * enum - Predefined Mailbox Messages
+ *
+ * @RP_MBOX_READY: informs the M3s that we're up and running. Will be
+ * followed by another mailbox message that carries the A9's virtual address
+ * of the shared buffer. This would allow the A9's drivers to send virtual
+ * addresses of the buffers.
+ *
+ * @RP_MBOX_PENDING_MSG: informs the receiver that there is an inbound
+ * message waiting in its own receive-side vring. Please note that currently
+ * this message is optional: alternatively, one can explicitly send the index
+ * of the triggered virtqueue itself. The preferred approach will be decided
+ * as we progress and experiment with those design ideas.
+ *
+ * @RP_MBOX_CRASH: this message is sent upon a BIOS exception.
+ *
+ * @RP_MBOX_ECHO_REQUEST: a mailbox-level "ping" message.
+ *
+ * @RP_MBOX_ECHO_REPLY: a mailbox-level reply to a "ping".
+ *
+ * @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
+ * recovery mechanism (to some extent). Will trigger a @RP_MBOX_CRASH reply.
+ */
+enum {
+ RP_MBOX_READY = 0xFFFFFF00,
+ RP_MBOX_PENDING_MSG = 0xFFFFFF01,
+ RP_MBOX_CRASH = 0xFFFFFF02,
+ RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
+ RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
+ RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+};
+
+#endif /* _PLAT_RPMSG_H */
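A sketch of how a host-side driver might dispatch on the predefined mailbox values above; the send_mbox() callback stands in for whatever mailbox TX helper the driver actually uses:

#include <linux/types.h>
#include <plat/rpmsg.h>

static void foo_handle_rp_mbox(u32 msg, void (*send_mbox)(u32))
{
	switch (msg) {
	case RP_MBOX_ECHO_REQUEST:
		send_mbox(RP_MBOX_ECHO_REPLY);	/* answer the ping */
		break;
	case RP_MBOX_CRASH:
		/* remote BIOS exception: kick off the recovery path */
		break;
	case RP_MBOX_PENDING_MSG:
		/* an inbound message is waiting in our receive-side vring */
		break;
	default:
		/* otherwise the value is treated as a virtqueue index */
		break;
	}
}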
diff --git a/arch/arm/plat-omap/include/plat/rpres.h b/arch/arm/plat-omap/include/plat/rpres.h
new file mode 100644
index 0000000..0dfb781
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/rpres.h
@@ -0,0 +1,57 @@
+/*
+ * Remote processor resources
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#ifndef _PLAT_OMAP_RPRES_H
+#define _PLAT_OMAP_RPRES_H
+
+enum rpres_constraint {
+ RPRES_CONSTRAINT_SCALE,
+ RPRES_CONSTRAINT_LATENCY,
+ RPRES_CONSTRAINT_BANDWIDTH,
+};
+
+enum {
+ RPRES_INACTIVE,
+ RPRES_ACTIVE,
+};
+
+struct rpres_ops {
+ int (*start)(struct platform_device *pdev);
+ int (*stop)(struct platform_device *pdev);
+ int (*set_lat)(struct platform_device *pdev, long v);
+ int (*set_bw)(struct platform_device *pdev, long v);
+ int (*scale_dev)(struct platform_device *pdev, long v);
+};
+
+struct rpres_platform_data {
+ const char *name;
+ const char *oh_name;
+ struct omap_hwmod *oh;
+ struct rpres_ops *ops;
+ struct clk *opt_clk;
+ const char *opt_clk_name;
+ struct device *(*get_dev)(void);
+};
+
+struct rpres {
+ struct list_head next;
+ const char *name;
+ struct platform_device *pdev;
+ int state;
+ struct mutex lock;
+};
+
+struct rpres *rpres_get(const char *);
+void rpres_put(struct rpres *);
+int rpres_set_constraints(struct rpres *, enum rpres_constraint type, long val);
+#endif /* _PLAT_OMAP_RPRES_H */
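A usage sketch for the rpres API above; the "rpres_iva" resource name, the error-handling convention and the use of -1 to release a constraint are assumptions, not spelled out by this header:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <plat/rpres.h>

static void foo_hold_latency(void)
{
	struct rpres *r = rpres_get("rpres_iva");	/* hypothetical name */

	if (IS_ERR_OR_NULL(r))
		return;

	/* Cap wakeup latency at 100 us while the use case runs */
	rpres_set_constraints(r, RPRES_CONSTRAINT_LATENCY, 100);

	/* ... */

	/* Assumed convention: -1 releases the constraint */
	rpres_set_constraints(r, RPRES_CONSTRAINT_LATENCY, -1);
	rpres_put(r);
}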
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 2723f91..8e41fb6 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -103,15 +103,23 @@
#ifndef __ASSEMBLER__
struct omap_board_data;
+struct omap_uart_port_info;
+struct omap_device_pad;
extern void omap_serial_init(void);
-extern void omap_serial_init_port(struct omap_board_data *bdata);
-extern int omap_uart_can_sleep(void);
-extern void omap_uart_check_wakeup(void);
-extern void omap_uart_prepare_suspend(void);
-extern void omap_uart_prepare_idle(int num);
-extern void omap_uart_resume_idle(int num);
-extern void omap_uart_enable_irqs(int enable);
+extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
+extern void omap_serial_init_port(struct omap_board_data *bdata,
+ struct omap_uart_port_info *platform_data);
+void __init omap_serial_init_port_pads(int id, struct omap_device_pad *pads,
+ int size, struct omap_uart_port_info *info);
+extern u32 omap_uart_resume_idle(void);
+extern int omap_uart_wake(u8 id);
+extern int omap_uart_enable(u8 uart_num);
+extern int omap_uart_disable(u8 uart_num);
+
+#define MUX_PULL_UP ((1<<8) | (1<<4) | (1<<3) | (7))
+void omap_rts_mux_write(u16 val, int num);
+
#endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index f500fc3..a6000d4 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -15,6 +15,7 @@
#include <asm/fncpy.h>
extern void *omap_sram_push_address(unsigned long size);
+extern unsigned long omap_get_sram_barrier_base(void);
/* Macro to push a function to the internal SRAM, using the fncpy API */
#define omap_sram_push(funcp, size) ({ \
diff --git a/arch/arm/plat-omap/include/plat/temperature_sensor.h b/arch/arm/plat-omap/include/plat/temperature_sensor.h
new file mode 100644
index 0000000..5f0d6b3
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/temperature_sensor.h
@@ -0,0 +1,65 @@
+/*
+ * OMAP446x Temperature sensor header file
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_TEMPERATURE_SENSOR_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_TEMPERATURE_SENSOR_H
+
+/*
+ * Offsets from the base of temperature sensor registers
+ */
+#define TEMP_SENSOR_CTRL_OFFSET 0x00
+#define BGAP_CTRL_OFFSET 0x4c
+#define BGAP_COUNTER_OFFSET 0x50
+#define BGAP_THRESHOLD_OFFSET 0x54
+#define BGAP_TSHUT_OFFSET 0x58
+#define BGAP_STATUS_OFFSET 0x5c
+
+#define OMAP_TSHUT_GPIO 86
+
+
+/*
+ * omap_temp_sensor platform data
+ * @name - name of the temperature sensor device
+ * @irq - IRQ number for the thermal alert
+ * @offset - offset of the temp sensor ctrl register
+ */
+struct omap_temp_sensor_pdata {
+ char *name;
+ u32 offset;
+ int irq;
+};
+
+#ifdef CONFIG_OMAP_TEMP_SENSOR
+void omap_temp_sensor_resume_idle(void);
+void omap_temp_sensor_prepare_idle(void);
+#else
+static inline void omap_temp_sensor_resume_idle(void) { }
+static inline void omap_temp_sensor_prepare_idle(void) { }
+#endif
+
+#ifdef CONFIG_OMAP_DIE_TEMP_SENSOR
+void omap_temp_sensor_idle(int idle_state);
+#else
+static inline void omap_temp_sensor_idle(int idle_state) { }
+#endif
+
+#endif
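A sketch of board platform data for the sensor; the device name, the register offset and the use of the software-triggered OMAP44XX_IRQ_THERMAL_PROXY interrupt reserved earlier in this patch are illustrative wiring choices, not mandated by the header:

#include <plat/irqs.h>
#include <plat/temperature_sensor.h>

static struct omap_temp_sensor_pdata board_temp_sensor_pdata = {
	.name	= "omap_temp_sensor",		/* hypothetical device name */
	.offset	= TEMP_SENSOR_CTRL_OFFSET,
	.irq	= OMAP44XX_IRQ_THERMAL_PROXY,
};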
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 17d3c93..3be4a83 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -41,6 +41,11 @@
* Each PHY can have a separate regulator.
*/
struct regulator *regulator[OMAP3_HS_USB_PORTS];
+ /*
+ * Each Port can have an external transceiver requiring clock control
+ * for low power mode entry
+ */
+ struct clk *transceiver_clk[OMAP3_HS_USB_PORTS];
};
struct ehci_hcd_omap_platform_data {
@@ -48,6 +53,11 @@
int reset_gpio_port[OMAP3_HS_USB_PORTS];
struct regulator *regulator[OMAP3_HS_USB_PORTS];
unsigned phy_reset:1;
+ /*
+ * Each Port can have an external transceiver requiring clock control
+ * for low power mode entry
+ */
+ struct clk *transceiver_clk[OMAP3_HS_USB_PORTS];
};
struct ohci_hcd_omap_platform_data {
@@ -100,9 +110,6 @@
extern void usbhs_init(const struct usbhs_omap_board_data *pdata);
-extern int omap_usbhs_enable(struct device *dev);
-extern void omap_usbhs_disable(struct device *dev);
-
extern int omap4430_phy_power(struct device *dev, int ID, int on);
extern int omap4430_phy_set_clk(struct device *dev, int on);
extern int omap4430_phy_init(struct device *dev);
@@ -293,4 +300,9 @@
}
#endif
+extern void usbhs_wakeup(void);
+extern void omap4_trigger_ioctrl(void);
+
+#define USBHS_EHCI_HWMODNAME "usbhs_ehci"
+
#endif /* __ASM_ARCH_OMAP_USB_H */
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 34fc31e..632fbe2 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -107,11 +107,8 @@
if (!arch_iommu)
return -ENODEV;
- clk_enable(obj->clk);
-
err = arch_iommu->enable(obj);
- clk_disable(obj->clk);
return err;
}
@@ -120,11 +117,7 @@
if (!obj)
return;
- clk_enable(obj->clk);
-
arch_iommu->disable(obj);
-
- clk_disable(obj->clk);
}
/*
@@ -244,11 +237,14 @@
struct iotlb_lock l;
struct cr_regs *cr;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return -EBUSY;
+ }
+
if (!obj || !obj->nr_tlb_entries || !e)
return -EINVAL;
- clk_enable(obj->clk);
-
iotlb_lock_get(obj, &l);
if (l.base == obj->nr_tlb_entries) {
dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
@@ -277,7 +273,6 @@
cr = iotlb_alloc_cr(obj, e);
if (IS_ERR(cr)) {
- clk_disable(obj->clk);
return PTR_ERR(cr);
}
@@ -291,7 +286,6 @@
l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
- clk_disable(obj->clk);
return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
@@ -308,7 +302,10 @@
int i;
struct cr_regs cr;
- clk_enable(obj->clk);
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return;
+ }
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
@@ -327,7 +324,6 @@
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
- clk_disable(obj->clk);
if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -362,15 +358,11 @@
{
struct iotlb_lock l;
- clk_enable(obj->clk);
-
l.base = 0;
l.vict = 0;
iotlb_lock_set(obj, &l);
iommu_write_reg(obj, 1, MMU_GFLUSH);
-
- clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);
@@ -385,9 +377,7 @@
*/
void iommu_set_twl(struct iommu *obj, bool on)
{
- clk_enable(obj->clk);
arch_iommu->set_twl(obj, on);
- clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);
@@ -398,12 +388,8 @@
if (!obj || !buf)
return -EINVAL;
- clk_enable(obj->clk);
-
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
- clk_disable(obj->clk);
-
return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);
@@ -415,7 +401,6 @@
struct cr_regs tmp;
struct cr_regs *p = crs;
- clk_enable(obj->clk);
iotlb_lock_get(obj, &saved);
for_each_iotlb_cr(obj, num, i, tmp) {
@@ -425,7 +410,6 @@
}
iotlb_lock_set(obj, &saved);
- clk_disable(obj->clk);
return p - crs;
}
@@ -471,22 +455,15 @@
*/
static void flush_iopgd_range(u32 *first, u32 *last)
{
- /* FIXME: L2 cache should be taken care of if it exists */
- do {
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
- : : "r" (first));
- first += L1_CACHE_BYTES / sizeof(*first);
- } while (first <= last);
+ dmac_flush_range(first, last);
+ outer_flush_range(virt_to_phys(first), virt_to_phys(last));
}
+
static void flush_iopte_range(u32 *first, u32 *last)
{
- /* FIXME: L2 cache should be taken care of if it exists */
- do {
- asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
- : : "r" (first));
- first += L1_CACHE_BYTES / sizeof(*first);
- } while (first <= last);
+ dmac_flush_range(first, last);
+ outer_flush_range(virt_to_phys(first), virt_to_phys(last));
}
static void iopte_free(u32 *iopte)
@@ -515,7 +492,7 @@
return ERR_PTR(-ENOMEM);
*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
} else {
@@ -544,7 +521,7 @@
}
*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
return 0;
}
@@ -561,7 +538,7 @@
for (i = 0; i < 16; i++)
*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
- flush_iopgd_range(iopgd, iopgd + 15);
+ flush_iopgd_range(iopgd, iopgd + 16);
return 0;
}
@@ -574,7 +551,7 @@
return PTR_ERR(iopte);
*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
- flush_iopte_range(iopte, iopte);
+ flush_iopte_range(iopte, iopte + 1);
dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
__func__, da, pa, iopte, *iopte);
@@ -599,7 +576,7 @@
for (i = 0; i < 16; i++)
*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
- flush_iopte_range(iopte, iopte + 15);
+ flush_iopte_range(iopte, iopte + 16);
return 0;
}
@@ -649,6 +626,11 @@
{
int err;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return -EBUSY;
+ }
+
flush_iotlb_page(obj, e->da);
err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
@@ -670,6 +652,11 @@
{
u32 *iopgd, *iopte = NULL;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return;
+ }
+
iopgd = iopgd_offset(obj, da);
if (!*iopgd)
goto out;
@@ -739,6 +726,11 @@
{
size_t bytes;
+ if (obj && obj->secure_mode) {
+ WARN_ON(1);
+ return 0;
+ }
+
spin_lock(&obj->page_table_lock);
bytes = iopgtable_clear_entry_core(obj, da);
@@ -770,7 +762,7 @@
iopte_free(iopte_offset(iopgd, 0));
*iopgd = 0;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
}
flush_iotlb_all(obj);
@@ -790,9 +782,7 @@
if (!obj->refcount)
return IRQ_NONE;
- clk_enable(obj->clk);
errs = iommu_report_fault(obj, &da);
- clk_disable(obj->clk);
if (errs == 0)
return IRQ_HANDLED;
@@ -800,7 +790,7 @@
if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
return IRQ_HANDLED;
- iommu_disable(obj);
+ iommu_write_reg(obj, 0, MMU_IRQENABLE);
iopgd = iopgd_offset(obj, da);
@@ -839,7 +829,7 @@
{
if (!obj)
- return -EFAULT;
+ return -EIO;
if (end < start || !PAGE_ALIGN(start | end))
return -EINVAL;
@@ -871,9 +861,13 @@
mutex_lock(&obj->iommu_lock);
if (obj->refcount++ == 0) {
+ dev_info(obj->dev, "%s: %s qos_request\n", __func__, obj->name);
+ pm_qos_update_request(obj->qos_request, 10);
err = iommu_enable(obj);
- if (err)
+ if (err) {
+ pm_qos_update_request(obj->qos_request, -1);
goto err_enable;
+ }
flush_iotlb_all(obj);
}
@@ -906,8 +900,16 @@
mutex_lock(&obj->iommu_lock);
- if (--obj->refcount == 0)
+ if (!obj->refcount) {
+ dev_err(obj->dev, "%s: %s unbalanced iommu_get/put\n",
+ __func__, obj->name);
+ return;
+ }
+
+ if (--obj->refcount == 0) {
iommu_disable(obj);
+ pm_qos_update_request(obj->qos_request, -1);
+ }
module_put(obj->owner);
@@ -944,6 +946,30 @@
}
EXPORT_SYMBOL_GPL(iommu_set_isr);
+int iommu_set_secure(const char *name, bool enable, void *data)
+{
+ struct device *dev;
+ struct iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return -ENODEV;
+
+ obj = to_iommu(dev);
+ mutex_lock(&obj->iommu_lock);
+ if (obj->refcount) {
+ mutex_unlock(&obj->iommu_lock);
+ return -EBUSY;
+ }
+ obj->secure_mode = enable;
+ obj->secure_ttb = data;
+ mutex_unlock(&obj->iommu_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_secure);
+
/*
* OMAP Device MMU(IOMMU) detection
*/
@@ -951,25 +977,17 @@
{
int err = -ENODEV;
void *p;
- int irq;
struct iommu *obj;
- struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (pdev->num_resources != 2)
- return -EINVAL;
-
obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
+ obj->pdev = pdev;
obj->ctx = (void *)obj + sizeof(*obj);
obj->da_start = pdata->da_start;
obj->da_end = pdata->da_end;
@@ -979,31 +997,18 @@
spin_lock_init(&obj->page_table_lock);
INIT_LIST_HEAD(&obj->mmap);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto err_mem;
+ obj->regbase = pdata->io_base;
+
+ obj->qos_request = kzalloc(sizeof(*obj->qos_request), GFP_KERNEL);
+ if (!obj->qos_request) {
+ kfree(obj);
+ return -ENOMEM;
}
- res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!res) {
- err = -EIO;
- goto err_mem;
- }
+ pm_qos_add_request(obj->qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
- obj->regbase = ioremap(res->start, resource_size(res));
- if (!obj->regbase) {
- err = -ENOMEM;
- goto err_ioremap;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = -ENODEV;
- goto err_irq;
- }
- err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
+ err = request_irq(pdata->irq, iommu_fault_handler, IRQF_SHARED,
dev_name(&pdev->dev), obj);
if (err < 0)
goto err_irq;
@@ -1024,36 +1029,27 @@
return 0;
err_pgd:
- free_irq(irq, obj);
+ free_irq(pdata->irq, obj);
err_irq:
- iounmap(obj->regbase);
-err_ioremap:
- release_mem_region(res->start, resource_size(res));
-err_mem:
- clk_put(obj->clk);
-err_clk:
kfree(obj);
return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
- int irq;
- struct resource *res;
struct iommu *obj = platform_get_drvdata(pdev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
+
+ free_irq(pdata->irq, obj);
platform_set_drvdata(pdev, NULL);
iopgtable_clear_entry_all(obj);
free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, obj);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- iounmap(obj->regbase);
+ pm_qos_remove_request(obj->qos_request);
+ kfree(obj->qos_request);
- clk_put(obj->clk);
dev_info(&pdev->dev, "%s removed\n", obj->name);
kfree(obj);
return 0;
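Note on the iommu.c hunks above: per-access clock gating is dropped in favour of a CPU/DMA latency constraint held for as long as the MMU has users, an unbalanced-put guard is added, and page-table updates are refused while secure mode is set. Below is a minimal user-space sketch of that refcount-plus-constraint pattern; all helpers and the "ducati" name are stand-ins, not kernel APIs.

/*
 * Illustrative sketch (not part of the patch): the refcount + constraint
 * pattern used by iommu_get()/iommu_put() above, modeled in plain C.
 * set_latency_constraint() stands in for pm_qos_update_request(); the
 * values 10 and -1 mirror the hold/release values in the patch.
 */
#include <stdio.h>

struct fake_iommu {
	const char *name;		/* arbitrary example name */
	int refcount;
	int latency_constraint;		/* -1 means "no constraint" */
};

static void set_latency_constraint(struct fake_iommu *obj, int usec)
{
	obj->latency_constraint = usec;
	printf("%s: latency constraint now %d\n", obj->name, usec);
}

static int fake_iommu_get(struct fake_iommu *obj)
{
	if (obj->refcount++ == 0) {
		/* first user: block deep low-power states while the MMU is active */
		set_latency_constraint(obj, 10);
		printf("%s: enabled\n", obj->name);
	}
	return 0;
}

static void fake_iommu_put(struct fake_iommu *obj)
{
	if (!obj->refcount) {
		/* guard against unbalanced get/put, as in the patch */
		fprintf(stderr, "%s: unbalanced get/put\n", obj->name);
		return;
	}
	if (--obj->refcount == 0) {
		printf("%s: disabled\n", obj->name);
		set_latency_constraint(obj, -1);
	}
}

int main(void)
{
	struct fake_iommu obj = { "ducati", 0, -1 };

	fake_iommu_get(&obj);
	fake_iommu_get(&obj);	/* second user: no extra constraint */
	fake_iommu_put(&obj);
	fake_iommu_put(&obj);	/* last user releases the constraint */
	fake_iommu_put(&obj);	/* unbalanced put is caught */
	return 0;
}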
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 83a37c5..6e711b9 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -163,10 +163,10 @@
if (!sgt)
return;
+ pr_debug("%s: sgt:%p\n", __func__, sgt);
+
sg_free_table(sgt);
kfree(sgt);
-
- pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 69ddc9f..653153e 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -29,6 +29,7 @@
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/notifier.h>
+#include <linux/pm_qos_params.h>
#include <plat/mailbox.h>
@@ -36,6 +37,10 @@
static int mbox_configured;
static DEFINE_MUTEX(mbox_configured_lock);
+struct pm_qos_request_list mbox_qos_request;
+
+#define SET_MPU_CORE_CONSTRAINT 10
+#define CLEAR_MPU_CORE_CONSTRAINT -1
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
@@ -251,6 +256,8 @@
mutex_lock(&mbox_configured_lock);
if (!mbox_configured++) {
+ pm_qos_update_request(&mbox_qos_request,
+ SET_MPU_CORE_CONSTRAINT);
if (likely(mbox->ops->startup)) {
ret = mbox->ops->startup(mbox);
if (unlikely(ret))
@@ -260,13 +267,6 @@
}
if (!mbox->use_count++) {
- ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
- mbox->name, mbox);
- if (unlikely(ret)) {
- pr_err("failed to register mailbox interrupt:%d\n",
- ret);
- goto fail_request_irq;
- }
mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
if (!mq) {
ret = -ENOMEM;
@@ -281,20 +281,29 @@
}
mbox->rxq = mq;
mq->mbox = mbox;
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n",
+ ret);
+ goto fail_request_irq;
+ }
}
mutex_unlock(&mbox_configured_lock);
return 0;
+fail_request_irq:
+ mbox_queue_free(mbox->rxq);
fail_alloc_rxq:
mbox_queue_free(mbox->txq);
fail_alloc_txq:
- free_irq(mbox->irq, mbox);
-fail_request_irq:
if (mbox->ops->shutdown)
mbox->ops->shutdown(mbox);
mbox->use_count--;
fail_startup:
- mbox_configured--;
+ if (!--mbox_configured)
+ pm_qos_update_request(&mbox_qos_request,
+ CLEAR_MPU_CORE_CONSTRAINT);
mutex_unlock(&mbox_configured_lock);
return ret;
}
@@ -306,14 +315,17 @@
if (!--mbox->use_count) {
free_irq(mbox->irq, mbox);
tasklet_kill(&mbox->txq->tasklet);
- flush_work_sync(&mbox->rxq->work);
+ flush_work_sync(&mbox->rxq->work);
mbox_queue_free(mbox->txq);
mbox_queue_free(mbox->rxq);
}
if (likely(mbox->ops->shutdown)) {
- if (!--mbox_configured)
+ if (!--mbox_configured) {
mbox->ops->shutdown(mbox);
+ pm_qos_update_request(&mbox_qos_request,
+ CLEAR_MPU_CORE_CONSTRAINT);
+ }
}
mutex_unlock(&mbox_configured_lock);
@@ -350,7 +362,8 @@
void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&mbox->notifier, nb);
+ if (nb)
+ blocking_notifier_chain_unregister(&mbox->notifier, nb);
omap_mbox_fini(mbox);
}
EXPORT_SYMBOL(omap_mbox_put);
@@ -395,6 +408,7 @@
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
+
mboxes = NULL;
return 0;
}
@@ -413,6 +427,8 @@
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
+ pm_qos_add_request(&mbox_qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
return 0;
}
subsys_initcall(omap_mbox_init);
@@ -420,6 +436,7 @@
static void __exit omap_mbox_exit(void)
{
class_unregister(&omap_mbox_class);
+ pm_qos_remove_request(&mbox_qos_request);
}
module_exit(omap_mbox_exit);
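Note on the mailbox.c hunks above: the startup path now requests its IRQ only after both message queues exist, so the error labels unwind in exact reverse order of acquisition, and a CPU/DMA latency constraint is raised when the first mailbox user appears and cleared with the last. A compact sketch of the acquire-in-order/unwind-in-reverse pattern follows; all helpers here are stand-ins.

/*
 * Illustrative sketch (not part of the patch): the "acquire in order,
 * unwind in reverse" goto pattern that the reordered request_irq() in
 * the mailbox startup path restores.
 */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_step(const char *what)
{
	printf("alloc %s\n", what);
	return malloc(1);
}

static void free_step(const char *what, void *p)
{
	printf("free %s\n", what);
	free(p);
}

static int startup(int fail_irq)
{
	void *txq, *rxq, *irq = NULL;
	int ret = 0;

	txq = alloc_step("txq");
	if (!txq)
		return -1;

	rxq = alloc_step("rxq");
	if (!rxq) {
		ret = -1;
		goto fail_alloc_rxq;
	}

	/* request the IRQ last, once both queues exist ... */
	if (!fail_irq)
		irq = alloc_step("irq");
	if (!irq) {
		ret = -1;
		goto fail_request_irq;
	}
	return 0;

	/* ... so the unwind order is the exact reverse of acquisition */
fail_request_irq:
	free_step("rxq", rxq);
fail_alloc_rxq:
	free_step("txq", txq);
	return ret;
}

int main(void)
{
	return startup(1) ? 0 : 1;	/* exercise the failure path */
}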
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 5587acf..e02baa4 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -75,6 +75,11 @@
{
return __raw_readl(mcbsp->st_data->io_base_st + reg);
}
+
+#define MCBSP_ST_READ(mcbsp, reg) \
+ omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
+#define MCBSP_ST_WRITE(mcbsp, reg, val) \
+ omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
#endif
#define MCBSP_READ(mcbsp, reg) \
@@ -84,11 +89,6 @@
#define MCBSP_READ_CACHE(mcbsp, reg) \
omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
-#define MCBSP_ST_READ(mcbsp, reg) \
- omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
-#define MCBSP_ST_WRITE(mcbsp, reg, val) \
- omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
-
static void omap_mcbsp_dump_reg(u8 id)
{
struct omap_mcbsp *mcbsp = id_to_mcbsp_ptr(id);
@@ -292,14 +292,16 @@
}
EXPORT_SYMBOL(omap_mcbsp_dma_reg_params);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static struct omap_device *find_omap_device_by_dev(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
return container_of(pdev, struct omap_device, pdev);
}
+#endif
+#ifdef CONFIG_ARCH_OMAP3
static void omap_st_on(struct omap_mcbsp *mcbsp)
{
unsigned int w;
@@ -550,7 +552,12 @@
return st_data->enabled;
}
EXPORT_SYMBOL(omap_st_is_enabled);
+#else
+static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
+static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
/*
* omap_mcbsp_set_rx_threshold configures the transmit threshold in words.
* The threshold parameter is 1 based, and it is converted (threshold - 1)
@@ -754,8 +761,6 @@
#else
static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
-static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
-static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
#endif
/*
@@ -991,6 +996,25 @@
MCBSP_WRITE(mcbsp, RCCR, w);
}
+	/* Disable and re-enable transmitter if ready */
+ if (tx && (MCBSP_READ(mcbsp, SPCR2) & XRDY)) {
+ MCBSP_WRITE(mcbsp, SPCR2,
+ MCBSP_READ_CACHE(mcbsp, SPCR2) &
+ (~XRST));
+ MCBSP_WRITE(mcbsp, SPCR2,
+ MCBSP_READ_CACHE(mcbsp, SPCR2) |
+ (XRST));
+ }
+	/* Disable and re-enable receiver if ready */
+ if (rx && (MCBSP_READ(mcbsp, SPCR1) & RRDY)) {
+ MCBSP_WRITE(mcbsp, SPCR1,
+ MCBSP_READ_CACHE(mcbsp, SPCR1) &
+ (~RRST));
+ MCBSP_WRITE(mcbsp, SPCR1,
+ MCBSP_READ_CACHE(mcbsp, SPCR1) |
+ (RRST));
+ }
+
/* Dump McBSP Regs */
omap_mcbsp_dump_reg(id);
}
@@ -1522,7 +1546,7 @@
}
EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
#define max_thres(m) (mcbsp->pdata->buffer_size)
#define valid_threshold(m, val) ((val) <= max_thres(m))
#define THRESHOLD_PROP_BUILDER(prop) \
@@ -1613,6 +1637,29 @@
static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+static const struct attribute *additional_attrs[] = {
+ &dev_attr_max_tx_thres.attr,
+ &dev_attr_max_rx_thres.attr,
+ &dev_attr_dma_op_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+ .attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
static ssize_t st_taps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1671,27 +1718,6 @@
static DEVICE_ATTR(st_taps, 0644, st_taps_show, st_taps_store);
-static const struct attribute *additional_attrs[] = {
- &dev_attr_max_tx_thres.attr,
- &dev_attr_max_rx_thres.attr,
- &dev_attr_dma_op_mode.attr,
- NULL,
-};
-
-static const struct attribute_group additional_attr_group = {
- .attrs = (struct attribute **)additional_attrs,
-};
-
-static inline int __devinit omap_additional_add(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &additional_attr_group);
-}
-
-static inline void __devexit omap_additional_remove(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &additional_attr_group);
-}
-
static const struct attribute *sidetone_attrs[] = {
&dev_attr_st_taps.attr,
NULL,
@@ -1749,11 +1775,16 @@
kfree(st_data);
}
}
+#else
+static inline int __devinit omap_st_add(struct omap_mcbsp *mcbsp) { return 0; }
+static inline void __devexit omap_st_remove(struct omap_mcbsp *mcbsp) {}
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
{
mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
/*
* Initially configure the maximum thresholds to a safe value.
* The McBSP FIFO usage with these values should not go under
@@ -1771,26 +1802,26 @@
if (omap_additional_add(mcbsp->dev))
dev_warn(mcbsp->dev,
"Unable to create additional controls\n");
+ } else {
+ mcbsp->max_tx_thres = -EINVAL;
+ mcbsp->max_rx_thres = -EINVAL;
+ }
+ if (cpu_is_omap34xx()) {
if (mcbsp->id == 2 || mcbsp->id == 3)
if (omap_st_add(mcbsp))
dev_warn(mcbsp->dev,
"Unable to create sidetone controls\n");
-
- } else {
- mcbsp->max_tx_thres = -EINVAL;
- mcbsp->max_rx_thres = -EINVAL;
}
}
static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
{
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
omap_additional_remove(mcbsp->dev);
-
+ if (cpu_is_omap34xx())
if (mcbsp->id == 2 || mcbsp->id == 3)
omap_st_remove(mcbsp);
- }
}
#else
static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
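Note on the McBSP hunk above: when the transmitter or receiver reports ready, its reset bit (XRST/RRST) is pulsed by clearing and then setting it again from the cached SPCR value. A tiny sketch of that clear-then-set toggle follows; the bit position and register value are illustrative only, not the real register layout.

/*
 * Illustrative sketch (not part of the patch): the clear-then-set reset
 * bit toggle used above, with register access simulated by a variable.
 */
#include <stdio.h>
#include <stdint.h>

#define XRST (1u << 0)	/* assumed bit position, for illustration only */

static uint32_t spcr2 = 0x00000041;	/* pretend cached SPCR2 value */

static void reg_write(uint32_t val)
{
	spcr2 = val;
	printf("SPCR2 <= 0x%08x\n", (unsigned)val);
}

int main(void)
{
	/* pulse the reset bit: clear it, then set it again */
	reg_write(spcr2 & ~XRST);
	reg_write(spcr2 | XRST);
	return 0;
}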
diff --git a/arch/arm/plat-omap/omap-pm-helper.c b/arch/arm/plat-omap/omap-pm-helper.c
new file mode 100644
index 0000000..2fdd7af4
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-helper.c
@@ -0,0 +1,345 @@
+/*
+ * omap-pm-helper.c - OMAP power management interface helpers
+ *
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ * Vishwanath BS
+ *
+ * This code is based on plat-omap/omap-pm-noop.c.
+ *
+ * Interface developed by (in alphabetical order):
+ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
+ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/* Interface documentation is in mach/omap-pm.h */
+#include <plat/omap-pm.h>
+#include <plat/omap_device.h>
+#include <plat/common.h>
+#include "../mach-omap2/powerdomain.h"
+#include "../mach-omap2/dvfs.h"
+#include "omap-pm-helper.h"
+
+struct omap_opp *dsp_opps;
+struct omap_opp *mpu_opps;
+struct omap_opp *l3_opps;
+
+static DEFINE_MUTEX(bus_tput_mutex);
+static DEFINE_MUTEX(mpu_tput_mutex);
+static DEFINE_MUTEX(mpu_lat_mutex);
+
+/* Used to model interconnect throughput */
+static struct interconnect_tput {
+ /* Total no of users at any point of interconnect */
+ u8 no_of_users;
+ /* List of all the current users for interconnect */
+ struct list_head users_list;
+ struct list_head node;
+ /* Protect interconnect throughput */
+ struct mutex throughput_mutex;
+ /* Target level for interconnect throughput */
+ unsigned long target_level;
+
+} *bus_tput;
+
+/* Used to represent a user of the interconnect throughput */
+struct users {
+ /* Device pointer used to uniquely identify the user */
+ struct device *dev;
+ struct list_head node;
+ /* Current level as requested for interconnect throughput by the user */
+ u32 level;
+};
+
+/* Private/Internal Functions */
+
+/**
+ * user_lookup - look up an interconnect user by its device pointer
+ * @dev: The device to be looked up
+ *
+ * Looks for an interconnect user by its device pointer. Returns a
+ * pointer to the struct users if found, else returns NULL.
+ */
+static struct users *user_lookup(struct device *dev)
+{
+ struct users *usr, *tmp_usr;
+
+ usr = NULL;
+ list_for_each_entry(tmp_usr, &bus_tput->users_list, node) {
+ if (tmp_usr->dev == dev) {
+ usr = tmp_usr;
+ break;
+ }
+ }
+
+ return usr;
+}
+
+/**
+ * get_user - allocate a new users struct dynamically
+ *
+ * This function dynamically allocates the user node.
+ * Returns a pointer to the users struct on success. On allocation
+ * failure it returns ERR_PTR(-ENOMEM).
+ */
+static struct users *get_user(void)
+{
+ struct users *user;
+
+ user = kmalloc(sizeof(struct users), GFP_KERNEL);
+ if (!user) {
+ pr_err("%s FATAL ERROR: kmalloc failed\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+ return user;
+}
+
+#ifdef CONFIG_PM_DEBUG
+static int pm_dbg_show_tput(struct seq_file *s, void *unused)
+{
+ struct users *usr;
+
+ mutex_lock(&bus_tput->throughput_mutex);
+ list_for_each_entry(usr, &bus_tput->users_list, node)
+ seq_printf(s, "%s: %u\n", dev_name(usr->dev),
+ usr->level);
+ mutex_unlock(&bus_tput->throughput_mutex);
+
+ return 0;
+}
+
+static int pm_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_dbg_show_tput,
+ &inode->i_private);
+}
+
+static const struct file_operations tputdebugfs_fops = {
+ .open = pm_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+/**
+ * omap_bus_tput_init - initialize the interconnect throughput user list
+ *
+ * Allocates memory for the global throughput variable dynamically and
+ * initializes the user list, the number of users and the throughput
+ * target level. Returns 0 on success, else returns -EINVAL if memory
+ * allocation fails.
+ */
+static int __init omap_bus_tput_init(void)
+{
+ bus_tput = kmalloc(sizeof(struct interconnect_tput), GFP_KERNEL);
+ if (!bus_tput) {
+ pr_err("%s FATAL ERROR: kmalloc failed\n", __func__);
+ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&bus_tput->users_list);
+ mutex_init(&bus_tput->throughput_mutex);
+ bus_tput->no_of_users = 0;
+ bus_tput->target_level = 0;
+
+#ifdef CONFIG_PM_DEBUG
+ (void) debugfs_create_file("tput", S_IRUGO,
+ NULL, (void *)bus_tput, &tputdebugfs_fops);
+#endif
+
+ return 0;
+}
+
+/**
+ * add_req_tput - request a throughput level on behalf of a device
+ * @dev: Uniquely identifies the caller
+ * @level: The requested level for the interconnect bandwidth in KiB/s
+ *
+ * This function recomputes the target level of the interconnect
+ * bandwidth based on the levels requested by all the users.
+ * Multiple calls to this function by the same device replace the
+ * previously requested level.
+ * Returns the updated level of interconnect throughput, or 0 in case
+ * of an invalid dev or user pointer.
+ */
+static unsigned long add_req_tput(struct device *dev, unsigned long level)
+{
+ int ret;
+ struct users *user;
+
+	if (!dev) {
+		pr_err("Invalid dev pointer\n");
+		return 0;
+	}
+ mutex_lock(&bus_tput->throughput_mutex);
+ user = user_lookup(dev);
+ if (user == NULL) {
+ user = get_user();
+ if (IS_ERR(user)) {
+			pr_err("Couldn't get user from the list to "
+				"add new throughput constraint\n");
+ ret = 0;
+ goto unlock;
+ }
+ bus_tput->target_level += level;
+ bus_tput->no_of_users++;
+ user->dev = dev;
+ list_add(&user->node, &bus_tput->users_list);
+ user->level = level;
+ } else {
+ bus_tput->target_level -= user->level;
+ bus_tput->target_level += level;
+ user->level = level;
+ }
+ ret = bus_tput->target_level;
+unlock:
+ mutex_unlock(&bus_tput->throughput_mutex);
+ return ret;
+}
+
+/**
+ * remove_req_tput - release a previously requested interconnect
+ * throughput level
+ * @dev: Device pointer identifying the user
+ *
+ * This function recomputes the target level of the interconnect
+ * throughput after removing the level requested by the user.
+ * Returns 0 if the dev structure is invalid, else returns the
+ * modified interconnect throughput rate.
+ */
+static unsigned long remove_req_tput(struct device *dev)
+{
+ struct users *user;
+ int found = 0;
+ int ret;
+
+ mutex_lock(&bus_tput->throughput_mutex);
+ list_for_each_entry(user, &bus_tput->users_list, node) {
+ if (user->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ /* No such user exists */
+ pr_err("Invalid Device Structure\n");
+ ret = 0;
+ goto unlock;
+ }
+ bus_tput->target_level -= user->level;
+ bus_tput->no_of_users--;
+ list_del(&user->node);
+ kfree(user);
+ ret = bus_tput->target_level;
+unlock:
+ mutex_unlock(&bus_tput->throughput_mutex);
+ return ret;
+}
+
+int omap_pm_set_min_bus_tput_helper(struct device *dev, u8 agent_id, long r)
+{
+
+ int ret = 0;
+ struct device *l3_dev;
+ static struct device dummy_l3_dev = {
+ .init_name = "omap_pm_set_min_bus_tput",
+ };
+ unsigned long target_level = 0;
+
+ mutex_lock(&bus_tput_mutex);
+
+ l3_dev = omap2_get_l3_device();
+ if (!l3_dev) {
+ pr_err("Unable to get l3 device pointer");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (r == -1)
+ target_level = remove_req_tput(dev);
+ else
+ target_level = add_req_tput(dev, r);
+
+	/* Convert the throughput (in KiB/s) into Hz. */
+ target_level = (target_level * 1000) / 4;
+
+ ret = omap_device_scale(&dummy_l3_dev, l3_dev, target_level);
+ if (ret)
+ pr_err("Failed: change interconnect bandwidth to %ld\n",
+ target_level);
+unlock:
+ mutex_unlock(&bus_tput_mutex);
+ return ret;
+}
+
+int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t)
+{
+ struct omap_device *odev;
+ struct powerdomain *pwrdm_dev;
+ struct platform_device *pdev;
+ int ret = 0;
+
+ if (!req_dev || !dev || t < -1) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ };
+
+	/* Look for the device's power domain */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ /* Try to catch non platform devices. */
+ if (pdev->name == NULL) {
+ pr_err("OMAP-PM: Error: platform device not valid\n");
+ return -EINVAL;
+ }
+
+ odev = to_omap_device(pdev);
+ if (odev) {
+ pwrdm_dev = omap_device_get_pwrdm(odev);
+ } else {
+ pr_err("OMAP-PM: Error: Could not find omap_device for %s\n",
+ pdev->name);
+ return -EINVAL;
+ }
+
+ /* Catch devices with undefined powerdomains. */
+ if (!pwrdm_dev) {
+ pr_err("OMAP-PM: Error: could not find parent pwrdm for %s\n",
+ pdev->name);
+ return -EINVAL;
+ }
+
+ if (t == -1)
+ ret = pwrdm_wakeuplat_release_constraint(pwrdm_dev, req_dev);
+ else
+ ret = pwrdm_wakeuplat_set_constraint(pwrdm_dev, req_dev, t);
+
+ return ret;
+}
+
+/* Must be called after clock framework is initialized */
+int __init omap_pm_if_init_helper(void)
+{
+ int ret;
+ ret = omap_bus_tput_init();
+ if (ret)
+ pr_err("Failed: init of interconnect bandwidth users list\n");
+ return ret;
+}
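Note on omap-pm-helper.c above: the helper keeps one aggregate bandwidth target. Each device's request is added to the user list (or replaces that device's previous entry), removals subtract it back out, and the sum in KiB/s is converted with "* 1000 / 4" before omap_device_scale() is called on the L3 device. The sketch below models that bookkeeping; reading the /4 as "KiB/s to Hz for a 4-byte-wide bus" is an interpretation, not something the patch states, and the string keys and fixed-size table stand in for device pointers and kmalloc.

/*
 * Illustrative sketch (not part of the patch): aggregating per-device
 * bandwidth requests into one interconnect target level.
 */
#include <stdio.h>
#include <string.h>

#define MAX_USERS 8

struct tput_user {
	const char *name;	/* stands in for the struct device pointer */
	unsigned long level;	/* requested bandwidth in KiB/s */
	int in_use;
};

static struct tput_user users[MAX_USERS];
static unsigned long target_level;	/* sum of all active requests */

static unsigned long add_req(const char *name, unsigned long level)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_USERS; i++) {
		if (users[i].in_use && !strcmp(users[i].name, name)) {
			/* repeat request from same device replaces its old level */
			target_level += level - users[i].level;
			users[i].level = level;
			return target_level;
		}
		if (!users[i].in_use && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return target_level;	/* table full; the real code allocates */
	users[free_slot] = (struct tput_user){ name, level, 1 };
	target_level += level;
	return target_level;
}

static unsigned long remove_req(const char *name)
{
	int i;

	for (i = 0; i < MAX_USERS; i++) {
		if (users[i].in_use && !strcmp(users[i].name, name)) {
			target_level -= users[i].level;
			users[i].in_use = 0;
			break;
		}
	}
	return target_level;
}

int main(void)
{
	unsigned long rate;

	add_req("dss", 800000);		/* example requests in KiB/s */
	rate = add_req("usb", 200000);
	printf("aggregate %lu KiB/s -> %lu Hz\n", rate, rate * 1000 / 4);

	rate = remove_req("dss");
	printf("aggregate %lu KiB/s -> %lu Hz\n", rate, rate * 1000 / 4);
	return 0;
}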
diff --git a/arch/arm/plat-omap/omap-pm-helper.h b/arch/arm/plat-omap/omap-pm-helper.h
new file mode 100644
index 0000000..9c4b5d7
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-helper.h
@@ -0,0 +1,40 @@
+/*
+ * OMAP PM interface helpers
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OMAP_PM_HELPER_INTERFACE_H__
+#define __OMAP_PM_HELPER_INTERFACE_H__
+
+#ifdef CONFIG_OMAP_PM
+int omap_pm_set_min_bus_tput_helper(struct device *dev, u8 agent_id, long r);
+int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t);
+int __init omap_pm_if_init_helper(void);
+
+#else
+static inline int omap_pm_set_min_bus_tput_helper(struct device *dev,
+ u8 agent_id, long r)
+{
+ return 0;
+}
+
+static inline int omap_pm_set_max_dev_wakeup_lat_helper(struct device *req_dev,
+ struct device *dev, long t)
+{
+ return 0;
+}
+
+static inline int omap_pm_if_init_helper(void)
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP_PM */
+
+#endif /* __OMAP_PM_HELPER_INTERFACE_H__ */
diff --git a/arch/arm/plat-omap/omap-pm-interface.c b/arch/arm/plat-omap/omap-pm-interface.c
new file mode 100644
index 0000000..e166395
--- /dev/null
+++ b/arch/arm/plat-omap/omap-pm-interface.c
@@ -0,0 +1,251 @@
+/*
+ * omap-pm-interface.c - OMAP power management interface
+ *
+ * This code implements the OMAP power management interface to
+ * drivers, CPUIdle, CPUFreq, and DSP Bridge.
+ *
+ * Copyright (C) 2008-2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * Interface developed by (in alphabetical order):
+ * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
+ * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+/* Interface documentation is in mach/omap-pm.h */
+#include <plat/omap-pm.h>
+#include <plat/omap_device.h>
+
+#include "omap-pm-helper.h"
+#include "../mach-omap2/prm44xx.h"
+
+bool off_mode_enabled;
+
+/*
+ * Device-driver-originated constraints (via board-*.c files)
+ * WARNING: Device drivers should now use pm_qos directly.
+ */
+int omap_pm_set_max_mpu_wakeup_lat(struct pm_qos_request_list **pmqos_req,
+ long t)
+{
+ WARN(1, "Deprecated %s: Driver should use pm_qos to add request\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, long r)
+{
+ int ret;
+ if (!dev || (agent_id != OCP_INITIATOR_AGENT &&
+ agent_id != OCP_TARGET_AGENT)) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ };
+
+ if (r == -1)
+ pr_debug("OMAP PM: remove min bus tput constraint: "
+ "dev %s for agent_id %d\n", dev_name(dev), agent_id);
+ else
+ pr_debug("OMAP PM: add min bus tput constraint: "
+ "dev %s for agent_id %d: rate %ld KiB\n",
+ dev_name(dev), agent_id, r);
+
+ ret = omap_pm_set_min_bus_tput_helper(dev, agent_id, r);
+
+ return ret;
+}
+
+int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
+ long t)
+{
+ int ret;
+ if (!req_dev || !dev || t < -1) {
+ WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
+ return -EINVAL;
+ };
+
+ if (t == -1)
+ pr_debug("OMAP PM: remove max device latency constraint: "
+ "dev %s\n", dev_name(dev));
+ else
+ pr_debug("OMAP PM: add max device latency constraint: "
+ "dev %s, t = %ld usec\n", dev_name(dev), t);
+
+ ret = omap_pm_set_max_dev_wakeup_lat_helper(req_dev, dev, t);
+
+ return ret;
+}
+
+/* WARNING: Device drivers should now use pm_qos directly. */
+int omap_pm_set_max_sdma_lat(struct pm_qos_request_list **qos_request, long t)
+{
+ WARN(1, "Deprecated %s: Driver should use pm_qos to add request\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+/*
+ * DSP Bridge-specific constraints
+ * WARNING: Device drivers should now use the OPP layer/omap_device_scale directly.
+ */
+const struct omap_opp *omap_pm_dsp_get_opp_table(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return ERR_PTR(-EINVAL);
+}
+
+void omap_pm_dsp_set_min_opp(u8 opp_id)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return;
+}
+
+int omap_pm_set_min_mpu_freq(struct device *dev, unsigned long f)
+{
+ WARN(1, "Deprecated %s: Driver should NOT use this function\n",
+ __func__);
+
+ return -EINVAL;
+
+}
+
+EXPORT_SYMBOL(omap_pm_set_min_mpu_freq);
+
+u8 omap_pm_dsp_get_opp(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return 0;
+}
+
+/*
+ * CPUFreq-originated constraint
+ *
+ * In the future, this should be handled by custom OPP clocktype
+ * functions.
+ */
+
+struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return ERR_PTR(-EINVAL);
+}
+
+void omap_pm_cpu_set_freq(unsigned long f)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return;
+}
+
+unsigned long omap_pm_cpu_get_freq(void)
+{
+ WARN(1, "Deprecated %s: Driver should use omap_device_scale/opp\n",
+ __func__);
+
+ return 0;
+}
+
+/**
+ * omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been enabled.
+ */
+void omap_pm_enable_off_mode(void)
+{
+ off_mode_enabled = true;
+}
+
+/**
+ * omap_pm_disable_off_mode - notify OMAP PM that off-mode is disabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been disabled.
+ */
+void omap_pm_disable_off_mode(void)
+{
+ off_mode_enabled = false;
+}
+
+bool omap_pm_was_context_lost(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct omap_device *od;
+ struct omap_hwmod *oh;
+
+ if (!dev)
+ goto save_ctx;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ od = container_of(pdev, struct omap_device, pdev);
+ oh = od->hwmods[0];
+
+ if (!oh || !cpu_is_omap44xx())
+ goto save_ctx;
+
+ if (oh->prcm.omap4.context_reg) {
+ u32 context_reg_val = 0;
+
+		/* Read which context was lost. */
+ context_reg_val = __raw_readl(oh->prcm.omap4.context_reg);
+
+		/* Clear the context-lost bits after reading them. */
+ __raw_writel(context_reg_val, oh->prcm.omap4.context_reg);
+
+		/* ABE special case: only report context lost when we lose
+		 * memory; otherwise, constant firmware reloads cause problems.
+ */
+ if (oh->prcm.omap4.context_reg == OMAP4430_RM_ABE_AESS_CONTEXT)
+ context_reg_val &= (1 << 8);
+
+ return (context_reg_val != 0);
+ }
+
+save_ctx:
+	/* By default return true so that the driver will restore context */
+ return true;
+}
+
+/* Should be called before clk framework init */
+int __init omap_pm_if_early_init(void)
+{
+ return 0;
+}
+
+/* Must be called after clock framework is initialized */
+int __init omap_pm_if_init(void)
+{
+ return omap_pm_if_init_helper();
+}
+
+void omap_pm_if_exit(void)
+{
+ /* Deallocate CPUFreq frequency table here */
+}
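Note on omap-pm-interface.c above: omap_pm_was_context_lost() reads the hwmod's context register, writes the value back to clear it, and for the ABE domain counts only bit 8 (memory loss) so constant firmware reloads are avoided. The sketch below models that read-then-clear check with the register simulated by a variable; the bit meaning is taken from the patch, not re-derived.

/*
 * Illustrative sketch (not part of the patch): read-then-clear context
 * register handling with the ABE bit-8 special case.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t context_reg = 0x00000003;	/* pretend lost-context bits */

static bool was_context_lost(bool is_abe)
{
	uint32_t val = context_reg;

	/* clear the lost-context bits after reading them */
	context_reg = 0;

	/* ABE: only memory loss (bit 8) forces a firmware reload */
	if (is_abe)
		val &= (1 << 8);

	return val != 0;
}

int main(void)
{
	printf("ABE lost?     %d\n", was_context_lost(true));	/* bit 8 clear */
	context_reg = 1 << 8;
	printf("ABE mem lost? %d\n", was_context_lost(true));
	return 0;
}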
diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
deleted file mode 100644
index b0471bb2..0000000
--- a/arch/arm/plat-omap/omap-pm-noop.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * omap-pm-noop.c - OMAP power management interface - dummy version
- *
- * This code implements the OMAP power management interface to
- * drivers, CPUIdle, CPUFreq, and DSP Bridge. It is strictly for
- * debug/demonstration use, as it does nothing but printk() whenever a
- * function is called (when DEBUG is defined, below)
- *
- * Copyright (C) 2008-2009 Texas Instruments, Inc.
- * Copyright (C) 2008-2009 Nokia Corporation
- * Paul Walmsley
- *
- * Interface developed by (in alphabetical order):
- * Karthik Dasu, Tony Lindgren, Rajendra Nayak, Sakari Poussa, Veeramanikandan
- * Raju, Anand Sawant, Igor Stoppa, Paul Walmsley, Richard Woodruff
- */
-
-#undef DEBUG
-
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-/* Interface documentation is in mach/omap-pm.h */
-#include <plat/omap-pm.h>
-#include <plat/omap_device.h>
-
-static bool off_mode_enabled;
-static u32 dummy_context_loss_counter;
-
-/*
- * Device-driver-originated constraints (via board-*.c files)
- */
-
-int omap_pm_set_max_mpu_wakeup_lat(struct device *dev, long t)
-{
- if (!dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max MPU wakeup latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max MPU wakeup latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux, this needs to map the MPU to a
- * powerdomain, then go through the list of current max lat
- * constraints on the MPU and find the smallest. If
- * the latency constraint has changed, the code should
- * recompute the state to enter for the next powerdomain
- * state.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_min_bus_tput(struct device *dev, u8 agent_id, unsigned long r)
-{
- if (!dev || (agent_id != OCP_INITIATOR_AGENT &&
- agent_id != OCP_TARGET_AGENT)) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (r == 0)
- pr_debug("OMAP PM: remove min bus tput constraint: "
- "dev %s for agent_id %d\n", dev_name(dev), agent_id);
- else
- pr_debug("OMAP PM: add min bus tput constraint: "
- "dev %s for agent_id %d: rate %ld KiB\n",
- dev_name(dev), agent_id, r);
-
- /*
- * This code should model the interconnect and compute the
- * required clock frequency, convert that to a VDD2 OPP ID, then
- * set the VDD2 OPP appropriately.
- *
- * TI CDP code can call constraint_set here on the VDD2 OPP.
- */
-
- return 0;
-}
-
-int omap_pm_set_max_dev_wakeup_lat(struct device *req_dev, struct device *dev,
- long t)
-{
- if (!req_dev || !dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max device latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max device latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux, this needs to map the device to a
- * powerdomain, then go through the list of current max lat
- * constraints on that powerdomain and find the smallest. If
- * the latency constraint has changed, the code should
- * recompute the state to enter for the next powerdomain
- * state. Conceivably, this code should also determine
- * whether to actually disable the device clocks or not,
- * depending on how long it takes to re-enable the clocks.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_max_sdma_lat(struct device *dev, long t)
-{
- if (!dev || t < -1) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- };
-
- if (t == -1)
- pr_debug("OMAP PM: remove max DMA latency constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add max DMA latency constraint: "
- "dev %s, t = %ld usec\n", dev_name(dev), t);
-
- /*
- * For current Linux PM QOS params, this code should scan the
- * list of maximum CPU and DMA latencies and select the
- * smallest, then set cpu_dma_latency pm_qos_param
- * accordingly.
- *
- * For future Linux PM QOS params, with separate CPU and DMA
- * latency params, this code should just set the dma_latency param.
- *
- * TI CDP code can call constraint_set here.
- */
-
- return 0;
-}
-
-int omap_pm_set_min_clk_rate(struct device *dev, struct clk *c, long r)
-{
- if (!dev || !c || r < 0) {
- WARN(1, "OMAP PM: %s: invalid parameter(s)", __func__);
- return -EINVAL;
- }
-
- if (r == 0)
- pr_debug("OMAP PM: remove min clk rate constraint: "
- "dev %s\n", dev_name(dev));
- else
- pr_debug("OMAP PM: add min clk rate constraint: "
- "dev %s, rate = %ld Hz\n", dev_name(dev), r);
-
- /*
- * Code in a real implementation should keep track of these
- * constraints on the clock, and determine the highest minimum
- * clock rate. It should iterate over each OPP and determine
- * whether the OPP will result in a clock rate that would
- * satisfy this constraint (and any other PM constraint in effect
- * at that time). Once it finds the lowest-voltage OPP that
- * meets those conditions, it should switch to it, or return
- * an error if the code is not capable of doing so.
- */
-
- return 0;
-}
-
-/*
- * DSP Bridge-specific constraints
- */
-
-const struct omap_opp *omap_pm_dsp_get_opp_table(void)
-{
- pr_debug("OMAP PM: DSP request for OPP table\n");
-
- /*
- * Return DSP frequency table here: The final item in the
- * array should have .rate = .opp_id = 0.
- */
-
- return NULL;
-}
-
-void omap_pm_dsp_set_min_opp(u8 opp_id)
-{
- if (opp_id == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: DSP requests minimum VDD1 OPP to be %d\n", opp_id);
-
- /*
- *
- * For l-o dev tree, our VDD1 clk is keyed on OPP ID, so we
- * can just test to see which is higher, the CPU's desired OPP
- * ID or the DSP's desired OPP ID, and use whichever is
- * highest.
- *
- * In CDP12.14+, the VDD1 OPP custom clock that controls the DSP
- * rate is keyed on MPU speed, not the OPP ID. So we need to
- * map the OPP ID to the MPU speed for use with clk_set_rate()
- * if it is higher than the current OPP clock rate.
- *
- */
-}
-
-
-u8 omap_pm_dsp_get_opp(void)
-{
- pr_debug("OMAP PM: DSP requests current DSP OPP ID\n");
-
- /*
- * For l-o dev tree, call clk_get_rate() on VDD1 OPP clock
- *
- * CDP12.14+:
- * Call clk_get_rate() on the OPP custom clock, map that to an
- * OPP ID using the tables defined in board-*.c/chip-*.c files.
- */
-
- return 0;
-}
-
-/*
- * CPUFreq-originated constraint
- *
- * In the future, this should be handled by custom OPP clocktype
- * functions.
- */
-
-struct cpufreq_frequency_table **omap_pm_cpu_get_freq_table(void)
-{
- pr_debug("OMAP PM: CPUFreq request for frequency table\n");
-
- /*
- * Return CPUFreq frequency table here: loop over
- * all VDD1 clkrates, pull out the mpu_ck frequencies, build
- * table
- */
-
- return NULL;
-}
-
-void omap_pm_cpu_set_freq(unsigned long f)
-{
- if (f == 0) {
- WARN_ON(1);
- return;
- }
-
- pr_debug("OMAP PM: CPUFreq requests CPU frequency to be set to %lu\n",
- f);
-
- /*
- * For l-o dev tree, determine whether MPU freq or DSP OPP id
- * freq is higher. Find the OPP ID corresponding to the
- * higher frequency. Call clk_round_rate() and clk_set_rate()
- * on the OPP custom clock.
- *
- * CDP should just be able to set the VDD1 OPP clock rate here.
- */
-}
-
-unsigned long omap_pm_cpu_get_freq(void)
-{
- pr_debug("OMAP PM: CPUFreq requests current CPU frequency\n");
-
- /*
- * Call clk_get_rate() on the mpu_ck.
- */
-
- return 0;
-}
-
-/**
- * omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
- *
- * Intended for use only by OMAP PM core code to notify this layer
- * that off mode has been enabled.
- */
-void omap_pm_enable_off_mode(void)
-{
- off_mode_enabled = true;
-}
-
-/**
- * omap_pm_disable_off_mode - notify OMAP PM that off-mode is disabled
- *
- * Intended for use only by OMAP PM core code to notify this layer
- * that off mode has been disabled.
- */
-void omap_pm_disable_off_mode(void)
-{
- off_mode_enabled = false;
-}
-
-/*
- * Device context loss tracking
- */
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
-
-u32 omap_pm_get_dev_context_loss_count(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- u32 count;
-
- if (WARN_ON(!dev))
- return 0;
-
- if (dev->parent == &omap_device_parent) {
- count = omap_device_get_context_loss_count(pdev);
- } else {
- WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device",
- dev_name(dev));
- if (off_mode_enabled)
- dummy_context_loss_counter++;
- count = dummy_context_loss_counter;
- }
-
- pr_debug("OMAP PM: context loss count for dev %s = %d\n",
- dev_name(dev), count);
-
- return count;
-}
-
-#else
-
-u32 omap_pm_get_dev_context_loss_count(struct device *dev)
-{
- return dummy_context_loss_counter;
-}
-
-#endif
-
-/* Should be called before clk framework init */
-int __init omap_pm_if_early_init(void)
-{
- return 0;
-}
-
-/* Must be called after clock framework is initialized */
-int __init omap_pm_if_init(void)
-{
- return 0;
-}
-
-void omap_pm_if_exit(void)
-{
- /* Deallocate CPUFreq frequency table here */
-}
-
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df..92b4496 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -146,12 +146,12 @@
odpl->activate_lat_worst = act_lat;
if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
odpl->activate_lat = act_lat;
- pr_warning("omap_device: %s.%d: new worst case "
+ pr_debug("omap_device: %s.%d: new worst case "
"activate latency %d: %llu\n",
od->pdev.name, od->pdev.id,
od->pm_lat_level, act_lat);
} else
- pr_warning("omap_device: %s.%d: activate "
+ pr_debug("omap_device: %s.%d: activate "
"latency %d higher than exptected. "
"(%llu > %d)\n",
od->pdev.name, od->pdev.id,
@@ -214,12 +214,12 @@
odpl->deactivate_lat_worst = deact_lat;
if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
odpl->deactivate_lat = deact_lat;
- pr_warning("omap_device: %s.%d: new worst case "
+ pr_debug("omap_device: %s.%d: new worst case "
"deactivate latency %d: %llu\n",
od->pdev.name, od->pdev.id,
od->pm_lat_level, deact_lat);
} else
- pr_warning("omap_device: %s.%d: deactivate "
+ pr_debug("omap_device: %s.%d: deactivate "
"latency %d higher than exptected. "
"(%llu > %d)\n",
od->pdev.name, od->pdev.id,
@@ -311,7 +311,7 @@
* return the context loss counter for that hwmod, otherwise return
* zero.
*/
-u32 omap_device_get_context_loss_count(struct platform_device *pdev)
+int omap_device_get_context_loss_count(struct platform_device *pdev)
{
struct omap_device *od;
u32 ret = 0;
diff --git a/arch/arm/plat-omap/omap_rpmsg.c b/arch/arm/plat-omap/omap_rpmsg.c
new file mode 100644
index 0000000..c0257be
--- /dev/null
+++ b/arch/arm/plat-omap/omap_rpmsg.c
@@ -0,0 +1,601 @@
+/*
+ * Remote processor messaging transport (OMAP platform-specific bits)
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Authors: Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/rpmsg.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/memblock.h>
+#include <linux/remoteproc.h>
+#include <asm/io.h>
+
+#include <plat/rpmsg.h>
+#include <plat/mailbox.h>
+#include <plat/remoteproc.h>
+
+struct omap_rpmsg_vproc {
+ struct virtio_device vdev;
+ unsigned int vring[2]; /* mpu owns first vring, ipu owns the 2nd */
+ unsigned int buf_addr;
+ unsigned int buf_size; /* must be page-aligned */
+ void *buf_mapped;
+ char *mbox_name;
+ char *rproc_name;
+ struct omap_mbox *mbox;
+ struct rproc *rproc;
+ struct notifier_block nb;
+ struct notifier_block rproc_nb;
+ struct work_struct reset_work;
+ bool slave_reset;
+ struct omap_rpmsg_vproc *slave_next;
+ struct virtqueue *vq[2];
+ int base_vq_id;
+ int num_of_vqs;
+ struct rpmsg_channel_info *hardcoded_chnls;
+};
+
+#define to_omap_rpdev(vd) container_of(vd, struct omap_rpmsg_vproc, vdev)
+static void rpmsg_reset_work(struct work_struct *work);
+
+struct omap_rpmsg_vq_info {
+ __u16 num; /* number of entries in the virtio_ring */
+	__u16 vq_id; /* a globally unique index of this virtqueue */
+ void *addr; /* address where we mapped the virtio ring */
+ struct omap_rpmsg_vproc *rpdev;
+};
+
+/*
+ * For now, allocate 256 buffers of 512 bytes for each side. Each buffer
+ * will then have 16B for the msg header and 496B for the payload.
+ * This will require a total space of 256KB for the buffers themselves, and
+ * 3 pages for every vring (the size of the vring depends on the number of
+ * buffers it supports).
+ */
+#define RPMSG_NUM_BUFS (512)
+#define RPMSG_BUF_SIZE (512)
+#define RPMSG_BUFS_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
+
+/*
+ * The alignment between the consumer and producer parts of the vring.
+ * Note: this is part of the "wire" protocol. If you change this, you need
+ * to update your BIOS image as well
+ */
+#define RPMSG_VRING_ALIGN (4096)
+
+/* With 256 buffers, our vring will occupy 3 pages */
+#define RPMSG_RING_SIZE ((DIV_ROUND_UP(vring_size(RPMSG_NUM_BUFS / 2, \
+ RPMSG_VRING_ALIGN), PAGE_SIZE)) * PAGE_SIZE)
+
+/* The total IPC space needed to communicate with a remote processor */
+#define RPMSG_IPC_MEM (RPMSG_BUFS_SPACE + 2 * RPMSG_RING_SIZE)
+
+/* provide drivers with platform-specific details */
+static void omap_rpmsg_get(struct virtio_device *vdev, unsigned int request,
+ void *buf, unsigned len)
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ void *presult;
+ int iresult;
+
+ switch (request) {
+ case VPROC_BUF_ADDR:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(rpdev->buf_mapped));
+ memcpy(buf, &rpdev->buf_mapped, len);
+ break;
+ case VPROC_SIM_BASE:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(presult));
+ /*
+ * calculate a simulated base address to make virtio's
+ * virt_to_page() happy.
+ */
+ presult = __va(rpdev->buf_addr);
+ memcpy(buf, &presult, len);
+ break;
+ case VPROC_BUF_NUM:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(iresult));
+ iresult = RPMSG_NUM_BUFS;
+ memcpy(buf, &iresult, len);
+ break;
+ case VPROC_BUF_SZ:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(iresult));
+ iresult = RPMSG_BUF_SIZE;
+ memcpy(buf, &iresult, len);
+ break;
+ case VPROC_STATIC_CHANNELS:
+ /* user data is at stake so bugs here cannot be tolerated */
+ BUG_ON(len != sizeof(rpdev->hardcoded_chnls));
+ memcpy(buf, &rpdev->hardcoded_chnls, len);
+ break;
+ default:
+ dev_err(&vdev->dev, "invalid request: %d\n", request);
+ }
+}
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static void omap_rpmsg_notify(struct virtqueue *vq)
+{
+ struct omap_rpmsg_vq_info *rpvq = vq->priv;
+ int ret;
+
+ pr_debug("sending mailbox msg: %d\n", rpvq->vq_id);
+ rproc_last_busy(rpvq->rpdev->rproc);
+ /* send the index of the triggered virtqueue as the mailbox payload */
+ ret = omap_mbox_msg_send(rpvq->rpdev->mbox, rpvq->vq_id);
+ if (ret)
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", ret);
+}
+
+static int omap_rpmsg_mbox_callback(struct notifier_block *this,
+ unsigned long index, void *data)
+{
+ mbox_msg_t msg = (mbox_msg_t) data;
+ struct omap_rpmsg_vproc *rpdev;
+
+ rpdev = container_of(this, struct omap_rpmsg_vproc, nb);
+
+ pr_debug("mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ pr_err("%s has just crashed !\n", rpdev->rproc_name);
+ rproc_error_notify(rpdev->rproc);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ pr_info("received echo reply from %s !\n", rpdev->rproc_name);
+ break;
+ case RP_MBOX_PENDING_MSG:
+ /*
+ * a new inbound message is waiting in our own vring (index 0).
+ * Let's pretend the message explicitly contained the vring
+ * index number and handle it generically
+ */
+ msg = rpdev->base_vq_id;
+ /* intentional fall-through */
+ default:
+ /* ignore vq indices which are clearly not for us */
+ if (msg < rpdev->base_vq_id)
+ break;
+
+ msg -= rpdev->base_vq_id;
+
+ /*
+ * Currently both PENDING_MSG and explicit-virtqueue-index
+ * messaging are supported.
+ * Whatever approach is taken, at this point 'msg' contains
+ * the index of the vring which was just triggered.
+ */
+ if (msg < rpdev->num_of_vqs)
+ vring_interrupt(msg, rpdev->vq[msg]);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rpmsg_reset_devices(struct omap_rpmsg_vproc *rpdev)
+{
+ /* wait until previous reset requests have finished */
+ flush_work_sync(&rpdev->reset_work);
+ schedule_work(&rpdev->reset_work);
+}
+
+static int rpmsg_rproc_error(struct omap_rpmsg_vproc *rpdev)
+{
+ pr_err("Fatal error in %s\n", rpdev->rproc_name);
+#ifdef CONFIG_OMAP_RPMSG_RECOVERY
+ if (rpdev->slave_reset)
+ return NOTIFY_DONE;
+ rpmsg_reset_devices(rpdev);
+#endif
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_suspend(struct omap_rpmsg_vproc *rpdev)
+{
+ if (virtqueue_more_used(rpdev->vq[0]))
+ return NOTIFY_BAD;
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_pos_suspend(struct omap_rpmsg_vproc *rpdev)
+{
+ if (rpdev->mbox) {
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+ rpdev->mbox = NULL;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_resume(struct omap_rpmsg_vproc *rpdev)
+{
+ if (!rpdev->mbox)
+ rpdev->mbox = omap_mbox_get(rpdev->mbox_name, &rpdev->nb);
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_secure(struct omap_rpmsg_vproc *rpdev, bool s)
+{
+ pr_err("%s: %s secure mode\n", rpdev->rproc_name, s ? "enter" : "exit");
+ if (rpdev->slave_reset)
+ return NOTIFY_DONE;
+ rpmsg_reset_devices(rpdev);
+
+ return NOTIFY_DONE;
+}
+
+static int rpmsg_rproc_events(struct notifier_block *this,
+ unsigned long type, void *data)
+{
+ struct omap_rpmsg_vproc *rpdev = container_of(this,
+ struct omap_rpmsg_vproc, rproc_nb);
+
+ switch (type) {
+ case RPROC_ERROR:
+ return rpmsg_rproc_error(rpdev);
+ case RPROC_PRE_SUSPEND:
+ return rpmsg_rproc_suspend(rpdev);
+ case RPROC_POS_SUSPEND:
+ return rpmsg_rproc_pos_suspend(rpdev);
+ case RPROC_RESUME:
+ return rpmsg_rproc_resume(rpdev);
+ case RPROC_SECURE:
+ return rpmsg_rproc_secure(rpdev, !!data);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ struct omap_rpmsg_vq_info *rpvq;
+ struct virtqueue *vq;
+ int err;
+
+ rpvq = kmalloc(sizeof(*rpvq), GFP_KERNEL);
+ if (!rpvq)
+ return ERR_PTR(-ENOMEM);
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpvq->addr = (__force void *) ioremap_nocache(rpdev->vring[index],
+ RPMSG_RING_SIZE);
+ if (!rpvq->addr) {
+ err = -ENOMEM;
+ goto free_rpvq;
+ }
+
+ memset(rpvq->addr, 0, RPMSG_RING_SIZE);
+
+ pr_debug("vring%d: phys 0x%x, virt 0x%x\n", index, rpdev->vring[index],
+ (unsigned int) rpvq->addr);
+
+ vq = vring_new_virtqueue(RPMSG_NUM_BUFS / 2, RPMSG_VRING_ALIGN, vdev,
+ rpvq->addr, omap_rpmsg_notify, callback, name);
+ if (!vq) {
+ pr_err("vring_new_virtqueue failed\n");
+ err = -ENOMEM;
+ goto unmap_vring;
+ }
+
+ rpdev->vq[index] = vq;
+ vq->priv = rpvq;
+ /* system-wide unique id for this virtqueue */
+ rpvq->vq_id = rpdev->base_vq_id + index;
+ rpvq->rpdev = rpdev;
+
+ return vq;
+
+unmap_vring:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rpvq->addr);
+free_rpvq:
+ kfree(rpvq);
+ return ERR_PTR(err);
+}
+
+static void omap_rpmsg_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+
+ rproc_event_unregister(rpdev->rproc, &rpdev->rproc_nb);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ struct omap_rpmsg_vq_info *rpvq = vq->priv;
+ iounmap(rpvq->addr);
+ vring_del_virtqueue(vq);
+ kfree(rpvq);
+ }
+
+ if (rpdev->mbox)
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+
+ if (rpdev->rproc)
+ rproc_put(rpdev->rproc);
+
+ iounmap(rpdev->buf_mapped);
+}
+
+static int omap_rpmsg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct omap_rpmsg_vproc *rpdev = to_omap_rpdev(vdev);
+ int i, err;
+
+ /* we maintain two virtqueues per remote processor (for RX and TX) */
+ if (nvqs != 2)
+ return -EINVAL;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ rpdev->num_of_vqs = nvqs;
+
+ /* ioremap'ing normal memory, so we cast away sparse's complaints */
+ rpdev->buf_mapped = (__force void *) ioremap_nocache(rpdev->buf_addr,
+ rpdev->buf_size);
+ if (!rpdev->buf_mapped) {
+ pr_err("ioremap failed\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* for now, use mailbox's notifiers. later that can be optimized */
+ rpdev->nb.notifier_call = omap_rpmsg_mbox_callback;
+ rpdev->mbox = omap_mbox_get(rpdev->mbox_name, &rpdev->nb);
+ if (IS_ERR(rpdev->mbox)) {
+ pr_err("failed to get mailbox %s\n", rpdev->mbox_name);
+ err = -EINVAL;
+ goto unmap_buf;
+ }
+
+ pr_debug("buf: phys 0x%x, virt 0x%x\n", rpdev->buf_addr,
+ (unsigned int) rpdev->buf_mapped);
+
+ /* tell the M3 we're ready. hmm. do we really need this msg */
+ err = omap_mbox_msg_send(rpdev->mbox, RP_MBOX_READY);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /* send it the physical address of the mapped buffer + vrings, */
+ /* this should be moved to the resource table logic */
+ err = omap_mbox_msg_send(rpdev->mbox, (mbox_msg_t) rpdev->buf_addr);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /* ping the remote processor. this is only for sanity-sake;
+ * there is no functional effect whatsoever */
+ err = omap_mbox_msg_send(rpdev->mbox, RP_MBOX_ECHO_REQUEST);
+ if (err) {
+ pr_err("ugh, omap_mbox_msg_send() failed: %d\n", err);
+ goto put_mbox;
+ }
+
+ /* now load the firmware, and take the M3 out of reset */
+ rpdev->rproc = rproc_get(rpdev->rproc_name);
+ if (!rpdev->rproc) {
+ pr_err("failed to get rproc %s\n", rpdev->rproc_name);
+ err = -EINVAL;
+ goto put_mbox;
+ }
+ /* register for remoteproc events */
+ rpdev->rproc_nb.notifier_call = rpmsg_rproc_events;
+ rproc_event_register(rpdev->rproc, &rpdev->rproc_nb);
+
+ return 0;
+
+put_mbox:
+ omap_mbox_put(rpdev->mbox, &rpdev->nb);
+unmap_buf:
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *)rpdev->buf_mapped);
+error:
+ omap_rpmsg_del_vqs(vdev);
+ return err;
+}
+
+/*
+ * should be nice to add firmware support for these handlers.
+ * for now provide them so virtio doesn't crash
+ */
+static u8 omap_rpmsg_get_status(struct virtio_device *vdev)
+{
+ return 0;
+}
+
+static void omap_rpmsg_set_status(struct virtio_device *vdev, u8 status)
+{
+ dev_dbg(&vdev->dev, "new status: %d\n", status);
+}
+
+static void omap_rpmsg_reset(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+static u32 omap_rpmsg_get_features(struct virtio_device *vdev)
+{
+ /* for now, use hardcoded bitmap. later this should be provided
+ * by the firmware itself */
+ return (1 << VIRTIO_RPMSG_F_NS);
+}
+
+static void omap_rpmsg_finalize_features(struct virtio_device *vdev)
+{
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+}
+
+static void omap_rpmsg_vproc_release(struct device *dev)
+{
+ /* this handler is provided so driver core doesn't yell at us */
+}
+
+static void rpmsg_reset_work(struct work_struct *work)
+{
+ struct omap_rpmsg_vproc *rpdev =
+ container_of(work, struct omap_rpmsg_vproc, reset_work);
+ struct omap_rpmsg_vproc *tmp;
+ int ret;
+
+ for (tmp = rpdev; tmp; tmp = tmp->slave_next) {
+		pr_err("resetting virtio device %d\n", tmp->vdev.index);
+ unregister_virtio_device(&tmp->vdev);
+ }
+ for (tmp = rpdev; tmp; tmp = tmp->slave_next) {
+ memset(&tmp->vdev.dev, 0, sizeof(struct device));
+ tmp->vdev.dev.release = omap_rpmsg_vproc_release;
+ ret = register_virtio_device(&tmp->vdev);
+ if (ret)
+ pr_err("error creating virtio device %d\n", ret);
+ }
+}
+
+static struct virtio_config_ops omap_rpmsg_config_ops = {
+ .get_features = omap_rpmsg_get_features,
+ .finalize_features = omap_rpmsg_finalize_features,
+ .get = omap_rpmsg_get,
+ .find_vqs = omap_rpmsg_find_vqs,
+ .del_vqs = omap_rpmsg_del_vqs,
+ .reset = omap_rpmsg_reset,
+ .set_status = omap_rpmsg_set_status,
+ .get_status = omap_rpmsg_get_status,
+};
+
+static struct rpmsg_channel_info omap_ipuc0_hardcoded_chnls[] = {
+ { "rpmsg-resmgr", 100, RPMSG_ADDR_ANY },
+ { "rpmsg-server-sample", 137, RPMSG_ADDR_ANY },
+ { },
+};
+
+static struct rpmsg_channel_info omap_ipuc1_hardcoded_chnls[] = {
+ { "rpmsg-resmgr", 100, RPMSG_ADDR_ANY },
+ { },
+};
+
+static struct omap_rpmsg_vproc omap_rpmsg_vprocs[] = {
+ /* ipu_c0's rpmsg backend */
+ {
+ .vdev.id.device = VIRTIO_ID_RPMSG,
+ .vdev.config = &omap_rpmsg_config_ops,
+ .mbox_name = "mailbox-1",
+ .rproc_name = "ipu",
+ .base_vq_id = 0,
+ .hardcoded_chnls = omap_ipuc0_hardcoded_chnls,
+ .slave_next = &omap_rpmsg_vprocs[1],
+ },
+ /* ipu_c1's rpmsg backend */
+ {
+ .vdev.id.device = VIRTIO_ID_RPMSG,
+ .vdev.config = &omap_rpmsg_config_ops,
+ .mbox_name = "mailbox-1",
+ .rproc_name = "ipu",
+ .base_vq_id = 2,
+ .hardcoded_chnls = omap_ipuc1_hardcoded_chnls,
+ .slave_reset = true,
+ },
+};
+
+static int __init omap_rpmsg_ini(void)
+{
+ int i, ret = 0;
+ phys_addr_t paddr = omap_ipu_get_mempool_base(
+ OMAP_RPROC_MEMPOOL_STATIC);
+ phys_addr_t psize = omap_ipu_get_mempool_size(
+ OMAP_RPROC_MEMPOOL_STATIC);
+
+ for (i = 0; i < ARRAY_SIZE(omap_rpmsg_vprocs); i++) {
+ struct omap_rpmsg_vproc *rpdev = &omap_rpmsg_vprocs[i];
+
+ if (psize < RPMSG_IPC_MEM) {
+ pr_err("out of carveout memory: %d (%d)\n", psize, i);
+ return -ENOMEM;
+ }
+
+ /*
+ * vring buffers are expected to be present at the beginning
+ * of the chosen remoteproc pool
+ */
+ rpdev->buf_addr = paddr;
+ rpdev->buf_size = RPMSG_BUFS_SPACE;
+ rpdev->vring[0] = paddr + RPMSG_BUFS_SPACE;
+ rpdev->vring[1] = paddr + RPMSG_BUFS_SPACE + RPMSG_RING_SIZE;
+ INIT_WORK(&rpdev->reset_work, rpmsg_reset_work);
+
+ paddr += RPMSG_IPC_MEM;
+ psize -= RPMSG_IPC_MEM;
+
+ pr_debug("rpdev%d: buf 0x%x, vring0 0x%x, vring1 0x%x\n", i,
+ rpdev->buf_addr, rpdev->vring[0], rpdev->vring[1]);
+
+ rpdev->vdev.dev.release = omap_rpmsg_vproc_release;
+
+ ret = register_virtio_device(&rpdev->vdev);
+ if (ret) {
+ pr_err("failed to register rpdev: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+module_init(omap_rpmsg_ini);
+
+static void __exit omap_rpmsg_fini(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(omap_rpmsg_vprocs); i++) {
+ struct omap_rpmsg_vproc *rpdev = &omap_rpmsg_vprocs[i];
+
+ unregister_virtio_device(&rpdev->vdev);
+ }
+}
+module_exit(omap_rpmsg_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OMAP Remote processor messaging virtio device");
diff --git a/arch/arm/plat-omap/rproc_user.c b/arch/arm/plat-omap/rproc_user.c
new file mode 100644
index 0000000..083e4ae
--- /dev/null
+++ b/arch/arm/plat-omap/rproc_user.c
@@ -0,0 +1,185 @@
+/*
+ * Secure Mode Input interface to remoteproc driver
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * Authors: Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/remoteproc.h>
+
+
+#define RPROC_USER_NAME "rproc_user"
+#define RPROC_USER_DEVICES 1
+
+static DEFINE_MUTEX(rproc_user_mutex);
+
+struct rproc_user_device {
+ struct miscdevice mdev;
+};
+
+static struct rproc_user_device *ipu_device;
+static char *rproc_user_name = RPROC_USER_NAME;
+static unsigned secure_cnt;
+
+static int rproc_user_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+ return 0;
+}
+
+static int rproc_user_release(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+
+ if (filp->private_data) {
+ mutex_lock(&rproc_user_mutex);
+ if (!--secure_cnt)
+ ret = rproc_set_secure("ipu", false);
+ mutex_unlock(&rproc_user_mutex);
+ if (ret)
+			pr_err("rproc normal start failed 0x%x\n", ret);
+ }
+ return ret;
+}
+
+static ssize_t rproc_user_read(struct file *filp, char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ u8 enable;
+ int ret = 1;
+
+ if (len != 1)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&rproc_user_mutex))
+ return -EINTR;
+ enable = secure_cnt ? 1 : 0;
+ if (copy_to_user((void *)ubuf, &enable, sizeof(enable)))
+ ret = -EFAULT;
+ mutex_unlock(&rproc_user_mutex);
+
+ return ret;
+}
+
+static ssize_t rproc_user_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ int ret = 0;
+ u8 enable;
+
+ if (len != 1)
+ return -EINVAL;
+
+ if (copy_from_user(&enable, (char __user *) ubuf, sizeof(enable)))
+ return -EFAULT;
+
+ if (mutex_lock_interruptible(&rproc_user_mutex))
+ return -EINTR;
+
+ enable = enable ? 1 : 0;
+ if (enable == (int)filp->private_data) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (enable) {
+ case 1:
+ if (!secure_cnt++)
+ ret = rproc_set_secure("ipu", true);
+ if (!ret) {
+ filp->private_data = (void *)1;
+ goto out;
+ }
+ /* fall through in case of failure */
+ pr_err("rproc secure start failed, 0x%x\n", ret);
+ case 0:
+ if (!--secure_cnt)
+ ret = rproc_set_secure("ipu", false);
+ if (ret)
+			pr_err("rproc normal start failed 0x%x\n", ret);
+ else
+ filp->private_data = (void *)0;
+ }
+ if (enable != (int)filp->private_data)
+ ret = -EACCES;
+out:
+ mutex_unlock(&rproc_user_mutex);
+
+ return ret ? ret : 1;
+}
+
+static const struct file_operations rproc_user_fops = {
+ .owner = THIS_MODULE,
+ .open = rproc_user_open,
+ .release = rproc_user_release,
+ .read = rproc_user_read,
+ .write = rproc_user_write,
+};
+
+static int __init rproc_user_init(void)
+{
+ int ret;
+
+ ipu_device = kzalloc(sizeof(struct rproc_user_device), GFP_KERNEL);
+ if (!ipu_device) {
+ pr_err("%s: memory allocation failed for ipu_device\n",
+ __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ipu_device->mdev.minor = MISC_DYNAMIC_MINOR;
+ ipu_device->mdev.name = rproc_user_name;
+ ipu_device->mdev.fops = &rproc_user_fops;
+ ipu_device->mdev.parent = NULL;
+ ret = misc_register(&ipu_device->mdev);
+ if (ret) {
+ pr_err("rproc_user_init: failed to register rproc_user misc "
+ "device\n");
+ goto misc_fail;
+ }
+ return ret;
+
+misc_fail:
+ kfree(ipu_device);
+exit:
+ return ret;
+}
+module_init(rproc_user_init);
+
+static void __exit rproc_user_exit(void)
+{
+ misc_deregister(&ipu_device->mdev);
+ kfree(ipu_device);
+}
+module_exit(rproc_user_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RemoteProc Secure Mode Interface Driver");
+MODULE_AUTHOR("Suman Anna");
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 6af3d0b..da97e1f 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -48,9 +48,14 @@
#define OMAP3_SRAM_VA 0xfe400000
#define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000)
#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000)
-#define OMAP4_SRAM_VA 0xfe400000
-#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
-#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + 0x4000)
+
+#define OMAP4_SRAM_MAX 0xe000 /* 56K */
+#define OMAP4_SRAM_VA 0xfe400000
+
+#define OMAP4_HS_SRAM_SIZE 0x1000 /* 4K */
+#define OMAP4_HS_SRAM_OFFSET (OMAP4_SRAM_MAX - OMAP4_HS_SRAM_SIZE)
+#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + OMAP4_HS_SRAM_OFFSET)
+#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + OMAP4_HS_SRAM_OFFSET)
#if defined(CONFIG_ARCH_OMAP2PLUS)
#define SRAM_BOOTLOADER_SZ 0x00
@@ -76,6 +81,12 @@
static unsigned long omap_sram_base;
static unsigned long omap_sram_size;
static unsigned long omap_sram_ceil;
+static unsigned long omap_barrier_base;
+
+unsigned long omap_get_sram_barrier_base(void)
+{
+ return omap_barrier_base;
+}
/*
* Depending on the target RAMFS firewall setup, the public usable amount of
@@ -128,7 +139,7 @@
} else if (cpu_is_omap44xx()) {
omap_sram_base = OMAP4_SRAM_PUB_VA;
omap_sram_start = OMAP4_SRAM_PUB_PA;
- omap_sram_size = 0xa000; /* 40K */
+ omap_sram_size = OMAP4_HS_SRAM_SIZE;
} else {
omap_sram_base = OMAP2_SRAM_PUB_VA;
omap_sram_start = OMAP2_SRAM_PUB_PA;
@@ -185,24 +196,25 @@
omap_sram_ceil = omap_sram_base + omap_sram_size;
}
-static struct map_desc omap_sram_io_desc[] __initdata = {
- { /* .length gets filled in at runtime */
- .virtual = OMAP1_SRAM_VA,
- .pfn = __phys_to_pfn(OMAP1_SRAM_PA),
- .type = MT_MEMORY
- }
-};
-
/*
* Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
*/
static void __init omap_map_sram(void)
{
unsigned long base;
+ struct map_desc omap_sram_io_desc[2];
+ int nr_desc = 1;
if (omap_sram_size == 0)
return;
+ omap_sram_io_desc[0].virtual = omap_sram_base;
+ base = omap_sram_start;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
+ omap_sram_io_desc[0].type = MT_MEMORY;
+
if (cpu_is_omap34xx()) {
/*
* SRAM must be marked as non-cached on OMAP3 since the
@@ -212,14 +224,33 @@
* which will cause the system to hang.
*/
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
+ } else if (cpu_is_omap44xx()) {
+ /*
+ * Map a page of SRAM with strongly ordered attributes
+ * for interconnect barrier usage.
+		 * If there is enough room, use a separate page; otherwise
+		 * remap the tail of the first mapping.
+ */
+ if (omap_sram_size <= PAGE_SIZE) {
+ omap_sram_io_desc[0].type = MT_MEMORY_SO;
+ omap_barrier_base = omap_sram_io_desc[0].virtual;
+ } else {
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size
+ - PAGE_SIZE, PAGE_SIZE);
+ omap_sram_io_desc[1].virtual =
+ omap_sram_base + omap_sram_io_desc[0].length;
+ omap_barrier_base = omap_sram_io_desc[1].virtual;
+ base = omap_sram_start + omap_sram_io_desc[0].length;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[1].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[1].length = PAGE_SIZE;
+ omap_sram_io_desc[1].type = MT_MEMORY_SO;
+ nr_desc = 2;
+ }
}
- omap_sram_io_desc[0].virtual = omap_sram_base;
- base = omap_sram_start;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
- iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
+
+ iotable_init(omap_sram_io_desc, nr_desc);
pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
(long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
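
The strongly-ordered SRAM page exported through omap_get_sram_barrier_base() is intended for the OMAP4 interconnect-barrier usage mentioned in the comment above. A rough sketch of the idea follows; the header providing the prototype and the exact barrier sequence are both assumptions, not taken from this series.

#include <linux/io.h>
#include <asm/system.h>		/* dsb() on ARM */
#include <plat/sram.h>		/* omap_get_sram_barrier_base(), assumed */

static void __iomem *sram_so_page;

static void omap_interconnect_sync(void)
{
	if (!sram_so_page)
		sram_so_page =
			(void __iomem *)omap_get_sram_barrier_base();

	/*
	 * A dummy store to the strongly-ordered page pushes prior writes
	 * out onto the interconnect before we wait for completion.
	 */
	__raw_writel(0, sram_so_page);
	dsb();
}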
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 192e9dd..fec4bf8 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
@@ -403,9 +404,7 @@
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
-#ifdef CONFIG_PM
-#include <linux/syscore_ops.h>
-
+#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
struct thread_info *ti = current_thread_info();
@@ -441,19 +440,33 @@
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
-static struct syscore_ops vfp_pm_syscore_ops = {
- .suspend = vfp_pm_suspend,
- .resume = vfp_pm_resume,
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ vfp_pm_suspend();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ vfp_pm_resume();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
- register_syscore_ops(&vfp_pm_syscore_ops);
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
#else
static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
void vfp_sync_hwstate(struct thread_info *thread)
{
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d0258eb..1cce7f29 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -128,4 +128,11 @@
source "drivers/clocksource/Kconfig"
+source "drivers/remoteproc/Kconfig"
+
+source "drivers/virtio/Kconfig"
+
+source "drivers/rpmsg/Kconfig"
+
+source "drivers/omap_hsi/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 4ea4ac9..2f047a4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -22,6 +22,7 @@
obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
@@ -123,3 +124,8 @@
obj-y += clk/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
+obj-$(CONFIG_REMOTE_PROC) += remoteproc/
+
+obj-$(CONFIG_DMM_OMAP) += media/
+obj-$(CONFIG_TILER_OMAP) += media/
+obj-$(CONFIG_OMAP_HSI) += omap_hsi/
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899..5cc1232 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
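
The new helper pairs with opp_init_cpufreq_table(). A sketch of how an SoC cpufreq driver might use the pair, with error handling trimmed and the mpu_dev device pointer assumed to come from platform code:

#include <linux/cpufreq.h>
#include <linux/opp.h>

/* assumed to be looked up from platform code; not defined here */
extern struct device *mpu_dev;

static struct cpufreq_frequency_table *freq_table;

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;

	/* build a cpufreq table from the OPPs registered for mpu_dev */
	ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
	if (ret)
		return ret;

	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (ret) {
		/* undo the allocation with the new helper on failure */
		opp_free_cpufreq_table(mpu_dev, &freq_table);
		return ret;
	}
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
	return 0;
}

static int my_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	opp_free_cpufreq_table(mpu_dev, &freq_table);
	return 0;
}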
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1947088..7e07d94 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -109,6 +109,19 @@
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
+
+config CPU_FREQ_DEFAULT_GOV_HOTPLUG
+ bool "hotplug"
+ select CPU_FREQ_GOV_HOTPLUG
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'hotplug' as default. This allows you
+ to get a full dynamic frequency capable system with CPU
+ hotplug support by simply loading your cpufreq low-level
+ hardware driver. Be aware that not all cpufreq drivers
+ support the hotplug governor. If unsure have a look at
+	  support the hotplug governor. If unsure, have a look at
+ performance governor.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -211,5 +224,25 @@
source "drivers/cpufreq/Kconfig.x86"
endmenu
+config CPU_FREQ_GOV_HOTPLUG
+ tristate "'hotplug' cpufreq governor"
+ depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU
+ help
+	  'hotplug' - this governor mimics the frequency scaling behavior
+	  of 'ondemand', but with several key differences. First,
+	  frequency transitions use the CPUFreq table directly instead
+	  of stepping by a percentage of the maximum available
+	  frequency. Second, 'hotplug' will offline auxiliary
+ CPUs when the system is idle, and online those CPUs once the
+ system becomes busy again. This last feature is needed for
+ architectures which transition to low power states when only
+ the "master" CPU is online, or for thermally constrained
+ devices.
+
+ If you don't have one of these architectures or devices, use
+ 'ondemand' instead.
+
+ If in doubt, say N.
+
endif
endmenu
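
Once the governor is built, it is selected per policy through the usual cpufreq sysfs interface, and its tunables land in the global "hotplug" attribute group (registered on cpufreq_global_kobject further down in this series, so typically under /sys/devices/system/cpu/cpufreq/hotplug/). A small userspace sketch; the sysfs paths are the conventional ones and are assumed, not guaranteed:

#include <stdio.h>

/* write a single string value to a sysfs attribute */
static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* switch CPU0's policy to the hotplug governor */
	sysfs_write("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
		    "hotplug");
	/* be less eager to plug CPU1 back in: average 10 windows */
	sysfs_write("/sys/devices/system/cpu/cpufreq/hotplug/hotplug_in_sampling_periods",
		    "10");
	return 0;
}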
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index c044060..1a5e64d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -10,6 +10,7 @@
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
+obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq_hotplug.c b/drivers/cpufreq/cpufreq_hotplug.c
new file mode 100644
index 0000000..4a1479d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_hotplug.c
@@ -0,0 +1,744 @@
+/*
+ * CPUFreq hotplug governor
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Mike Turquette <mturquette@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * Based on ondemand governor
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>,
+ * Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* greater than 80% avg load across online CPUs increases frequency */
+#define DEFAULT_UP_FREQ_MIN_LOAD (80)
+
+/* Keep 10% of idle under the up threshold when decreasing the frequency */
+#define DEFAULT_FREQ_DOWN_DIFFERENTIAL (10)
+
+/* less than 35% avg load across online CPUs decreases frequency */
+#define DEFAULT_DOWN_FREQ_MAX_LOAD (35)
+
+/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */
+#define DEFAULT_SAMPLING_PERIOD (100000)
+
+/* default number of sampling periods to average before hotplug-in decision */
+#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (5)
+
+/* default number of sampling periods to average before hotplug-out decision */
+#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (20)
+
+static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
+static
+#endif
+struct cpufreq_governor cpufreq_gov_hotplug = {
+ .name = "hotplug",
+ .governor = cpufreq_governor_dbs,
+ .owner = THIS_MODULE,
+};
+
+struct cpu_dbs_info_s {
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_nice;
+ struct cpufreq_policy *cur_policy;
+ struct delayed_work work;
+ struct cpufreq_frequency_table *freq_table;
+ int cpu;
+ /*
+ * percpu mutex that serializes governor limit change with
+ * do_dbs_timer invocation. We do not want do_dbs_timer to run
+ * when user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);
+
+static unsigned int dbs_enable; /* number of CPUs using this policy */
+
+/*
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
+ */
+static DEFINE_MUTEX(dbs_mutex);
+
+static struct workqueue_struct *khotplug_wq;
+
+static struct dbs_tuners {
+ unsigned int sampling_rate;
+ unsigned int up_threshold;
+ unsigned int down_differential;
+ unsigned int down_threshold;
+ unsigned int hotplug_in_sampling_periods;
+ unsigned int hotplug_out_sampling_periods;
+ unsigned int hotplug_load_index;
+ unsigned int *hotplug_load_history;
+ unsigned int ignore_nice;
+ unsigned int io_is_busy;
+} dbs_tuners_ins = {
+ .sampling_rate = DEFAULT_SAMPLING_PERIOD,
+ .up_threshold = DEFAULT_UP_FREQ_MIN_LOAD,
+ .down_differential = DEFAULT_FREQ_DOWN_DIFFERENTIAL,
+ .down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD,
+ .hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
+ .hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS,
+ .hotplug_load_index = 0,
+ .ignore_nice = 0,
+ .io_is_busy = 0,
+};
+
+/*
+ * A corner case exists when switching io_is_busy at run-time: comparing idle
+ * times from a non-io_is_busy period to an io_is_busy period (or vice-versa)
+ * will misrepresent the actual change in system idleness. We ignore this
+ * corner case: enabling io_is_busy might cause freq increase and disabling
+ * might cause freq decrease, which probably matches the original intent.
+ */
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time;
+ u64 iowait_time;
+
+ /* cpufreq-hotplug always assumes CONFIG_NO_HZ */
+ idle_time = get_cpu_idle_time_us(cpu, wall);
+
+ /* add time spent doing I/O to idle time */
+ if (dbs_tuners_ins.io_is_busy) {
+ iowait_time = get_cpu_iowait_time_us(cpu, wall);
+ /* cpufreq-hotplug always assumes CONFIG_NO_HZ */
+ if (iowait_time != -1ULL && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+ }
+
+ return idle_time;
+}
+
+/************************** sysfs interface ************************/
+
+/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */
+
+/* cpufreq_hotplug Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(up_threshold, up_threshold);
+show_one(down_differential, down_differential);
+show_one(down_threshold, down_threshold);
+show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods);
+show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods);
+show_one(ignore_nice_load, ignore_nice);
+show_one(io_is_busy, io_is_busy);
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_rate = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input <= dbs_tuners_ins.down_threshold) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.up_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input >= dbs_tuners_ins.up_threshold)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.down_differential = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input >= dbs_tuners_ins.up_threshold) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.down_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_hotplug_in_sampling_periods(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ unsigned int *temp;
+ unsigned int max_windows;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ /* already using this value, bail out */
+ if (input == dbs_tuners_ins.hotplug_in_sampling_periods)
+ return count;
+
+ mutex_lock(&dbs_mutex);
+ ret = count;
+ max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* no need to resize array */
+ if (input <= max_windows) {
+ dbs_tuners_ins.hotplug_in_sampling_periods = input;
+ goto out;
+ }
+
+ /* resize array */
+ temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);
+
+ if (!temp || IS_ERR(temp)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(temp, dbs_tuners_ins.hotplug_load_history,
+ (max_windows * sizeof(unsigned int)));
+ kfree(dbs_tuners_ins.hotplug_load_history);
+
+ /* replace old buffer, old number of sampling periods & old index */
+ dbs_tuners_ins.hotplug_load_history = temp;
+ dbs_tuners_ins.hotplug_in_sampling_periods = input;
+ dbs_tuners_ins.hotplug_load_index = max_windows;
+out:
+ mutex_unlock(&dbs_mutex);
+
+ return ret;
+}
+
+static ssize_t store_hotplug_out_sampling_periods(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ unsigned int *temp;
+ unsigned int max_windows;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ /* already using this value, bail out */
+ if (input == dbs_tuners_ins.hotplug_out_sampling_periods)
+ return count;
+
+ mutex_lock(&dbs_mutex);
+ ret = count;
+ max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* no need to resize array */
+ if (input <= max_windows) {
+ dbs_tuners_ins.hotplug_out_sampling_periods = input;
+ goto out;
+ }
+
+ /* resize array */
+ temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);
+
+ if (!temp || IS_ERR(temp)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(temp, dbs_tuners_ins.hotplug_load_history,
+ (max_windows * sizeof(unsigned int)));
+ kfree(dbs_tuners_ins.hotplug_load_history);
+
+ /* replace old buffer, old number of sampling periods & old index */
+ dbs_tuners_ins.hotplug_load_history = temp;
+ dbs_tuners_ins.hotplug_out_sampling_periods = input;
+ dbs_tuners_ins.hotplug_load_index = max_windows;
+out:
+ mutex_unlock(&dbs_mutex);
+
+ return ret;
+}
+
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ unsigned int j;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ mutex_lock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ mutex_unlock(&dbs_mutex);
+ return count;
+ }
+ dbs_tuners_ins.ignore_nice = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+ dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice)
+ dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.io_is_busy = !!input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+define_one_global_rw(sampling_rate);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_differential);
+define_one_global_rw(down_threshold);
+define_one_global_rw(hotplug_in_sampling_periods);
+define_one_global_rw(hotplug_out_sampling_periods);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(io_is_busy);
+
+static struct attribute *dbs_attributes[] = {
+ &sampling_rate.attr,
+ &up_threshold.attr,
+ &down_differential.attr,
+ &down_threshold.attr,
+ &hotplug_in_sampling_periods.attr,
+ &hotplug_out_sampling_periods.attr,
+ &ignore_nice_load.attr,
+ &io_is_busy.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "hotplug",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+{
+ /* combined load of all enabled CPUs */
+ unsigned int total_load = 0;
+	/* single largest CPU load percentage */
+ unsigned int max_load = 0;
+ /* largest CPU load in terms of frequency */
+ unsigned int max_load_freq = 0;
+ /* average load across all enabled CPUs */
+ unsigned int avg_load = 0;
+ /* average load across multiple sampling periods for hotplug events */
+ unsigned int hotplug_in_avg_load = 0;
+ unsigned int hotplug_out_avg_load = 0;
+ /* number of sampling periods averaged for hotplug decisions */
+ unsigned int periods;
+
+ struct cpufreq_policy *policy;
+ unsigned int i, j;
+
+ policy = this_dbs_info->cur_policy;
+
+ /*
+ * cpu load accounting
+ * get highest load, total load and average load across all CPUs
+ */
+ for_each_cpu(j, policy->cpus) {
+ unsigned int load;
+ unsigned int idle_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ j_dbs_info->prev_cpu_wall);
+ j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ j_dbs_info->prev_cpu_idle);
+ j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+
+ /* keep track of combined load across all CPUs */
+ total_load += load;
+
+ /* keep track of highest single load across all CPUs */
+ if (load > max_load)
+ max_load = load;
+ }
+
+ /* use the max load in the OPP freq change policy */
+ max_load_freq = max_load * policy->cur;
+
+ /* calculate the average load across all related CPUs */
+ avg_load = total_load / num_online_cpus();
+
+
+ /*
+ * hotplug load accounting
+ * average load over multiple sampling periods
+ */
+
+ /* how many sampling periods do we use for hotplug decisions? */
+ periods = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* store avg_load in the circular buffer */
+ dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index]
+ = avg_load;
+
+ /* compute average load across in & out sampling periods */
+ for (i = 0, j = dbs_tuners_ins.hotplug_load_index;
+ i < periods; i++, j--) {
+ if (i < dbs_tuners_ins.hotplug_in_sampling_periods)
+ hotplug_in_avg_load +=
+ dbs_tuners_ins.hotplug_load_history[j];
+ if (i < dbs_tuners_ins.hotplug_out_sampling_periods)
+ hotplug_out_avg_load +=
+ dbs_tuners_ins.hotplug_load_history[j];
+
+ if (j == 0)
+ j = periods;
+ }
+
+ hotplug_in_avg_load = hotplug_in_avg_load /
+ dbs_tuners_ins.hotplug_in_sampling_periods;
+
+ hotplug_out_avg_load = hotplug_out_avg_load /
+ dbs_tuners_ins.hotplug_out_sampling_periods;
+
+ /* return to first element if we're at the circular buffer's end */
+ if (++dbs_tuners_ins.hotplug_load_index == periods)
+ dbs_tuners_ins.hotplug_load_index = 0;
+
+ /* check if auxiliary CPU is needed based on avg_load */
+ if (avg_load > dbs_tuners_ins.up_threshold) {
+		/* should we enable auxiliary CPUs? */
+ if (num_online_cpus() < 2 && hotplug_in_avg_load >
+ dbs_tuners_ins.up_threshold) {
+			/* hotplug with cpufreq is tricky:
+			 * a call into cpufreq_governor_dbs may cause a lockup,
+			 * but the workqueue is not running here, so it's safe.
+			 */
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ cpu_up(1);
+ mutex_lock(&this_dbs_info->timer_mutex);
+ goto out;
+ }
+ }
+
+ /* check for frequency increase based on max_load */
+ if (max_load > dbs_tuners_ins.up_threshold) {
+ /* increase to highest frequency supported */
+ if (policy->cur < policy->max)
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+
+ goto out;
+ }
+
+ /* check for frequency decrease */
+ if (avg_load < dbs_tuners_ins.down_threshold) {
+ /* are we at the minimum frequency already? */
+ if (policy->cur == policy->min) {
+			/* should we disable auxiliary CPUs? */
+ if (num_online_cpus() > 1 && hotplug_out_avg_load <
+ dbs_tuners_ins.down_threshold) {
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ cpu_down(1);
+ mutex_lock(&this_dbs_info->timer_mutex);
+ }
+ goto out;
+ }
+ }
+
+ /*
+	 * go down to the lowest frequency that can sustain the load while
+	 * keeping down_differential percent of idle so we don't cross up_threshold
+ */
+ if ((max_load_freq <
+ (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+ policy->cur) && (policy->cur > policy->min)) {
+ unsigned int freq_next;
+ freq_next = max_load_freq /
+ (dbs_tuners_ins.up_threshold -
+ dbs_tuners_ins.down_differential);
+
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ }
+out:
+ return;
+}
+
+static void do_dbs_timer(struct work_struct *work)
+{
+ struct cpu_dbs_info_s *dbs_info =
+ container_of(work, struct cpu_dbs_info_s, work.work);
+ unsigned int cpu = dbs_info->cpu;
+
+	/* We want all related CPUs to do sampling at nearly the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ mutex_lock(&dbs_info->timer_mutex);
+ dbs_check_cpu(dbs_info);
+ queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay);
+ mutex_unlock(&dbs_info->timer_mutex);
+}
+
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
+{
+	/* We want all related CPUs to do sampling at nearly the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ delay -= jiffies % delay;
+
+ INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+ queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work,
+ delay);
+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+{
+ cancel_delayed_work_sync(&dbs_info->work);
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int i, j, max_periods;
+ int rc;
+
+ this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_enable++;
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
+
+ j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice) {
+ j_dbs_info->prev_cpu_nice =
+ kstat_cpu(j).cpustat.nice;
+ }
+
+ max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
+ DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS);
+ dbs_tuners_ins.hotplug_load_history = kmalloc(
+ (sizeof(unsigned int) * max_periods),
+ GFP_KERNEL);
+ if (!dbs_tuners_ins.hotplug_load_history) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+ for (i = 0; i < max_periods; i++)
+ dbs_tuners_ins.hotplug_load_history[i] = 50;
+ }
+ this_dbs_info->cpu = cpu;
+ this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ /*
+ * Start the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (dbs_enable == 1) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+ }
+ mutex_unlock(&dbs_mutex);
+
+ mutex_init(&this_dbs_info->timer_mutex);
+ dbs_timer_init(this_dbs_info);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ dbs_timer_exit(this_dbs_info);
+
+ mutex_lock(&dbs_mutex);
+ mutex_destroy(&this_dbs_info->timer_mutex);
+ dbs_enable--;
+ mutex_unlock(&dbs_mutex);
+ if (!dbs_enable)
+ sysfs_remove_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ kfree(dbs_tuners_ins.hotplug_load_history);
+ /*
+ * XXX BIG CAVEAT: Stopping the governor with CPU1 offline
+ * will result in it remaining offline until the user onlines
+ * it again. It is up to the user to do this (for now).
+ */
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&this_dbs_info->timer_mutex);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(this_dbs_info->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ break;
+ }
+ return 0;
+}
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ int err;
+ cputime64_t wall;
+ u64 idle_time;
+ int cpu = get_cpu();
+
+ idle_time = get_cpu_idle_time_us(cpu, &wall);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD;
+ } else {
+ pr_err("cpufreq-hotplug: %s: assumes CONFIG_NO_HZ\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ khotplug_wq = create_workqueue("khotplug");
+ if (!khotplug_wq) {
+ pr_err("Creation of khotplug failed\n");
+ return -EFAULT;
+ }
+ err = cpufreq_register_governor(&cpufreq_gov_hotplug);
+ if (err)
+ destroy_workqueue(khotplug_wq);
+
+ return err;
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_hotplug);
+ destroy_workqueue(khotplug_wq);
+}
+
+MODULE_AUTHOR("Mike Turquette <mturquette@ti.com>");
+MODULE_DESCRIPTION("'cpufreq_hotplug' - cpufreq governor for dynamic frequency scaling and CPU hotplugging");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
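
The hotplug decision in dbs_check_cpu() averages the most recent N entries of a shared circular load history, walking backwards from the newest sample and wrapping at index 0. A stripped-down userspace model of that walk with the default window sizes, which may make the wrap-around easier to follow:

#include <stdio.h>

#define IN_PERIODS	5
#define OUT_PERIODS	20
#define PERIODS		(IN_PERIODS > OUT_PERIODS ? IN_PERIODS : OUT_PERIODS)

int main(void)
{
	unsigned int history[PERIODS];
	unsigned int index = 0, i, j;
	unsigned int in_avg = 0, out_avg = 0;

	for (i = 0; i < PERIODS; i++)	/* governor seeds the history at 50% */
		history[i] = 50;

	history[index] = 90;		/* this window's average load */

	/* walk backwards from the newest sample, wrapping at 0 */
	for (i = 0, j = index; i < PERIODS; i++, j--) {
		if (i < IN_PERIODS)
			in_avg += history[j];
		if (i < OUT_PERIODS)
			out_avg += history[j];
		if (j == 0)
			j = PERIODS;	/* j-- then lands on PERIODS - 1 */
	}
	printf("in: %u%%, out: %u%%\n",
	       in_avg / IN_PERIODS, out_avg / OUT_PERIODS);
	return 0;
}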
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 90431cb..54c4de4 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/err.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
@@ -171,6 +172,78 @@
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
+int cpufreq_frequency_table_next_lowest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table, int *index)
+{
+ unsigned int cur_freq;
+ unsigned int next_lowest_freq;
+ int optimal_index = -1;
+ int i = 0;
+
+ if (!policy || IS_ERR(policy) || !table || IS_ERR(table) ||
+ !index || IS_ERR(index))
+ return -ENOMEM;
+
+ cur_freq = policy->cur;
+ next_lowest_freq = policy->min;
+
+ /* we're at the lowest frequency in the table already, bail out */
+ if (cur_freq == policy->min)
+ return -EINVAL;
+
+ /* walk the list, find closest freq to cur_freq that is below it */
+	while (table[i].frequency != CPUFREQ_TABLE_END) {
+ if (table[i].frequency < cur_freq &&
+ table[i].frequency >= next_lowest_freq) {
+ next_lowest_freq = table[i].frequency;
+ optimal_index = table[i].index;
+ }
+
+ i++;
+ }
+
+ *index = optimal_index;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_next_lowest);
+
+int cpufreq_frequency_table_next_highest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table, int *index)
+{
+ unsigned int cur_freq;
+ unsigned int next_higher_freq;
+ int optimal_index = -1;
+ int i = 0;
+
+ if (!policy || IS_ERR(policy) || !table || IS_ERR(table) ||
+ !index || IS_ERR(index))
+ return -ENOMEM;
+
+ cur_freq = policy->cur;
+ next_higher_freq = policy->max;
+
+ /* we're at the highest frequency in the table already, bail out */
+ if (cur_freq == policy->max)
+ return -EINVAL;
+
+ /* walk the list, find closest freq to cur_freq that is above it */
+	while (table[i].frequency != CPUFREQ_TABLE_END) {
+ if (table[i].frequency > cur_freq &&
+ table[i].frequency <= next_higher_freq) {
+ next_higher_freq = table[i].frequency;
+ optimal_index = table[i].index;
+ }
+
+ i++;
+ }
+
+ *index = optimal_index;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_next_highest);
+
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
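
A sketch of how a governor could step one table entry down with the new helper. Note the helper hands back the entry's .index field, which in most SoC frequency tables is simply the row number; the prototype is assumed to be exported via cpufreq.h, which is not shown in this hunk.

#include <linux/cpufreq.h>

static void step_down_one_entry(struct cpufreq_policy *policy,
				struct cpufreq_frequency_table *table)
{
	int index;

	/* -EINVAL here means policy->cur is already at policy->min */
	if (cpufreq_frequency_table_next_lowest(policy, table, &index))
		return;

	__cpufreq_driver_target(policy, table[index].frequency,
				CPUFREQ_RELATION_H);
}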
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 35bebde..561cb4f 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -21,26 +21,53 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
#include <asm/mach/irq.h>
+#include <plat/omap-pm.h>
+#include <plat/usb.h> /* for omap4_trigger_ioctrl */
+
+#include "../mux.h"
+
+static LIST_HEAD(omap_gpio_list);
+
+struct gpio_regs {
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+ u32 ctrl;
+ u32 oe;
+ u32 leveldetect0;
+ u32 leveldetect1;
+ u32 risingdetect;
+ u32 fallingdetect;
+ u32 dataout;
+ u32 debounce;
+ u32 debounce_en;
+ u32 edge_falling;
+ u32 edge_rising;
+
+ u32 ew_leveldetect0;
+ u32 ew_leveldetect1;
+
+ u32 pad_set_wakeupenable;
+};
struct gpio_bank {
+ struct list_head node;
unsigned long pbase;
void __iomem *base;
u16 irq;
u16 virtual_irq_start;
- int method;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 suspend_wakeup;
u32 saved_wakeup;
-#endif
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
-
+ struct gpio_regs context;
u32 saved_datain;
u32 saved_fallingdetect;
u32 saved_risingdetect;
@@ -52,106 +79,51 @@
u32 mod_usage;
u32 dbck_enable_mask;
struct device *dev;
+ bool is_mpuio;
bool dbck_flag;
+ bool loses_context;
+ bool suspend_support;
+ bool saved_context;
int stride;
+ u32 width;
+ u16 id;
+
+ u32 type_leveldetect0;
+ u32 type_leveldetect1;
+ u32 type_risingedge;
+ u32 type_fallingedge;
+
+ void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
+ struct omap_gpio_reg_offs *regs;
+
+ struct omap_mux *mux[32];
};
-#ifdef CONFIG_ARCH_OMAP3
-struct omap3_gpio_regs {
- u32 irqenable1;
- u32 irqenable2;
- u32 wake_en;
- u32 ctrl;
- u32 oe;
- u32 leveldetect0;
- u32 leveldetect1;
- u32 risingdetect;
- u32 fallingdetect;
- u32 dataout;
-};
+static void omap_gpio_mod_init(struct gpio_bank *bank);
-static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
-#endif
-
-/*
- * TODO: Cleanup gpio_bank usage as it is having information
- * related to all instances of the device
- */
-static struct gpio_bank *gpio_bank;
-
-static int bank_width;
-
-/* TODO: Analyze removing gpio_bank_count usage from driver code */
-int gpio_bank_count;
-
-static inline struct gpio_bank *get_gpio_bank(int gpio)
+#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
+#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
+#define GPIO_MOD_CTRL_BIT BIT(0)
+static void _set_gpio_waken(struct gpio_bank *bank, int gpio)
{
- if (cpu_is_omap15xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1];
+ if (bank->regs->wkup_set != bank->regs->wkup_clear) {
+ __raw_writel((1 << gpio), bank->base + bank->regs->wkup_set);
+ } else {
+ u32 val = __raw_readl(bank->base + bank->regs->wkup_set);
+ val |= 1 << gpio;
+ __raw_writel(val, bank->base + bank->regs->wkup_set);
}
- if (cpu_is_omap16xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 4)];
- }
- if (cpu_is_omap7xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 5)];
- }
- if (cpu_is_omap24xx())
- return &gpio_bank[gpio >> 5];
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return &gpio_bank[gpio >> 5];
- BUG();
- return NULL;
}
-
-static inline int get_gpio_index(int gpio)
+static void _clear_gpio_waken(struct gpio_bank *bank, int gpio)
{
- if (cpu_is_omap7xx())
- return gpio & 0x1f;
- if (cpu_is_omap24xx())
- return gpio & 0x1f;
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return gpio & 0x1f;
- return gpio & 0x0f;
-}
-
-static inline int gpio_valid(int gpio)
-{
- if (gpio < 0)
- return -1;
- if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
- if (gpio >= OMAP_MAX_GPIO_LINES + 16)
- return -1;
- return 0;
+ if (bank->regs->wkup_set != bank->regs->wkup_clear) {
+ __raw_writel((1 << gpio), bank->base + bank->regs->wkup_clear);
+ } else {
+ u32 val = __raw_readl(bank->base + bank->regs->wkup_clear);
+ val &= ~(1 << gpio);
+ __raw_writel(val, bank->base + bank->regs->wkup_clear);
}
- if (cpu_is_omap15xx() && gpio < 16)
- return 0;
- if ((cpu_is_omap16xx()) && gpio < 64)
- return 0;
- if (cpu_is_omap7xx() && gpio < 192)
- return 0;
- if (cpu_is_omap2420() && gpio < 128)
- return 0;
- if (cpu_is_omap2430() && gpio < 160)
- return 0;
- if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
- return 0;
- return -1;
-}
-static int check_gpio(int gpio)
-{
- if (unlikely(gpio_valid(gpio) < 0)) {
- printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
- dump_stack();
- return -1;
- }
- return 0;
}
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
@@ -159,41 +131,7 @@
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->direction;
l = __raw_readl(reg);
if (is_input)
l |= 1 << gpio;
@@ -202,165 +140,48 @@
__raw_writel(l, reg);
}
-static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
+
+/* set data out value using dedicate set/clear register */
+static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
void __iomem *reg = bank->base;
- u32 l = 0;
+ u32 l = GPIO_BIT(bank, gpio);
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_DATAOUT;
- else
- reg += OMAP1610_GPIO_CLEAR_DATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETDATAOUT;
- else
- reg += OMAP24XX_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_SETDATAOUT;
- else
- reg += OMAP4_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ if (enable)
+ reg += bank->regs->set_dataout;
+ else
+ reg += bank->regs->clr_dataout;
+
+ __raw_writel(l, reg);
+}
+
+/* set data out value using mask register */
+static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 l;
+
+ l = __raw_readl(reg);
+ if (enable)
+ l |= gpio_bit;
+ else
+ l &= ~gpio_bit;
__raw_writel(l, reg);
}
static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->datain;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_INPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAIN;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_INPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAIN;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAIN;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return (__raw_readl(reg)
- & (1 << get_gpio_index(gpio))) != 0;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}
static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->dataout;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
-
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAOUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAOUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAOUT;
- break;
-#endif
- default:
- return -EINVAL;
- }
-
- return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}
#define MOD_REG_BIT(reg, bit_mask, set) \
@@ -383,7 +204,7 @@
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
unsigned debounce)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg;
u32 val;
u32 l;
@@ -397,21 +218,12 @@
else
debounce = (debounce / 0x1f) - 1;
- l = 1 << get_gpio_index(gpio);
+ l = GPIO_BIT(bank, gpio);
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCINGTIME;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
-
+ reg = bank->base + bank->regs->debounce;
__raw_writel(debounce, reg);
- reg = bank->base;
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCENABLE;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_EN;
-
+ reg = bank->base + bank->regs->debounce_en;
val = __raw_readl(reg);
if (debounce) {
@@ -426,35 +238,24 @@
__raw_writel(val, reg);
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
int trigger)
{
void __iomem *base = bank->base;
u32 gpio_bit = 1 << gpio;
- if (cpu_is_omap44xx()) {
- MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ MOD_REG_BIT(bank->regs->leveldetect0, gpio_bit,
trigger & IRQ_TYPE_LEVEL_LOW);
- MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+ MOD_REG_BIT(bank->regs->leveldetect1, gpio_bit,
trigger & IRQ_TYPE_LEVEL_HIGH);
- MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
+ MOD_REG_BIT(bank->regs->risingdetect, gpio_bit,
trigger & IRQ_TYPE_EDGE_RISING);
- MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+ MOD_REG_BIT(bank->regs->fallingdetect, gpio_bit,
trigger & IRQ_TYPE_EDGE_FALLING);
- } else {
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
- }
+
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
if (cpu_is_omap44xx()) {
- MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+ MOD_REG_BIT(bank->regs->wkup_status, gpio_bit,
trigger != 0);
} else {
/*
@@ -462,11 +263,9 @@
* transitions
*/
if (trigger & IRQ_TYPE_EDGE_BOTH)
- __raw_writel(1 << gpio, bank->base
- + OMAP24XX_GPIO_SETWKUENA);
+ _set_gpio_waken(bank, gpio);
else
- __raw_writel(1 << gpio, bank->base
- + OMAP24XX_GPIO_CLEARWKUENA);
+ _clear_gpio_waken(bank, gpio);
}
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -484,17 +283,10 @@
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
- if (cpu_is_omap44xx()) {
- bank->level_mask =
- __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
- __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
- } else {
- bank->level_mask =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- }
+ bank->level_mask =
+ __raw_readl(bank->base + bank->regs->leveldetect0) |
+ __raw_readl(bank->base + bank->regs->leveldetect1);
}
-#endif
#ifdef CONFIG_ARCH_OMAP1
/*
@@ -506,23 +298,10 @@
void __iomem *reg = bank->base;
u32 l = 0;
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
- break;
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_CONTROL;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_CONTROL;
- break;
-#endif
- default:
+ if (!bank->regs->irqctrl)
return;
- }
+
+ reg += bank->regs->irqctrl;
l = __raw_readl(reg);
if ((l >> gpio) & 1)
@@ -539,10 +318,11 @@
void __iomem *reg = bank->base;
u32 l = 0;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
+ if (bank->regs->leveldetect0 && bank->regs->wkup_status) {
+ set_gpio_trigger(bank, gpio, trigger);
+ } else if (bank->regs->irqctrl) {
+ reg += bank->regs->irqctrl;
+
l = __raw_readl(reg);
if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
@@ -551,29 +331,16 @@
else if (trigger & IRQ_TYPE_EDGE_FALLING)
l &= ~(1 << gpio);
else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_CONTROL;
- l = __raw_readl(reg);
- if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
- bank->toggle_mask |= 1 << gpio;
- if (trigger & IRQ_TYPE_EDGE_RISING)
- l |= 1 << gpio;
- else if (trigger & IRQ_TYPE_EDGE_FALLING)
- l &= ~(1 << gpio);
- else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
+ return -EINVAL;
+
+ __raw_writel(l, reg);
+
+ } else if (bank->regs->edgectrl1) {
if (gpio & 0x08)
- reg += OMAP1610_GPIO_EDGE_CTRL2;
+ reg += bank->regs->edgectrl2;
else
- reg += OMAP1610_GPIO_EDGE_CTRL1;
+ reg += bank->regs->edgectrl1;
+
gpio &= 0x07;
l = __raw_readl(reg);
l &= ~(3 << (gpio << 1));
@@ -581,40 +348,17 @@
l |= 2 << (gpio << 1);
if (trigger & IRQ_TYPE_EDGE_FALLING)
l |= 1 << (gpio << 1);
+
if (trigger)
/* Enable wake-up during idle for dynamic tick */
- __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
+ _set_gpio_waken(bank, gpio);
else
- __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_CONTROL;
- l = __raw_readl(reg);
- if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
- bank->toggle_mask |= 1 << gpio;
- if (trigger & IRQ_TYPE_EDGE_RISING)
- l |= 1 << gpio;
- else if (trigger & IRQ_TYPE_EDGE_FALLING)
- l &= ~(1 << gpio);
- else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
- set_24xx_gpio_triggering(bank, gpio, trigger);
- return 0;
-#endif
- default:
- goto bad;
+ _clear_gpio_waken(bank, gpio);
+
+ __raw_writel(l, reg);
}
- __raw_writel(l, reg);
+
return 0;
-bad:
- return -EINVAL;
}
static int gpio_irq_type(struct irq_data *d, unsigned type)
@@ -629,20 +373,34 @@
else
gpio = d->irq - IH_GPIO_BASE;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
-
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
- /* OMAP1 allows only only edge triggering */
- if (!cpu_class_is_omap2()
- && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
+ bank = irq_data_get_irq_chip_data(d);
+
+ /* OMAP1 allows only edge triggering */
+ if (!bank->regs->leveldetect0 && (type &
+ (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
return -EINVAL;
- bank = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&bank->lock, flags);
- retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
+
+ retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
+
+ bank->type_leveldetect0 &= ~GPIO_BIT(bank, gpio);
+ bank->type_leveldetect1 &= ~GPIO_BIT(bank, gpio);
+ bank->type_fallingedge &= ~GPIO_BIT(bank, gpio);
+ bank->type_risingedge &= ~GPIO_BIT(bank, gpio);
+
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ bank->type_leveldetect0 |= GPIO_BIT(bank, gpio);
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ bank->type_leveldetect1 |= GPIO_BIT(bank, gpio);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ bank->type_fallingedge |= GPIO_BIT(bank, gpio);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ bank->type_risingedge |= GPIO_BIT(bank, gpio);
+
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -657,195 +415,81 @@
{
void __iomem *reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- /* MPUIO irqstatus is reset by reading the status register,
- * so do nothing here */
- return;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_STATUS;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_STATUS;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUS0;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->irqstatus;
__raw_writel(gpio_mask, reg);
/* Workaround for clearing DSP GPIO interrupts to allow retention */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
- else if (cpu_is_omap44xx())
- reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ if (bank->regs->irqstatus2) {
+ reg = bank->base + bank->regs->irqstatus2;
__raw_writel(gpio_mask, reg);
+ }
/* Flush posted write for the irq status to avoid spurious interrupts */
__raw_readl(reg);
- }
}
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
- _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
+ _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
void __iomem *reg = bank->base;
- int inv = 0;
u32 l;
- u32 mask;
+ u32 mask = (1 << bank->width) - 1;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQENABLE1;
- mask = 0xffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
- mask = 0xffffffff;
- inv = 1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQENABLE1;
- mask = 0xffffffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUSSET0;
- mask = 0xffffffff;
- break;
-#endif
- default:
- WARN_ON(1);
- return 0;
- }
-
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (inv)
+ if (bank->regs->irqenable_inv)
l = ~l;
l &= mask;
return l;
}
-static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
+static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ if (bank->regs->set_irqenable) {
+ reg += bank->regs->set_irqenable;
+ l = gpio_mask;
+ } else {
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
+ if (bank->regs->irqenable_inv)
+ l &= ~gpio_mask;
else
l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
- l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
- l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_IRQENABLE1;
- else
- reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
- l = gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
- l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
- l |= gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETIRQENABLE1;
- else
- reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
- l = gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_IRQSTATUSSET0;
- else
- reg += OMAP4_GPIO_IRQSTATUSCLR0;
- l = gpio_mask;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
}
+
+ __raw_writel(l, reg);
+}
+
+static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ if (bank->regs->clr_irqenable) {
+ reg += bank->regs->clr_irqenable;
+ l = gpio_mask;
+ } else {
+ reg += bank->regs->irqenable;
+ l = __raw_readl(reg);
+ if (bank->regs->irqenable_inv)
+ l |= gpio_mask;
+ else
+ l &= ~gpio_mask;
+ }
+
__raw_writel(l, reg);
}
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
- _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
+ _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
/*
@@ -858,50 +502,32 @@
*/
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
- unsigned long uninitialized_var(flags);
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ unsigned long flags;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_MPUIO:
- case METHOD_GPIO_1610:
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
- if (bank->non_wakeup_gpios & (1 << gpio)) {
- printk(KERN_ERR "Unable to modify wakeup on "
- "non-wakeup GPIO%d\n",
- (bank - gpio_bank) * 32 + gpio);
- return -EINVAL;
- }
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
- default:
- printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
- bank->method);
+ if (bank->non_wakeup_gpios & gpio_bit) {
+ dev_err(bank->dev,
+ "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
return -EINVAL;
}
+
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= gpio_bit;
+ else
+ bank->suspend_wakeup &= ~gpio_bit;
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
- _set_gpio_direction(bank, get_gpio_index(gpio), 1);
+ _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
@@ -911,10 +537,8 @@
struct gpio_bank *bank;
int retval;
- if (check_gpio(gpio) < 0)
- return -ENODEV;
bank = irq_data_get_irq_chip_data(d);
- retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
+ retval = _set_gpio_wakeup(bank, gpio, enable);
return retval;
}
@@ -925,37 +549,47 @@
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
+ /*
+ * If this is the first gpio_request for the bank,
+ * enable the bank module.
+ */
+ if (!bank->mod_usage) {
+ if (pm_runtime_get_sync(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d "
+ "pm_runtime_get_sync failed\n",
+ __func__, bank->id);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Initialize the gpio bank registers to init time value */
+ omap_gpio_mod_init(bank);
+ }
/* Set trigger to none. You need to enable the desired trigger with
* request_irq() or set_irq_type().
*/
_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-#ifdef CONFIG_ARCH_OMAP15XX
- if (bank->method == METHOD_GPIO_1510) {
- void __iomem *reg;
+ if (bank->regs->pinctrl) {
+ void __iomem *reg = bank->base + bank->regs->pinctrl;
/* Claim the pin for MPU */
- reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
__raw_writel(__raw_readl(reg) | (1 << offset), reg);
}
-#endif
- if (!cpu_class_is_omap1()) {
- if (!bank->mod_usage) {
- void __iomem *reg = bank->base;
- u32 ctrl;
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg += OMAP24XX_GPIO_CTRL;
- else if (cpu_is_omap44xx())
- reg += OMAP4_GPIO_CTRL;
- ctrl = __raw_readl(reg);
- /* Module is enabled, clocks are not gated */
- ctrl &= 0xFFFFFFFE;
- __raw_writel(ctrl, reg);
- }
- bank->mod_usage |= 1 << offset;
+ if (bank->regs->ctrl && !bank->mod_usage) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+ ctrl = __raw_readl(reg);
+ /* Module is enabled, clocks are not gated */
+ ctrl &= ~GPIO_MOD_CTRL_BIT;
+ __raw_writel(ctrl, reg);
}
+
+ bank->mod_usage |= 1 << offset;
+
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -967,44 +601,36 @@
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
-#ifdef CONFIG_ARCH_OMAP16XX
- if (bank->method == METHOD_GPIO_1610) {
- /* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- __raw_writel(1 << offset, reg);
- }
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- if (bank->method == METHOD_GPIO_24XX) {
- /* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- __raw_writel(1 << offset, reg);
- }
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- if (bank->method == METHOD_GPIO_44XX) {
- /* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
- __raw_writel(1 << offset, reg);
- }
-#endif
- if (!cpu_class_is_omap1()) {
- bank->mod_usage &= ~(1 << offset);
- if (!bank->mod_usage) {
- void __iomem *reg = bank->base;
- u32 ctrl;
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg += OMAP24XX_GPIO_CTRL;
- else if (cpu_is_omap44xx())
- reg += OMAP4_GPIO_CTRL;
- ctrl = __raw_readl(reg);
- /* Module is disabled, clocks are gated */
- ctrl |= 1;
- __raw_writel(ctrl, reg);
+ if (bank->regs->wkup_clear)
+ /* Disable wake-up during idle for dynamic tick */
+ _clear_gpio_waken(bank, offset);
+
+ bank->mod_usage &= ~(1 << offset);
+
+ if (bank->regs->ctrl && !bank->mod_usage) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+ ctrl = __raw_readl(reg);
+ /* Module is disabled, clocks are gated */
+ ctrl |= GPIO_MOD_CTRL_BIT;
+ __raw_writel(ctrl, reg);
+ }
+
+ _reset_gpio(bank, bank->chip.base + offset);
+
+ /*
+ * If this is the last gpio to be freed in the bank,
+ * disable the bank module.
+ */
+ if (!bank->mod_usage) {
+ if (pm_runtime_put_sync_suspend(bank->dev)) {
+ dev_err(bank->dev, "%s: GPIO bank %d "
+ "pm_runtime_put_sync_suspend failed\n",
+ __func__, bank->id);
}
}
- _reset_gpio(bank, bank->chip.base + offset);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1030,31 +656,10 @@
chained_irq_enter(chip, desc);
bank = irq_get_handler_data(irq);
-#ifdef CONFIG_ARCH_OMAP1
- if (bank->method == METHOD_MPUIO)
- isr_reg = bank->base +
- OMAP_MPUIO_GPIO_INT / bank->stride;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- if (bank->method == METHOD_GPIO_1510)
- isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP16XX)
- if (bank->method == METHOD_GPIO_1610)
- isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- if (bank->method == METHOD_GPIO_7XX)
- isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- if (bank->method == METHOD_GPIO_24XX)
- isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- if (bank->method == METHOD_GPIO_44XX)
- isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
-#endif
+
+ pm_runtime_get_sync(bank->dev);
+
+ isr_reg = bank->base + bank->regs->irqstatus;
if (WARN_ON(!isr_reg))
goto exit;
@@ -1064,21 +669,22 @@
u32 enabled;
enabled = _get_gpio_irqbank_mask(bank);
- isr_saved = isr = __raw_readl(isr_reg) & enabled;
- if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
- isr &= 0x0000ffff;
+ if (bank->width == 32)
+ isr = __raw_readl(isr_reg) & enabled;
+ else if (bank->width == 16)
+ isr = (__raw_readw(isr_reg) & enabled) & 0x0000ffff;
+ isr_saved = isr;
- if (cpu_class_is_omap2()) {
+ if (bank->regs->leveldetect0)
level_mask = bank->level_mask & enabled;
- }
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
+ _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
@@ -1094,7 +700,7 @@
gpio_irq = bank->virtual_irq_start;
for (; isr != 0; isr >>= 1, gpio_irq++) {
- gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+ gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));
if (!(isr & 1))
continue;
@@ -1121,6 +727,8 @@
exit:
if (!unmasked)
chained_irq_exit(chip, desc);
+
+ pm_runtime_put_sync_suspend(bank->dev);
}
static void gpio_irq_shutdown(struct irq_data *d)
@@ -1150,7 +758,7 @@
spin_lock_irqsave(&bank->lock, flags);
_set_gpio_irqenable(bank, gpio, 0);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1158,13 +766,13 @@
{
unsigned int gpio = d->irq - IH_GPIO_BASE;
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
if (trigger)
- _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
@@ -1185,55 +793,10 @@
.irq_unmask = gpio_unmask_irq,
.irq_set_type = gpio_irq_type,
.irq_set_wake = gpio_wake_enable,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
/*---------------------------------------------------------------------*/
-
-#ifdef CONFIG_ARCH_OMAP1
-
-/* MPUIO uses the always-on 32k clock */
-
-static void mpuio_ack_irq(struct irq_data *d)
-{
- /* The ISR is reset automatically, so do nothing here. */
-}
-
-static void mpuio_mask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 0);
-}
-
-static void mpuio_unmask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 1);
-}
-
-static struct irq_chip mpuio_irq_chip = {
- .name = "MPUIO",
- .irq_ack = mpuio_ack_irq,
- .irq_mask = mpuio_mask_irq,
- .irq_unmask = mpuio_unmask_irq,
- .irq_set_type = gpio_irq_type,
-#ifdef CONFIG_ARCH_OMAP16XX
- /* REVISIT: assuming only 16xx supports MPUIO wake events */
- .irq_set_wake = gpio_wake_enable,
-#endif
-};
-
-
-#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
-
-
-#ifdef CONFIG_ARCH_OMAP16XX
-
-#include <linux/platform_device.h>
-
static int omap_mpuio_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1287,34 +850,16 @@
/* could list the /proc/iomem resources */
};
-static inline void mpuio_init(void)
+static inline void mpuio_init(struct gpio_bank *bank)
{
- struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
platform_set_drvdata(&omap_mpuio_device, bank);
if (platform_driver_register(&omap_mpuio_driver) == 0)
(void) platform_device_register(&omap_mpuio_device);
}
-#else
-static inline void mpuio_init(void) {}
-#endif /* 16xx */
-
-#else
-
-extern struct irq_chip mpuio_irq_chip;
-
-#define bank_is_mpuio(bank) 0
-static inline void mpuio_init(void) {}
-
-#endif
-
/*---------------------------------------------------------------------*/
-/* REVISIT these are stupid implementations! replace by ones that
- * don't switch on METHOD_* and which mostly avoid spinlocks
- */
-
static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
@@ -1329,31 +874,8 @@
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg = bank->base + bank->regs->direction;
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
- default:
- WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
- return -EINVAL;
- }
return __raw_readl(reg) & mask;
}
@@ -1365,9 +887,9 @@
u32 mask;
gpio = chip->base + offset;
- bank = get_gpio_bank(gpio);
+ bank = container_of(chip, struct gpio_bank, chip);
reg = bank->base;
- mask = 1 << get_gpio_index(gpio);
+ mask = GPIO_BIT(bank, gpio);
if (gpio_is_input(bank, mask))
return _get_gpio_datain(bank, gpio);
@@ -1382,7 +904,7 @@
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
_set_gpio_direction(bank, offset, 0);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1416,7 +938,7 @@
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1432,19 +954,17 @@
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
+ static bool called;
u32 rev;
- if (cpu_is_omap16xx() && !(bank->method != METHOD_MPUIO))
- rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
- else if (cpu_is_omap24xx() || cpu_is_omap34xx())
- rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
- else if (cpu_is_omap44xx())
- rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
- else
+ if (called || bank->regs->revision == USHRT_MAX)
return;
- printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
+ rev = __raw_readw(bank->base + bank->regs->revision);
+ pr_info("OMAP GPIO hardware version %d.%d\n",
(rev >> 4) & 0x0f, rev & 0x0f);
+
+ called = true;
}
/* This lock class tells lockdep that GPIO irqs are in a different
@@ -1452,63 +972,96 @@
*/
static struct lock_class_key gpio_lock_class;
-static inline int init_gpio_info(struct platform_device *pdev)
+static void omap_gpio_mod_init(struct gpio_bank *bank)
{
- /* TODO: Analyze removing gpio_bank_count usage from driver code */
- gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
- GFP_KERNEL);
- if (!gpio_bank) {
- dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
- return -ENOMEM;
- }
- return 0;
-}
+ int i;
-/* TODO: Cleanup cpu_is_* checks */
-static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
-{
- if (cpu_class_is_omap2()) {
- if (cpu_is_omap44xx()) {
- __raw_writel(0xffffffff, bank->base +
- OMAP4_GPIO_IRQSTATUSCLR0);
- __raw_writel(0x00000000, bank->base +
- OMAP4_GPIO_DEBOUNCENABLE);
- /* Initialize interface clk ungated, module enabled */
- __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
- } else if (cpu_is_omap34xx()) {
- __raw_writel(0x00000000, bank->base +
- OMAP24XX_GPIO_IRQENABLE1);
- __raw_writel(0xffffffff, bank->base +
- OMAP24XX_GPIO_IRQSTATUS1);
- __raw_writel(0x00000000, bank->base +
- OMAP24XX_GPIO_DEBOUNCE_EN);
+ if (bank->width == 32) {
+ u32 clr_all = 0; /* clear all the bits */
+ u32 set_all = 0xFFFFFFFF; /* set all the bits */
+ if (bank->is_mpuio) {
+ __raw_writel(set_all, bank->base +
+ bank->regs->irqenable);
+
+ if (bank->suspend_support)
+ mpuio_init(bank);
+
+ return;
+ }
+
+ if (bank->regs->ctrl)
/* Initialize interface clk ungated, module enabled */
- __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
- } else if (cpu_is_omap24xx()) {
- static const u32 non_wakeup_gpios[] = {
- 0xe203ffc0, 0x08700040
- };
- if (id < ARRAY_SIZE(non_wakeup_gpios))
- bank->non_wakeup_gpios = non_wakeup_gpios[id];
+ __raw_writel(clr_all, bank->base + bank->regs->ctrl);
+
+ if (bank->regs->clr_irqenable) {
+ __raw_writel(set_all, bank->base +
+ bank->regs->clr_irqenable);
+ } else if (bank->regs->irqenable) {
+ u32 i;
+
+ if (bank->regs->irqenable_inv)
+ i = set_all;
+ else
+ i = clr_all;
+
+ __raw_writel(i, bank->base + bank->regs->irqenable);
}
- } else if (cpu_class_is_omap1()) {
- if (bank_is_mpuio(bank))
- __raw_writew(0xffff, bank->base +
- OMAP_MPUIO_GPIO_MASKIT / bank->stride);
- if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
- __raw_writew(0xffff, bank->base
- + OMAP1510_GPIO_INT_MASK);
- __raw_writew(0x0000, bank->base
- + OMAP1510_GPIO_INT_STATUS);
+
+ if (bank->regs->irqstatus) {
+ u32 i;
+
+ if (bank->regs->irqenable_inv)
+ i = clr_all;
+ else
+ i = set_all;
+
+ __raw_writel(i, bank->base + bank->regs->irqstatus);
}
- if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
- __raw_writew(0x0000, bank->base
- + OMAP1610_GPIO_IRQENABLE1);
- __raw_writew(0xffff, bank->base
- + OMAP1610_GPIO_IRQSTATUS1);
- __raw_writew(0x0014, bank->base
- + OMAP1610_GPIO_SYSCONFIG);
+
+ if (bank->regs->debounce_en)
+ __raw_writel(clr_all, bank->base +
+ bank->regs->debounce_en);
+
+ } else if (bank->width == 16) {
+ u16 clr_all = 0; /* clear all the bits */
+ u16 set_all = 0xFFFF; /* set all the bits */
+
+ if (bank->is_mpuio) {
+ __raw_writew(set_all, bank->base +
+ bank->regs->irqenable);
+
+ if (bank->suspend_support)
+ mpuio_init(bank);
+
+ return;
+ }
+
+ if (bank->regs->irqenable) {
+ u16 i;
+
+ if (bank->regs->irqenable_inv)
+ i = set_all;
+ else
+ i = clr_all;
+
+ __raw_writew(i, bank->base + bank->regs->irqenable);
+ }
+
+ if (bank->regs->irqstatus) {
+ u32 i;
+
+ if (bank->regs->irqenable_inv)
+ i = clr_all;
+ else
+ i = set_all;
+
+ __raw_writew(i, bank->base + bank->regs->irqstatus);
+ }
+
+ if (bank->regs->sysconfig) {
+ /* set wakeup-enable and smart-idle */
+ __raw_writew(0x14, bank->base + bank->regs->sysconfig);
/*
* Enable system clock for GPIO module.
@@ -1517,13 +1070,41 @@
omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
ULPD_CAM_CLK_CTRL);
}
- if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
- __raw_writel(0xffffffff, bank->base
- + OMAP7XX_GPIO_INT_MASK);
- __raw_writel(0x00000000, bank->base
- + OMAP7XX_GPIO_INT_STATUS);
- }
}
+
+ for (i = 0; i < bank->width; i++) {
+ int gpio = irq_to_gpio(bank->virtual_irq_start + i);
+ bank->mux[i] = omap_mux_get_gpio(gpio);
+ }
+}
+
+static __init void
+omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
+ unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
+ handle_simple_irq);
+ if (!gc) {
+ dev_err(bank->dev, "Memory alloc failed for gc\n");
+ return;
+ }
+
+ ct = gc->chip_types;
+
+ /* NOTE: No ack required, reading IRQ status clears it. */
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = gpio_irq_type;
+
+ if (bank->suspend_support)
+ ct->chip.irq_set_wake = gpio_wake_enable;
+
+ ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
@@ -1544,31 +1125,31 @@
bank->chip.set_debounce = gpio_debounce;
bank->chip.set = gpio_set;
bank->chip.to_irq = gpio_2irq;
- if (bank_is_mpuio(bank)) {
+ if (bank->is_mpuio) {
bank->chip.label = "mpuio";
-#ifdef CONFIG_ARCH_OMAP16XX
- bank->chip.dev = &omap_mpuio_device.dev;
-#endif
+ if (bank->suspend_support)
+ bank->chip.dev = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
bank->chip.label = "gpio";
bank->chip.base = gpio;
- gpio += bank_width;
+ gpio += bank->width;
}
- bank->chip.ngpio = bank_width;
+ bank->chip.ngpio = bank->width;
gpiochip_add(&bank->chip);
for (j = bank->virtual_irq_start;
- j < bank->virtual_irq_start + bank_width; j++) {
+ j < bank->virtual_irq_start + bank->width; j++) {
irq_set_lockdep_class(j, &gpio_lock_class);
irq_set_chip_data(j, bank);
- if (bank_is_mpuio(bank))
- irq_set_chip(j, &mpuio_irq_chip);
- else
+ if (bank->is_mpuio) {
+ omap_mpuio_alloc_gc(bank, j, bank->width);
+ } else {
irq_set_chip(j, &gpio_irq_chip);
- irq_set_handler(j, handle_simple_irq);
- set_irq_flags(j, IRQF_VALID);
+ irq_set_handler(j, handle_simple_irq);
+ set_irq_flags(j, IRQF_VALID);
+ }
}
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
@@ -1576,410 +1157,549 @@
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
- static int gpio_init_done;
struct omap_gpio_platform_data *pdata;
struct resource *res;
- int id;
struct gpio_bank *bank;
+ int ret = 0;
- if (!pdev->dev.platform_data)
- return -EINVAL;
-
- pdata = pdev->dev.platform_data;
-
- if (!gpio_init_done) {
- int ret;
-
- ret = init_gpio_info(pdev);
- if (ret)
- return ret;
+ if (!pdev->dev.platform_data) {
+ ret = -EINVAL;
+ goto err_exit;
}
- id = pdev->id;
- bank = &gpio_bank[id];
+ bank = kzalloc(sizeof(struct gpio_bank), GFP_KERNEL);
+ if (!bank) {
+ dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (unlikely(!res)) {
- dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
- return -ENODEV;
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n",
+ pdev->id);
+ ret = -ENODEV;
+ goto err_free;
}
bank->irq = res->start;
+ bank->id = pdev->id;
+
+ pdata = pdev->dev.platform_data;
bank->virtual_irq_start = pdata->virtual_irq_start;
- bank->method = pdata->bank_type;
bank->dev = &pdev->dev;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
- bank_width = pdata->bank_width;
+ bank->width = pdata->bank_width;
+ bank->is_mpuio = pdata->is_mpuio;
+ bank->suspend_support = pdata->suspend_support;
+ bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
+ bank->loses_context = pdata->loses_context;
+ bank->regs = pdata->regs;
+ bank->saved_context = 0;
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ bank->set_dataout = _set_gpio_dataout_reg;
+ else
+ bank->set_dataout = _set_gpio_dataout_mask;
spin_lock_init(&bank->lock);
/* Static mapping, never released */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!res)) {
- dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
- return -ENODEV;
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n",
+ pdev->id);
+ ret = -ENODEV;
+ goto err_free;
}
bank->base = ioremap(res->start, resource_size(res));
if (!bank->base) {
- dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
- return -ENOMEM;
+ dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n",
+ pdev->id);
+ ret = -ENOMEM;
+ goto err_free;
}
- pm_runtime_enable(bank->dev);
- pm_runtime_get_sync(bank->dev);
+ platform_set_drvdata(pdev, bank);
- omap_gpio_mod_init(bank, id);
+ pm_runtime_enable(bank->dev);
+ pm_runtime_irq_safe(bank->dev);
+ if (pm_runtime_get_sync(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_get_sync "
+ "failed\n", __func__, bank->id);
+ iounmap(bank->base);
+ return -EINVAL;
+ }
+
+ omap_gpio_mod_init(bank);
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
- if (!gpio_init_done)
- gpio_init_done = 1;
+ if (pm_runtime_put_sync(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_put_sync "
+ "failed\n", __func__, bank->id);
+ iounmap(bank->base);
+ return -EINVAL;
+ }
- return 0;
+ list_add_tail(&bank->node, &omap_gpio_list);
+
+ return ret;
+
+err_free:
+ kfree(bank);
+err_exit:
+ return ret;
}
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
-static int omap_gpio_suspend(void)
+static int omap_gpio_suspend(struct device *dev)
{
- int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *wake_status;
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
- if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ if (!bank->suspend_support)
return 0;
- for (i = 0; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- void __iomem *wake_status;
- void __iomem *wake_clear;
- void __iomem *wake_set;
- unsigned long flags;
+ wake_status = bank->base + bank->regs->wkup_status;
+ wake_clear = bank->base + bank->regs->wkup_clear;
+ wake_set = bank->base + bank->regs->wkup_set;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
- wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
- wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
- break;
-#endif
- default:
- continue;
- }
+ pm_runtime_get_sync(dev);
- spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(wake_status);
- __raw_writel(0xffffffff, wake_clear);
- __raw_writel(bank->suspend_wakeup, wake_set);
- spin_unlock_irqrestore(&bank->lock, flags);
- }
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(wake_status);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->suspend_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ pm_runtime_put_sync(dev);
return 0;
}
-static void omap_gpio_resume(void)
+static int omap_gpio_resume(struct device *dev)
{
- int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
- if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
- return;
+ if (!bank->suspend_support)
+ return 0;
- for (i = 0; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- void __iomem *wake_clear;
- void __iomem *wake_set;
- unsigned long flags;
+ wake_clear = bank->base + bank->regs->wkup_clear;
+ wake_set = bank->base + bank->regs->wkup_set;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
- break;
-#endif
- default:
- continue;
- }
+ pm_runtime_get_sync(dev);
- spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(0xffffffff, wake_clear);
- __raw_writel(bank->saved_wakeup, wake_set);
- spin_unlock_irqrestore(&bank->lock, flags);
- }
+ spin_lock_irqsave(&bank->lock, flags);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->saved_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
}
-static struct syscore_ops omap_gpio_syscore_ops = {
- .suspend = omap_gpio_suspend,
- .resume = omap_gpio_resume,
-};
-
-#endif
-
#ifdef CONFIG_ARCH_OMAP2PLUS
+static void omap_gpio_save_context(struct gpio_bank *bank);
+static void omap_gpio_restore_context(struct gpio_bank *bank);
-static int workaround_enabled;
-
-void omap2_gpio_prepare_for_idle(int off_mode)
+static void omap2_gpio_set_wakeupenables(struct gpio_bank *bank)
{
- int i, c = 0;
- int min = 0;
+ unsigned long pad_wakeup;
+ int i;
- if (cpu_is_omap34xx())
- min = 1;
+ bank->context.pad_set_wakeupenable = 0;
- for (i = min; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- u32 l1 = 0, l2 = 0;
- int j;
+ pad_wakeup = __raw_readl(bank->base + bank->regs->irqenable);
- for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
- clk_disable(bank->dbck);
-
- if (!off_mode)
- continue;
-
- /* If going to OFF, remove triggering for all
- * non-wakeup GPIOs. Otherwise spurious IRQs will be
- * generated. See OMAP2420 Errata item 1.101. */
- if (!(bank->enabled_non_wakeup_gpios))
- continue;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- bank->saved_datain = __raw_readl(bank->base +
- OMAP24XX_GPIO_DATAIN);
- l1 = __raw_readl(bank->base +
- OMAP24XX_GPIO_FALLINGDETECT);
- l2 = __raw_readl(bank->base +
- OMAP24XX_GPIO_RISINGDETECT);
- }
-
- if (cpu_is_omap44xx()) {
- bank->saved_datain = __raw_readl(bank->base +
- OMAP4_GPIO_DATAIN);
- l1 = __raw_readl(bank->base +
- OMAP4_GPIO_FALLINGDETECT);
- l2 = __raw_readl(bank->base +
- OMAP4_GPIO_RISINGDETECT);
- }
-
- bank->saved_fallingdetect = l1;
- bank->saved_risingdetect = l2;
- l1 &= ~bank->enabled_non_wakeup_gpios;
- l2 &= ~bank->enabled_non_wakeup_gpios;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- __raw_writel(l1, bank->base +
- OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(l2, bank->base +
- OMAP24XX_GPIO_RISINGDETECT);
- }
-
- if (cpu_is_omap44xx()) {
- __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
- __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
- }
-
- c++;
+ /*
+ * HACK: Ignore GPIOs that have multiple sources.
+ * GPIOs 0-3 and 86 are special and may be used as GPIO
+ * interrupts without being connected to the pad that
+ * the mux points to.
+ */
+ if (cpu_is_omap44xx()) {
+ if (bank->id == 0)
+ pad_wakeup &= ~0xf;
+ if (bank->id == 2)
+ pad_wakeup &= ~BIT(22);
}
- if (!c) {
- workaround_enabled = 0;
+
+ for_each_set_bit(i, &pad_wakeup, bank->width) {
+ if (!omap_mux_get_wakeupenable(bank->mux[i])) {
+ bank->context.pad_set_wakeupenable |= BIT(i);
+ omap_mux_set_wakeupenable(bank->mux[i]);
+ }
+ }
+}
+
+static void omap2_gpio_clear_wakeupenables(struct gpio_bank *bank)
+{
+ unsigned long pad_wakeup;
+ int i;
+
+ pad_wakeup = bank->context.pad_set_wakeupenable;
+
+ for_each_set_bit(i, &pad_wakeup, bank->width)
+ omap_mux_clear_wakeupenable(bank->mux[i]);
+}
+
+#endif
+
+static int omap_gpio_pm_runtime_suspend(struct device *dev)
+{
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ u32 l1 = 0, l2 = 0;
+ int j;
+
+ /* If going to OFF, remove triggering for all
+ * non-wakeup GPIOs. Otherwise spurious IRQs will be
+ * generated. See OMAP2420 Errata item 1.101. */
+ if (!(bank->enabled_non_wakeup_gpios))
+ goto save_gpio_ctx;
+
+ bank->saved_datain = __raw_readl(bank->base +
+ bank->regs->datain);
+ l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
+ l2 = __raw_readl(bank->base + bank->regs->risingdetect);
+
+ bank->saved_fallingdetect = l1;
+ bank->saved_risingdetect = l2;
+ l1 &= ~bank->enabled_non_wakeup_gpios;
+ l2 &= ~bank->enabled_non_wakeup_gpios;
+
+ __raw_writel(l1, bank->base + bank->regs->fallingdetect);
+ __raw_writel(l2, bank->base + bank->regs->risingdetect);
+
+save_gpio_ctx:
+ omap_gpio_save_context(bank);
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_disable(bank->dbck);
+
+#endif
+ return 0;
+}
+
+static int omap_gpio_pm_runtime_resume(struct device *dev)
+{
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ u32 l = 0, gen, gen0, gen1;
+ int j;
+ unsigned long pad_wakeup;
+ int i;
+
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_enable(bank->dbck);
+
+ if (omap_pm_was_context_lost(dev))
+ omap_gpio_restore_context(bank);
+
+ if (!(bank->enabled_non_wakeup_gpios))
+ return 0;
+
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + bank->regs->risingdetect);
+ l = __raw_readl(bank->base + bank->regs->datain);
+
+ /* Check if any of the non-wakeup interrupt GPIOs have changed
+ * state. If so, generate an IRQ by software. This is
+ * horribly racy, but it's the best we can do to work around
+ * this silicon bug. */
+ l ^= bank->saved_datain;
+ l &= bank->enabled_non_wakeup_gpios;
+
+ pad_wakeup = bank->enabled_non_wakeup_gpios;
+ for_each_set_bit(i, &pad_wakeup, bank->width)
+ if (omap_mux_get_wakeupevent(bank->mux[i]))
+ l |= BIT(i);
+
+ /*
+ * No need to generate IRQs for the rising edge for gpio IRQs
+ * configured with falling edge only; and vice versa.
+ */
+ gen0 = l & bank->saved_fallingdetect;
+ gen0 &= bank->saved_datain;
+ gen1 = l & bank->saved_risingdetect;
+ gen1 &= ~(bank->saved_datain);
+
+ /* FIXME: Consider GPIO IRQs with level detections properly! */
+ gen = l & (~(bank->saved_fallingdetect) &
+ ~(bank->saved_risingdetect));
+ /* Consider all GPIO IRQs needed to be updated */
+ gen |= gen0 | gen1;
+
+ if (gen) {
+ u32 old0, old1;
+
+ old0 = __raw_readl(bank->base +
+ bank->regs->leveldetect0);
+ old1 = __raw_readl(bank->base +
+ bank->regs->leveldetect1);
+
+ __raw_writel(old0, bank->base +
+ bank->regs->leveldetect0);
+ __raw_writel(old1, bank->base +
+ bank->regs->leveldetect1);
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ old0 |= gen;
+ old1 |= gen;
+ }
+
+ if (cpu_is_omap44xx()) {
+ old0 |= l;
+ old1 |= l;
+ }
+ __raw_writel(old0, bank->base +
+ bank->regs->leveldetect0);
+ __raw_writel(old1, bank->base +
+ bank->regs->leveldetect1);
+ }
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+static int omap2_gpio_set_edge_wakeup(struct gpio_bank *bank, bool suspend)
+{
+ int ret = 0;
+ u32 wkup_status = 0;
+ u32 datain;
+ u32 mask;
+ u32 active;
+
+ if (pm_runtime_get_sync(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_get_sync "
+ "failed\n", __func__, bank->id);
+ return -EINVAL;
+ }
+
+ bank->context.ew_leveldetect0 = __raw_readl(bank->base +
+ bank->regs->leveldetect0);
+ bank->context.ew_leveldetect1 = __raw_readl(bank->base +
+ bank->regs->leveldetect1);
+ wkup_status = __raw_readl(bank->base +
+ bank->regs->wkup_status);
+ bank->context.edge_falling = __raw_readl(bank->base +
+ bank->regs->fallingdetect);
+ bank->context.edge_rising = __raw_readl(bank->base +
+ bank->regs->risingdetect);
+
+ /*
+ * Set edge trigger for all GPIOs that are
+ * expected to produce wakeup from low power,
+ * even if they are set for level detection only.
+ */
+ __raw_writel(bank->context.edge_falling |
+ (bank->type_leveldetect0 & wkup_status),
+ (bank->base + bank->regs->fallingdetect));
+ __raw_writel(bank->context.edge_rising |
+ (bank->type_leveldetect1 & wkup_status),
+ (bank->base + bank->regs->risingdetect));
+ __raw_writel(0, bank->base + bank->regs->leveldetect0);
+ __raw_writel(0, bank->base + bank->regs->leveldetect1);
+
+ /*
+ * If a level interrupt is pending it will be lost since
+ * we just cleared its enable bit. Detect and abort;
+ * the interrupt will be delivered when
+ * omap2_gpio_restore_edge_wakeup restores the level
+ * interrupt mask.
+ */
+ datain = __raw_readl(bank->base + bank->regs->datain);
+ if (suspend)
+ mask = bank->suspend_wakeup;
+ else
+ mask = wkup_status;
+
+ active = (datain & bank->type_leveldetect1 & mask) |
+ (~datain & bank->type_leveldetect0 & mask);
+
+ if (active) {
+ if (suspend)
+ pr_info("%s: aborted suspend due to gpio %d\n",
+ __func__, bank->id * bank->width + __ffs(active));
+ ret = -EBUSY;
+ }
+
+ if (pm_runtime_put_sync_suspend(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_put_sync "
+ "failed\n", __func__, bank->id);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void omap2_gpio_restore_edge_wakeup(struct gpio_bank *bank)
+{
+ if (pm_runtime_get_sync(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_get_sync "
+ "failed\n", __func__, bank->id);
return;
}
- workaround_enabled = 1;
+
+ __raw_writel(bank->context.edge_falling,
+ (bank->base + bank->regs->fallingdetect));
+ __raw_writel(bank->context.edge_rising,
+ (bank->base + bank->regs->risingdetect));
+ __raw_writel(bank->context.ew_leveldetect0,
+ (bank->base + bank->regs->leveldetect0));
+ __raw_writel(bank->context.ew_leveldetect1,
+ (bank->base + bank->regs->leveldetect1));
+
+ if (pm_runtime_put_sync_suspend(bank->dev) < 0) {
+ dev_err(bank->dev, "%s: GPIO bank %d pm_runtime_put_sync "
+ "failed\n", __func__, bank->id);
+ return;
+ }
}
-void omap2_gpio_resume_after_idle(void)
+int omap2_gpio_prepare_for_idle(int off_mode, bool suspend)
{
- int i;
- int min = 0;
+ int ret = 0;
+ struct gpio_bank *bank;
- if (cpu_is_omap34xx())
- min = 1;
- for (i = min; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- u32 l = 0, gen, gen0, gen1;
- int j;
-
- for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
- clk_enable(bank->dbck);
-
- if (!workaround_enabled)
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+ if (!bank->mod_usage)
continue;
- if (!(bank->enabled_non_wakeup_gpios))
+ omap2_gpio_set_wakeupenables(bank);
+
+ if (omap2_gpio_set_edge_wakeup(bank, suspend))
+ ret = -EBUSY;
+ }
+
+ if (cpu_is_omap44xx())
+ omap4_trigger_ioctrl();
+
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+ if (!bank->mod_usage)
continue;
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- __raw_writel(bank->saved_fallingdetect,
- bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(bank->saved_risingdetect,
- bank->base + OMAP24XX_GPIO_RISINGDETECT);
- l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
- }
+ if (bank->loses_context)
+ if (pm_runtime_put_sync_suspend(bank->dev) < 0)
+ dev_err(bank->dev, "%s: GPIO bank %d "
+ "pm_runtime_put_sync failed\n",
+ __func__, bank->id);
+ }
- if (cpu_is_omap44xx()) {
- __raw_writel(bank->saved_fallingdetect,
- bank->base + OMAP4_GPIO_FALLINGDETECT);
- __raw_writel(bank->saved_risingdetect,
- bank->base + OMAP4_GPIO_RISINGDETECT);
- l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
- }
+ if (ret)
+ omap2_gpio_resume_after_idle(off_mode);
- /* Check if any of the non-wakeup interrupt GPIOs have changed
- * state. If so, generate an IRQ by software. This is
- * horribly racy, but it's the best we can do to work around
- * this silicon bug. */
- l ^= bank->saved_datain;
- l &= bank->enabled_non_wakeup_gpios;
+ return ret;
+}
- /*
- * No need to generate IRQs for the rising edge for gpio IRQs
- * configured with falling edge only; and vice versa.
- */
- gen0 = l & bank->saved_fallingdetect;
- gen0 &= bank->saved_datain;
+void omap2_gpio_resume_after_idle(int off_mode)
+{
+ struct gpio_bank *bank;
- gen1 = l & bank->saved_risingdetect;
- gen1 &= ~(bank->saved_datain);
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+ if (!bank->mod_usage)
+ continue;
- /* FIXME: Consider GPIO IRQs with level detections properly! */
- gen = l & (~(bank->saved_fallingdetect) &
- ~(bank->saved_risingdetect));
- /* Consider all GPIO IRQs needed to be updated */
- gen |= gen0 | gen1;
+ if (bank->loses_context)
+ if (pm_runtime_get_sync(bank->dev) < 0)
+ dev_err(bank->dev, "%s: GPIO bank %d "
+ "pm_runtime_get_sync failed\n",
+ __func__, bank->id);
- if (gen) {
- u32 old0, old1;
+ omap2_gpio_restore_edge_wakeup(bank);
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- old0 = __raw_readl(bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- old1 = __raw_readl(bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(old0 | gen, bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(old1 | gen, bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(old0, bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(old1, bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- }
-
- if (cpu_is_omap44xx()) {
- old0 = __raw_readl(bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- old1 = __raw_readl(bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- __raw_writel(old0 | l, bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- __raw_writel(old1 | l, bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- __raw_writel(old0, bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- __raw_writel(old1, bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- }
- }
+ omap2_gpio_clear_wakeupenables(bank);
}
}
+void omap_gpio_save_context(struct gpio_bank *bank)
+{
+ bank->context.irqenable1 =
+ __raw_readl(bank->base + bank->regs->irqenable);
+ bank->context.irqenable2 =
+ __raw_readl(bank->base + bank->regs->irqenable2);
+ bank->context.wake_en =
+ __raw_readl(bank->base + bank->regs->wkup_set);
+ bank->context.ctrl = __raw_readl(bank->base + bank->regs->ctrl);
+ bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
+ bank->context.leveldetect0 =
+ __raw_readl(bank->base + bank->regs->leveldetect0);
+ bank->context.leveldetect1 =
+ __raw_readl(bank->base + bank->regs->leveldetect1);
+ bank->context.risingdetect =
+ __raw_readl(bank->base + bank->regs->risingdetect);
+ bank->context.fallingdetect =
+ __raw_readl(bank->base + bank->regs->fallingdetect);
+ bank->context.dataout = __raw_readl(bank->base + bank->regs->dataout);
+ if (bank->dbck_enable_mask) {
+ bank->context.debounce = __raw_readl(bank->base +
+ bank->regs->debounce);
+ bank->context.debounce_en = __raw_readl(bank->base +
+ bank->regs->debounce_en);
+ }
+ bank->saved_context = 1;
+}
+void omap_gpio_restore_context(struct gpio_bank *bank)
+{
+ if (!bank->saved_context)
+ return;
+ __raw_writel(bank->context.wake_en,
+ bank->base + bank->regs->wkup_set);
+ __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ __raw_writel(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ __raw_writel(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+ __raw_writel(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+ __raw_writel(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ __raw_writel(bank->context.dataout,
+ bank->base + bank->regs->set_dataout);
+ else
+ __raw_writel(bank->context.dataout,
+ bank->base + bank->regs->dataout);
+ __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
+ if (bank->dbck_enable_mask) {
+ __raw_writel(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ __raw_writel(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
+ }
+ __raw_writel(bank->context.irqenable1,
+ bank->base + bank->regs->irqenable);
+ __raw_writel(bank->context.irqenable2,
+ bank->base + bank->regs->irqenable2);
+ bank->saved_context = 0;
+}
#endif
-#ifdef CONFIG_ARCH_OMAP3
-/* save the registers of bank 2-6 */
-void omap_gpio_save_context(void)
-{
- int i;
-
- /* saving banks from 2-6 only since GPIO1 is in WKUP */
- for (i = 1; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- gpio_context[i].irqenable1 =
- __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
- gpio_context[i].irqenable2 =
- __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
- gpio_context[i].wake_en =
- __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
- gpio_context[i].ctrl =
- __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
- gpio_context[i].oe =
- __raw_readl(bank->base + OMAP24XX_GPIO_OE);
- gpio_context[i].leveldetect0 =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
- gpio_context[i].leveldetect1 =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- gpio_context[i].risingdetect =
- __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
- gpio_context[i].fallingdetect =
- __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- gpio_context[i].dataout =
- __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
- }
-}
-
-/* restore the required registers of bank 2-6 */
-void omap_gpio_restore_context(void)
-{
- int i;
-
- for (i = 1; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- __raw_writel(gpio_context[i].irqenable1,
- bank->base + OMAP24XX_GPIO_IRQENABLE1);
- __raw_writel(gpio_context[i].irqenable2,
- bank->base + OMAP24XX_GPIO_IRQENABLE2);
- __raw_writel(gpio_context[i].wake_en,
- bank->base + OMAP24XX_GPIO_WAKE_EN);
- __raw_writel(gpio_context[i].ctrl,
- bank->base + OMAP24XX_GPIO_CTRL);
- __raw_writel(gpio_context[i].oe,
- bank->base + OMAP24XX_GPIO_OE);
- __raw_writel(gpio_context[i].leveldetect0,
- bank->base + OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(gpio_context[i].leveldetect1,
- bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(gpio_context[i].risingdetect,
- bank->base + OMAP24XX_GPIO_RISINGDETECT);
- __raw_writel(gpio_context[i].fallingdetect,
- bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(gpio_context[i].dataout,
- bank->base + OMAP24XX_GPIO_DATAOUT);
- }
-}
-#endif
+static const struct dev_pm_ops gpio_pm_ops = {
+ .runtime_suspend = omap_gpio_pm_runtime_suspend,
+ .runtime_resume = omap_gpio_pm_runtime_resume,
+ .suspend = omap_gpio_suspend,
+ .resume = omap_gpio_resume,
+};
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
.driver = {
.name = "omap_gpio",
+ .pm = &gpio_pm_ops,
},
};
@@ -1994,16 +1714,3 @@
}
postcore_initcall(omap_gpio_drv_reg);
-static int __init omap_gpio_sysinit(void)
-{
- mpuio_init();
-
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
- if (cpu_is_omap16xx() || cpu_class_is_omap2())
- register_syscore_ops(&omap_gpio_syscore_ops);
-#endif
-
- return 0;
-}
-
-arch_initcall(omap_gpio_sysinit);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index ca2d3b3..59ae0d6 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
obj-y += drm/ vga/ stub/ ion/
+obj-y += pvr/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index 5b48b4e..a68878a 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -10,3 +10,9 @@
help
Choose this option if you wish to use ion on an nVidia Tegra.
+config ION_OMAP
+ tristate "Ion for OMAP"
+ depends on ARCH_OMAP4 && ION && TI_TILER
+ help
+ Choose this option if you wish to use ion on OMAP4.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 73fe3fa..a81e0f3 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
+obj-$(CONFIG_ION_OMAP) += omap/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 37b23af..688e7ed 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -332,6 +332,7 @@
ion_buffer_put(buffer);
return handle;
}
+EXPORT_SYMBOL(ion_alloc);
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
@@ -349,6 +350,7 @@
}
ion_handle_put(handle);
}
+EXPORT_SYMBOL(ion_free);
static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);
@@ -406,6 +408,7 @@
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
+EXPORT_SYMBOL(ion_phys);
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -443,6 +446,7 @@
mutex_unlock(&client->lock);
return vaddr;
}
+EXPORT_SYMBOL(ion_map_kernel);
struct scatterlist *ion_map_dma(struct ion_client *client,
struct ion_handle *handle)
@@ -479,6 +483,7 @@
mutex_unlock(&client->lock);
return sglist;
}
+EXPORT_SYMBOL(ion_map_dma);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -494,6 +499,7 @@
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
+EXPORT_SYMBOL(ion_unmap_kernel);
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
@@ -509,7 +515,7 @@
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
-
+EXPORT_SYMBOL(ion_unmap_dma);
struct ion_buffer *ion_share(struct ion_client *client,
struct ion_handle *handle)
@@ -531,6 +537,7 @@
*/
return handle->buffer;
}
+EXPORT_SYMBOL(ion_share);
struct ion_handle *ion_import(struct ion_client *client,
struct ion_buffer *buffer)
@@ -552,6 +559,7 @@
mutex_unlock(&client->lock);
return handle;
}
+EXPORT_SYMBOL(ion_import);
static const struct file_operations ion_share_fops;
@@ -575,6 +583,7 @@
fput(file);
return handle;
}
+EXPORT_SYMBOL(ion_import_fd);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
@@ -728,6 +737,7 @@
return client;
}
+EXPORT_SYMBOL(ion_client_create);
static void _ion_client_destroy(struct kref *kref)
{
@@ -768,6 +778,7 @@
{
ion_client_put(client);
}
+EXPORT_SYMBOL(ion_client_destroy);
static int ion_share_release(struct inode *inode, struct file* file)
{
diff --git a/drivers/gpu/ion/omap/Makefile b/drivers/gpu/ion/omap/Makefile
new file mode 100644
index 0000000..9b93884
--- /dev/null
+++ b/drivers/gpu/ion/omap/Makefile
@@ -0,0 +1 @@
+obj-y += omap_tiler_heap.o omap_ion.o
diff --git a/drivers/gpu/ion/omap/omap_ion.c b/drivers/gpu/ion/omap/omap_ion.c
new file mode 100644
index 0000000..1ae3e53
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_ion.c
@@ -0,0 +1,167 @@
+/*
+ * drivers/gpu/omap/omap_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "../ion_priv.h"
+#include "omap_ion_priv.h"
+
+struct ion_device *omap_ion_device;
+EXPORT_SYMBOL(omap_ion_device);
+
+int num_heaps;
+struct ion_heap **heaps;
+struct ion_heap *tiler_heap;
+static struct ion_heap *nonsecure_tiler_heap;
+
+int omap_ion_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ return omap_tiler_alloc(tiler_heap, client, data);
+}
+
+int omap_ion_nonsecure_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ if (!nonsecure_tiler_heap)
+ return -ENOMEM;
+ return omap_tiler_alloc(nonsecure_tiler_heap, client, data);
+}
+
+long omap_ion_ioctl(struct ion_client *client, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case OMAP_ION_TILER_ALLOC:
+ {
+ struct omap_ion_tiler_alloc_data data;
+ int ret;
+
+ if (!tiler_heap) {
+ pr_err("%s: Tiler heap requested but no tiler "
+ "heap exists on this platform\n", __func__);
+ return -EINVAL;
+ }
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ ret = omap_ion_tiler_alloc(client, &data);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ pr_err("%s: Unknown custom ioctl\n", __func__);
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+int omap_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ omap_ion_device = ion_device_create(omap_ion_ioctl);
+ if (IS_ERR_OR_NULL(omap_ion_device)) {
+ kfree(heaps);
+ return PTR_ERR(omap_ion_device);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ if (heap_data->type == OMAP_ION_HEAP_TYPE_TILER) {
+ heaps[i] = omap_tiler_heap_create(heap_data);
+ if (heap_data->id == OMAP_ION_HEAP_NONSECURE_TILER)
+ nonsecure_tiler_heap = heaps[i];
+ else
+ tiler_heap = heaps[i];
+ } else {
+ heaps[i] = ion_heap_create(heap_data);
+ }
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(omap_ion_device, heaps[i]);
+ pr_info("%s: adding heap %s of type %d with %lx@%x\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+
+ }
+
+ platform_set_drvdata(pdev, omap_ion_device);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i]) {
+ if (heaps[i]->type == OMAP_ION_HEAP_TYPE_TILER)
+ omap_tiler_heap_destroy(heaps[i]);
+ else
+ ion_heap_destroy(heaps[i]);
+ }
+ }
+ kfree(heaps);
+ return err;
+}
+
+int omap_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ if (heaps[i]->type == OMAP_ION_HEAP_TYPE_TILER)
+ omap_tiler_heap_destroy(heaps[i]);
+ else
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = omap_ion_probe,
+ .remove = omap_ion_remove,
+ .driver = { .name = "ion-omap4" }
+};
+
+static int __init ion_init(void)
+{
+ return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+ platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
diff --git a/drivers/gpu/ion/omap/omap_ion_priv.h b/drivers/gpu/ion/omap/omap_ion_priv.h
new file mode 100644
index 0000000..2bb3bda
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_ion_priv.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/omap/omap_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_OMAP_ION_PRIV_H
+#define _LINUX_OMAP_ION_PRIV_H
+
+#include <linux/types.h>
+
+int omap_tiler_alloc(struct ion_heap *heap,
+ struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data);
+struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *heap_data);
+void omap_tiler_heap_destroy(struct ion_heap *heap);
+
+#endif /* _LINUX_OMAP_ION_PRIV_H */
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c
new file mode 100644
index 0000000..652bbf9
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_tiler_heap.c
@@ -0,0 +1,260 @@
+/*
+ * drivers/gpu/ion/omap/omap_tiler_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/omap_ion.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <mach/tiler.h>
+#include <asm/mach/map.h>
+#include <asm/page.h>
+
+#include "../ion_priv.h"
+
+static int omap_tiler_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ if (size == 0)
+ return 0;
+
+	pr_err("%s: This should never be called directly -- use the "
+	       "OMAP_ION_TILER_ALLOC flag to the ION_IOC_CUSTOM ioctl "
+	       "instead\n", __func__);
+ return -EINVAL;
+}
+
+struct omap_tiler_info {
+	tiler_blk_handle tiler_handle;	/* handle of the allocation in tiler */
+ bool lump; /* true for a single lump allocation */
+ u32 n_phys_pages; /* number of physical pages */
+	u32 *phys_addrs;		/* array of addrs of physical pages */
+ u32 n_tiler_pages; /* number of tiler pages */
+ u32 *tiler_addrs; /* array of addrs of tiler pages */
+	u32 tiler_start;		/* start addr in tiler -- if not page
+					   aligned this may not equal the
+					   first entry of tiler_addrs */
+};
+
+int omap_tiler_alloc(struct ion_heap *heap,
+ struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ struct ion_handle *handle;
+ struct ion_buffer *buffer;
+ struct omap_tiler_info *info;
+ u32 n_phys_pages;
+ u32 n_tiler_pages;
+ ion_phys_addr_t addr;
+ int i, ret;
+
+ if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
+ pr_err("%s: Page mode (1D) allocations must have a height "
+ "of one\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = tiler_memsize(data->fmt, data->w, data->h,
+ &n_phys_pages,
+ &n_tiler_pages);
+
+ if (ret) {
+ pr_err("%s: invalid tiler request w %u h %u fmt %u\n", __func__,
+ data->w, data->h, data->fmt);
+ return ret;
+ }
+
+ BUG_ON(!n_phys_pages || !n_tiler_pages);
+
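+	/* a single allocation holds the info struct followed by the
+	 * physical and tiler page address arrays */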
+ info = kzalloc(sizeof(struct omap_tiler_info) +
+ sizeof(u32) * n_phys_pages +
+ sizeof(u32) * n_tiler_pages, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->n_phys_pages = n_phys_pages;
+ info->n_tiler_pages = n_tiler_pages;
+ info->phys_addrs = (u32 *)(info + 1);
+ info->tiler_addrs = info->phys_addrs + n_phys_pages;
+
+ info->tiler_handle = tiler_alloc_block_area(data->fmt, data->w, data->h,
+ &info->tiler_start,
+ info->tiler_addrs);
+ if (IS_ERR_OR_NULL(info->tiler_handle)) {
+ ret = PTR_ERR(info->tiler_handle);
+ pr_err("%s: failure to allocate address space from tiler\n",
+ __func__);
+ goto err_nomem;
+ }
+
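+	/* try to back the tiler area with one contiguous carveout lump;
+	 * fall back to allocating individual pages if that fails */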
+ addr = ion_carveout_allocate(heap, n_phys_pages*PAGE_SIZE, 0);
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ for (i = 0; i < n_phys_pages; i++) {
+ addr = ion_carveout_allocate(heap, PAGE_SIZE, 0);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ pr_err("%s: failed to allocate pages to back "
+ "tiler address space\n", __func__);
+ goto err_alloc;
+ }
+ info->phys_addrs[i] = addr;
+ }
+ } else {
+ info->lump = true;
+ for (i = 0; i < n_phys_pages; i++)
+ info->phys_addrs[i] = addr + i*PAGE_SIZE;
+ }
+
+ ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
+ info->n_phys_pages);
+ if (ret) {
+ pr_err("%s: failure to pin pages to tiler\n", __func__);
+ goto err_alloc;
+ }
+
+ data->stride = tiler_block_vstride(info->tiler_handle);
+
+ /* create an ion handle for the allocation */
+ handle = ion_alloc(client, 0, 0, 1 << OMAP_ION_HEAP_TILER);
+ if (IS_ERR_OR_NULL(handle)) {
+ ret = PTR_ERR(handle);
+ pr_err("%s: failure to allocate handle to manage tiler"
+ " allocation\n", __func__);
+ goto err;
+ }
+
+ buffer = ion_handle_buffer(handle);
+ buffer->size = info->n_tiler_pages * PAGE_SIZE;
+ buffer->priv_virt = info;
+ data->handle = handle;
+ return 0;
+
+err:
+ tiler_unpin_block(info->tiler_handle);
+err_alloc:
+ tiler_free_block_area(info->tiler_handle);
+ if (info->lump)
+ ion_carveout_free(heap, addr, n_phys_pages * PAGE_SIZE);
+ else
+ for (i -= 1; i >= 0; i--)
+ ion_carveout_free(heap, info->phys_addrs[i], PAGE_SIZE);
+err_nomem:
+ kfree(info);
+ return ret;
+}
+
+void omap_tiler_heap_free(struct ion_buffer *buffer)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+
+ tiler_unpin_block(info->tiler_handle);
+ tiler_free_block_area(info->tiler_handle);
+
+ if (info->lump) {
+ ion_carveout_free(buffer->heap, info->phys_addrs[0],
+ info->n_phys_pages*PAGE_SIZE);
+ } else {
+ int i;
+ for (i = 0; i < info->n_phys_pages; i++)
+ ion_carveout_free(buffer->heap,
+ info->phys_addrs[i], PAGE_SIZE);
+ }
+
+ kfree(info);
+}
+
+static int omap_tiler_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+
+ *addr = info->tiler_start;
+ *len = buffer->size;
+ return 0;
+}
+
+int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
+ int *n, u32 **tiler_addrs)
+{
+ ion_phys_addr_t addr;
+ size_t len;
+ int ret;
+ struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;
+
+ /* validate that the handle exists in this client */
+ ret = ion_phys(client, handle, &addr, &len);
+ if (ret)
+ return ret;
+
+ *n = info->n_tiler_pages;
+ *tiler_addrs = info->tiler_addrs;
+ return 0;
+}
+
+int omap_tiler_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+ int n_pages = min(vma_pages, info->n_tiler_pages);
+ int i, ret;
+
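+	/* map each backing tiler page into the vma with uncached attributes */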
+ for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
+ ret = remap_pfn_range(vma, addr,
+ __phys_to_pfn(info->tiler_addrs[i]),
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct ion_heap_ops omap_tiler_ops = {
+ .allocate = omap_tiler_heap_allocate,
+ .free = omap_tiler_heap_free,
+ .phys = omap_tiler_phys,
+ .map_user = omap_tiler_heap_map_user,
+};
+
+struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_heap *heap;
+
+ heap = ion_carveout_heap_create(data);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &omap_tiler_ops;
+ heap->type = OMAP_ION_HEAP_TYPE_TILER;
+ heap->name = data->name;
+ heap->id = data->id;
+ return heap;
+}
+
+void omap_tiler_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
diff --git a/drivers/gpu/pvr/Kconfig b/drivers/gpu/pvr/Kconfig
new file mode 100644
index 0000000..0fe0d94
--- /dev/null
+++ b/drivers/gpu/pvr/Kconfig
@@ -0,0 +1,142 @@
+
+config PVR_SGX
+ tristate "PowerVR SGX support"
+ depends on ARCH_OMAP && OMAP2_DSS
+ select PVR_OMAP_DSS2
+ help
+ Enable this option to build support for the PowerVR SGX 3D core.
+
+ To compile this driver as a module, choose M here:
+ this will generate two modules, called pvrsrvkm and omaplfb.
+
+choice
+ prompt "PowerVR SGX core"
+ depends on PVR_SGX
+ default PVR_SGXCORE_540
+
+config PVR_SGXCORE_540
+ bool "SGX 540"
+
+endchoice
+
+choice
+ prompt "PowerVR build type"
+ depends on PVR_SGX
+ default PVR_BUILD_RELEASE
+
+config PVR_BUILD_RELEASE
+ bool "Release"
+
+config PVR_BUILD_DEBUG
+ bool "Debug"
+
+endchoice
+
+# Release build debugging options
+
+config PVR_NEED_PVR_DPF
+ bool "Enable debugging messages in release build"
+ depends on PVR_BUILD_RELEASE
+
+config PVR_NEED_PVR_ASSERT
+ bool "Enable assertions in release build"
+ depends on PVR_BUILD_RELEASE
+
+# Debugging options
+
+config PVR_DEBUG_MEMORY
+ bool "Record memory-related debugging information"
+ depends on PVR_BUILD_DEBUG
+ default y
+
+config PVR_DEBUG_BRIDGE_KM
+ bool "Collect bridge statistics"
+ depends on PVR_BUILD_DEBUG
+ default y
+
+config PVR_DEBUG_TRACE_BRIDGE_KM
+ bool "Trace bridge calls"
+ depends on PVR_DEBUG_BRIDGE_KM
+ default n
+
+config PVR_DEBUG_BRIDGE_KM_DISPATCH_TABLE
+ bool "Dump bridge dispatch table entries"
+ depends on PVR_BUILD_DEBUG
+ default n
+
+
+#
+# General options
+#
+
+config PVR_PERCONTEXT_PB
+ bool "Per-context parameter buffer (recommended)"
+ depends on PVR_SGX
+ default y
+
+config PVR_ACTIVE_POWER_MANAGEMENT
+ bool "Support for active power management (recommended)"
+ depends on PVR_SGX
+ default y
+
+config PVR_ACTIVE_POWER_LATENCY_MS
+ int "Active power event latency (ms)"
+ depends on PVR_ACTIVE_POWER_MANAGEMENT
+ default 100
+
+config PVR_SGX_LOW_LATENCY_SCHEDULING
+ bool "Enable low-latency scheduling"
+ depends on PVR_SGX
+ default y
+
+config PVR_USSE_EDM_STATUS_DEBUG
+ bool "Trace microkernel status"
+ depends on PVR_SGX
+ default y if PVR_BUILD_DEBUG
+
+config PVR_DUMP_MK_TRACE
+ bool "Dump microkernel trace on HW recovery"
+ depends on PVR_USSE_EDM_STATUS_DEBUG
+ default y
+
+config PVR_PDUMP
+ bool "Support for parameter dumping (Pdump)"
+ depends on PVR_SGX
+ default n
+
+config PVR_OMAP_DSS2
+ bool
+
+choice
+ prompt "SGX DVFS mode"
+ depends on PVR_SGX
+ default SGX_DVFS_MODE_NONE
+
+config SGX_DVFS_MODE_NONE
+ bool "None"
+
+config SGX_DVFS_MODE_LINEAR
+ bool "Linear"
+
+config SGX_DVFS_MODE_OPTIMIZED
+ bool "Optimized"
+endchoice
+
+config SGX_DVFS_IDLE_TIMEOUT
+ int "DVFS idle timeout (us)"
+ depends on PVR_SGX
+ default 1000
+
+config PVR_LINUX_MEM_AREA_POOL
+ bool "Enable uncached allocation pool"
+ depends on PVR_SGX
+ default n
+
+config PVR_LINUX_MEM_AREA_POOL_MAX_PAGES
+ int "Maximum number of pages in pool"
+ depends on PVR_LINUX_MEM_AREA_POOL
+ default 10800
+ help
+ Pool size in pages.
+ A size of 0 disables the pool.
+ A size of -1 allows the pool to grow indefinitely.
diff --git a/drivers/gpu/pvr/Makefile b/drivers/gpu/pvr/Makefile
new file mode 100644
index 0000000..183d76d
--- /dev/null
+++ b/drivers/gpu/pvr/Makefile
@@ -0,0 +1,119 @@
+ccflags-y = -DLINUX -D__linux__ -DANDROID -DPVR_BUILD_DIR="\"omap4430_android\""
+ccflags-y += -Idrivers/gpu/pvr -Idrivers/gpu/pvr/omap4 -Idrivers/video/omap2
+
+ccflags-y += \
+ -DSUPPORT_SGX \
+ -DTRANSFER_QUEUE \
+ -DSGX_DYNAMIC_TIMING_INFO \
+ -DSUPPORT_HW_RECOVERY \
+ -DLDM_PLATFORM \
+ -DSUPPORT_SGX_NEW_STATUS_VALS \
+ -DSUPPORT_SGX_HWPERF \
+ -DSYS_USING_INTERRUPTS \
+ -DPVR_SECURE_HANDLES \
+ -DPVR_LINUX_USING_WORKQUEUES \
+ -DPVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE \
+ -DPVR_LINUX_TIMERS_USING_WORKQUEUES \
+ -DSYS_CUSTOM_POWERLOCK_WRAP \
+ -DSUPPORT_MEMINFO_IDS \
+ -DDISPLAY_CONTROLLER=omaplfb \
+ -DPVRSRV_MODNAME="\"pvrsrvkm\"" \
+ -DPVR_LDM_DRIVER_REGISTRATION_NAME="\"pvrsrvkm\"" \
+ -DSUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED \
+ -DSUPPORT_LARGE_GENERAL_HEAP \
+ -DPVR_NO_OMAP_TIMER \
+ -DSYS_SUPPORTS_SGX_IDLE_CALLBACK \
+ -DPVRSRV_REFCOUNT_DEBUG \
+ -Idrivers/gpu/pvr/sgx
+
+ccflags-$(CONFIG_PVR_SGXCORE_540) += \
+ -DSGX540 -DSUPPORT_SGX540 \
+ -DSGX_CORE_REV=120
+
+ccflags-$(CONFIG_PVR_BUILD_RELEASE) += \
+ -DPVR_BUILD_TYPE="\"release\"" -DRELEASE
+
+ccflags-$(CONFIG_PVR_BUILD_DEBUG) += \
+ -DPVR_BUILD_TYPE="\"debug\"" -DDEBUG
+
+ccflags-$(CONFIG_PVR_NEED_PVR_DPF) += -DPVRSRV_NEED_PVR_DPF
+ccflags-$(CONFIG_PVR_NEED_PVR_ASSERT) += -DPVRSRV_NEED_PVR_ASSERT
+
+ccflags-$(CONFIG_PVR_DEBUG_MEMORY) += \
+ -DDEBUG_LINUX_MEMORY_ALLOCATIONS \
+ -DDEBUG_LINUX_MEM_AREAS \
+ -DDEBUG_LINUX_MMAP_AREAS
+
+ccflags-$(CONFIG_PVR_DEBUG_BRIDGE_KM) += -DDEBUG_BRIDGE_KM
+ccflags-$(CONFIG_PVR_DEBUG_TRACE_BRIDGE_KM) += -DDEBUG_TRACE_BRIDGE_KM
+ccflags-$(CONFIG_PVR_DEBUG_BRIDGE_KM_DISPATCH_TABLE) += -DDEBUG_BRIDGE_KM_DISPATCH_TABLE
+
+ccflags-$(CONFIG_PVR_PERCONTEXT_PB) += -DSUPPORT_PERCONTEXT_PB
+ccflags-$(CONFIG_PVR_SGX_LOW_LATENCY_SCHEDULING) += -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING
+ccflags-$(CONFIG_PVR_ACTIVE_POWER_MANAGEMENT) += -DSUPPORT_ACTIVE_POWER_MANAGEMENT
+ccflags-$(CONFIG_PVR_ACTIVE_POWER_MANAGEMENT) += \
+ -DSYS_SGX_ACTIVE_POWER_LATENCY_MS=CONFIG_PVR_ACTIVE_POWER_LATENCY_MS
+
+ccflags-$(CONFIG_PVR_USSE_EDM_STATUS_DEBUG) += -DPVRSRV_USSE_EDM_STATUS_DEBUG
+ccflags-$(CONFIG_PVR_DUMP_MK_TRACE) += -DPVRSRV_DUMP_MK_TRACE
+
+ccflags-$(CONFIG_PVR_PDUMP) += \
+ -DPDUMP -DSUPPORT_DBGDRV_EVENT_OBJECTS -DSUPPORT_PDUMP_MULTI_PROCESS
+
+ccflags-$(CONFIG_PVR_LINUX_MEM_AREA_POOL) += \
+ -DPVR_LINUX_MEM_AREA_POOL_MAX_PAGES=CONFIG_PVR_LINUX_MEM_AREA_POOL_MAX_PAGES \
+ -DPVR_LINUX_MEM_AREA_USE_VMAP -DPVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK
+
+pvrsrvkm-y := \
+ osfunc.o \
+ mutils.o \
+ mmap.o \
+ module.o \
+ pdump.o \
+ proc.o \
+ pvr_bridge_k.o \
+ pvr_debug.o \
+ mm.o \
+ mutex.o \
+ event.o \
+ osperproc.o \
+ buffer_manager.o \
+ devicemem.o \
+ deviceclass.o \
+ handle.o \
+ hash.o \
+ metrics.o \
+ pvrsrv.o \
+ queue.o \
+ ra.o \
+ resman.o \
+ power.o \
+ mem.o \
+ pdump_common.o \
+ bridged_support.o \
+ bridged_pvr_bridge.o \
+ perproc.o \
+ lists.o \
+ refcount.o \
+ omap4/sysconfig.o \
+ omap4/sysutils.o \
+ sgx/bridged_sgx_bridge.o \
+ sgx/sgxinit.o \
+ sgx/sgxpower.o \
+ sgx/sgxreset.o \
+ sgx/sgxutils.o \
+ sgx/sgxkick.o \
+ sgx/sgxtransfer.o \
+ sgx/mmu.o \
+ sgx/pb.o
+
+pvrsrvkm-$(CONFIG_ION_OMAP) += ion.o
+
+omaplfb-y := \
+ omaplfb/omaplfb_displayclass.o \
+ omaplfb/omaplfb_linux.o
+
+obj-$(CONFIG_PVR_SGX) += pvrsrvkm.o
+obj-$(CONFIG_PVR_SGX) += omaplfb.o
+# no Makefile in dbgdrv
+#obj-$(CONFIG_PVR_PDUMP) += dbgdrv/
diff --git a/drivers/gpu/pvr/bridged_pvr_bridge.c b/drivers/gpu/pvr/bridged_pvr_bridge.c
new file mode 100644
index 0000000..b585b99
--- /dev/null
+++ b/drivers/gpu/pvr/bridged_pvr_bridge.c
@@ -0,0 +1,4894 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "services.h"
+#include "pvr_bridge_km.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_SGX)
+#include "sgx_bridge.h"
+#endif
+#if defined(SUPPORT_VGX)
+#include "vgx_bridge.h"
+#endif
+#if defined(SUPPORT_MSVDX)
+#include "msvdx_bridge.h"
+#endif
+#include "perproc.h"
+#include "device.h"
+#include "buffer_manager.h"
+#include "refcount.h"
+
+#include "pdump_km.h"
+#include "syscommon.h"
+
+#include "bridged_pvr_bridge.h"
+#if defined(SUPPORT_SGX)
+#include "bridged_sgx_bridge.h"
+#endif
+#if defined(SUPPORT_VGX)
+#include "bridged_vgx_bridge.h"
+#endif
+#if defined(SUPPORT_MSVDX)
+#include "bridged_msvdx_bridge.h"
+#endif
+
+#include "env_data.h"
+
+#if defined (__linux__)
+#include "mmap.h"
+#endif
+
+
+#include "srvkm.h"
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
+static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
+#else
+static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL;
+#endif
+
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+
+static IMG_INT
+PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
+
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+
+ psEnumDeviceOUT->eError =
+ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
+ psEnumDeviceOUT->asDeviceIdentifier);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
+ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
+
+ psAcquireDevInfoOUT->eError =
+ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
+ psAcquireDevInfoIN->eDeviceType,
+ &hDevCookieInt);
+ if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psAcquireDevInfoOUT->eError =
+ PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psAcquireDevInfoOUT->hDevCookie,
+ hDevCookieInt,
+ PVRSRV_HANDLE_TYPE_DEV_NODE,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
+ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_UINT32 i;
+ IMG_BOOL bCreated;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
+
+
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1)
+
+ psCreateDevMemContextOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psCreateDevMemContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psCreateDevMemContextOUT->eError =
+ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
+ psPerProc,
+ &hDevMemContextInt,
+ &psCreateDevMemContextOUT->ui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asHeapInfo[0],
+#else
+ &psCreateDevMemContextOUT->sHeapInfo[0],
+#endif
+ &bCreated,
+ pbSharedDeviceMemHeap);
+
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ if(bCreated)
+ {
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psCreateDevMemContextOUT->hDevMemContext,
+ hDevMemContextInt,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ }
+ else
+ {
+ psCreateDevMemContextOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &psCreateDevMemContextOUT->hDevMemContext,
+ hDevMemContextInt,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeapExt;
+#else
+ IMG_HANDLE hDevMemHeapExt;
+#endif
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ if(abSharedDeviceMemHeap[i])
+#endif
+ {
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+#else
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+#endif
+ }
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+
+ if(bCreated)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDevMemContextOUT->hDevMemContext);
+#else
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDevMemContextOUT->hDevMemContext);
+#endif
+ }
+ else
+ {
+ psCreateDevMemContextOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+#if defined (SUPPORT_SID_INTERFACE)
+ asHeapInfo[i].hDevMemHeap,
+#else
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
+#endif
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+ }
+#endif
+ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
+#if defined (SUPPORT_SID_INTERFACE)
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
+ psCreateDevMemContextOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
+ psCreateDevMemContextOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
+#endif
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_BOOL bDestroyed;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psDestroyDevMemContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psDestroyDevMemContextIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if(bDestroyed)
+ {
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroyDevMemContextIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ IMG_UINT32 i;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS)
+
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psGetDevMemHeapInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psGetDevMemHeapInfoIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
+ hDevMemContextInt,
+ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asHeapInfo[0],
+#else
+ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
+#endif
+ pbSharedDeviceMemHeap);
+
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeapExt;
+#else
+ IMG_HANDLE hDevMemHeapExt;
+#endif
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ if(abSharedDeviceMemHeap[i])
+#endif
+ {
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+#else
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+#endif
+ }
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+
+ psGetDevMemHeapInfoOUT->eError =
+ PVRSRVFindHandle(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+#if defined (SUPPORT_SID_INTERFACE)
+ asHeapInfo[i].hDevMemHeap,
+#else
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
+#endif
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#endif
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
+#if defined (SUPPORT_SID_INTERFACE)
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs;
+ psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride;
+#endif
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
+#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
+IMG_INT
+PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+#else
+static IMG_INT
+PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemHeapInt;
+ IMG_UINT32 ui32ShareIndex;
+ IMG_BOOL bUseShareMemWorkaround;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2)
+
+ psAllocDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psAllocDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psAllocDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
+ psAllocDeviceMemIN->hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+
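+	/* allocations flagged PVRSRV_MEM_XPROC are shared across processes;
+	   the buffer manager workaround reserves a share index before the
+	   allocation is made */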
+ bUseShareMemWorkaround = ((psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_XPROC) != 0) ? IMG_TRUE : IMG_FALSE;
+ ui32ShareIndex = 7654321;
+
+ if (bUseShareMemWorkaround)
+ {
+
+
+
+ psAllocDeviceMemOUT->eError =
+ BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32ShareIndex);
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+
+ if(psAllocDeviceMemIN->pvPrivData)
+ {
+ if(!OSAccessOK(PVR_VERIFY_READ,
+ psAllocDeviceMemIN->pvPrivData,
+ psAllocDeviceMemIN->ui32PrivDataLength))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Access check failed for pvPrivData"));
+ return -EFAULT;
+ }
+ }
+
+ psAllocDeviceMemOUT->eError =
+ PVRSRVAllocDeviceMemKM(hDevCookieInt,
+ psPerProc,
+ hDevMemHeapInt,
+ psAllocDeviceMemIN->ui32Attribs,
+ psAllocDeviceMemIN->ui32Size,
+ psAllocDeviceMemIN->ui32Alignment,
+ psAllocDeviceMemIN->pvPrivData,
+ psAllocDeviceMemIN->ui32PrivDataLength,
+ &psMemInfo,
+ "" );
+
+ if (bUseShareMemWorkaround)
+ {
+ PVR_ASSERT(ui32ShareIndex != 7654321);
+ BM_XProcWorkaroundUnsetShareIndex(ui32ShareIndex);
+ }
+
+ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psMemInfo->sShareMemWorkaround.bInUse = bUseShareMemWorkaround;
+ if (bUseShareMemWorkaround)
+ {
+ PVR_ASSERT(ui32ShareIndex != 7654321);
+ psMemInfo->sShareMemWorkaround.ui32ShareIndex = ui32ShareIndex;
+ psMemInfo->sShareMemWorkaround.hDevCookieInt = hDevCookieInt;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqAttribs = psAllocDeviceMemIN->ui32Attribs;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqSize = (IMG_UINT32)psAllocDeviceMemIN->ui32Size;
+ psMemInfo->sShareMemWorkaround.ui32OrigReqAlignment = (IMG_UINT32)psAllocDeviceMemIN->ui32Alignment;
+ }
+
+ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
+
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
+
+#if defined (__linux__)
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
+#else
+ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
+#endif
+ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ psAllocDeviceMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+
+#else
+ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_ASSERT(psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo != 0);
+
+ if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
+#endif
+
+ if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
+ {
+
+ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
+ 0,
+ sizeof (PVRSRV_CLIENT_SYNC_INFO));
+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
+ }
+ else
+ {
+
+
+#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
+ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psAllocDeviceMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
+#else
+ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+#endif
+#endif
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
+
+ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
+ &psAllocDeviceMemOUT->sClientSyncInfo;
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc)
+
+ return 0;
+}
+
+#endif
+
+static IMG_INT
+PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_VOID *pvKernelMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psFreeDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvKernelMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psFreeDeviceMemIN->hKernelMemInfo,
+#else
+ psFreeDeviceMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ psFreeDeviceMemIN->hKernelMemInfo,
+#else
+ psFreeDeviceMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+
+ PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM) ||
+ ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2));
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+
+ psExportDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psExportDeviceMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
+ return 0;
+ }
+
+
+ psExportDeviceMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psExportDeviceMemIN->hKernelMemInfo,
+#else
+ psExportDeviceMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
+ return 0;
+ }
+
+
+ psExportDeviceMemOUT->eError =
+ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
+ &psExportDeviceMemOUT->hMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psExportDeviceMemOUT->eError == PVRSRV_OK)
+ {
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
+ return 0;
+ }
+
+
+ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+ &psExportDeviceMemOUT->hMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ if (psExportDeviceMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
+ return 0;
+ }
+
+
+ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
+ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
+ IMG_HANDLE hDstDevMemHeap = IMG_NULL;
+
+ PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY) ||
+ ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2));
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2)
+
+
+ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_VOID**)&psSrcKernelMemInfo,
+ psMapDevMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDstDevMemHeap,
+ psMapDevMemIN->hDstDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ if (psSrcKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "using the mem wrap workaround."));
+
+
+
+
+
+
+
+
+
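+		/* reuse the share index recorded when the buffer was allocated so
+		   this mapping is backed by the same cross-process buffer */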
+ psMapDevMemOUT->eError = BM_XProcWorkaroundSetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW(): failed to recycle shared buffer"));
+ return 0;
+ }
+
+ psMapDevMemOUT->eError =
+ PVRSRVAllocDeviceMemKM(psSrcKernelMemInfo->sShareMemWorkaround.hDevCookieInt,
+ psPerProc,
+ hDstDevMemHeap,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAttribs | PVRSRV_MEM_NO_SYNCOBJ,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqSize,
+ psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment,
+ IMG_NULL,
+ 0,
+ &psDstKernelMemInfo,
+ "" );
+
+
+ BM_XProcWorkaroundUnsetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW: Failed to create allocation for cross-process memory map"));
+ return 0;
+ }
+
+ if(psSrcKernelMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoIncRef(psSrcKernelMemInfo->psKernelSyncInfo, psSrcKernelMemInfo);
+ }
+
+ psDstKernelMemInfo->psKernelSyncInfo = psSrcKernelMemInfo->psKernelSyncInfo;
+ }
+ else
+ {
+
+ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
+ psSrcKernelMemInfo,
+ hDstDevMemHeap,
+ &psDstKernelMemInfo);
+ if(psMapDevMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+
+ psDstKernelMemInfo->sShareMemWorkaround = psSrcKernelMemInfo->sShareMemWorkaround;
+
+ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
+ 0,
+ sizeof(psMapDevMemOUT->sDstClientMemInfo));
+ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
+ 0,
+ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
+
+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
+ psDstKernelMemInfo->pvLinAddrKM;
+
+ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
+ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
+ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
+ psMapDevMemOUT->sDstClientMemInfo.uAllocSize = psDstKernelMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+
+#else
+ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
+ psDstKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
+
+#if defined (SUPPORT_SID_INTERFACE)
+
+ if (psDstKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientMemInfo.hMappingInfo,
+ psDstKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = 0;
+ }
+#endif
+
+
+ if(psDstKernelMemInfo->psKernelSyncInfo)
+ {
+#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
+ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
+ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapDevMemOUT->sDstClientSyncInfo.sReadOps2CompleteDevVAddr =
+ psDstKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+
+ if (psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo,
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = 0;
+ }
+#else
+ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
+ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+#endif
+#endif
+
+ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
+ psDstKernelMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
+
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUnmapDevMemIN->hKernelMemInfo,
+#else
+ psUnmapDevMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if (psKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ psRetOUT->eError = PVRSRVFreeDeviceMemKM(psKernelMemInfo->sShareMemWorkaround.hDevCookieInt, psKernelMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnmapDeviceMemoryBW: internal error, didn't expect FreeDeviceMem to fail"));
+ return 0;
+ }
+ }
+ else
+ {
+ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUnmapDevMemIN->hKernelMemInfo,
+#else
+ psUnmapDevMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ return 0;
+}
+
+
+
+static IMG_INT
+PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_HANDLE hOSMapInfo;
+ IMG_HANDLE hDeviceClassBufferInt;
+ IMG_HANDLE hDevMemContextInt;
+ PVRSRV_HANDLE_TYPE eHandleType;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2)
+
+
+ psMapDevClassMemOUT->eError =
+ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
+ &hDeviceClassBufferInt,
+ &eHandleType,
+ psMapDevClassMemIN->hDeviceClassBuffer);
+
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psMapDevClassMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psMapDevClassMemIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ switch(eHandleType)
+ {
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
+ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
+#else
+ case PVRSRV_HANDLE_TYPE_NONE:
+#endif
+ break;
+ default:
+ psMapDevClassMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
+ return 0;
+ }
+
+ psMapDevClassMemOUT->eError =
+ PVRSRVMapDeviceClassMemoryKM(psPerProc,
+ hDevMemContextInt,
+ hDeviceClassBufferInt,
+ &psMemInfo,
+ &hOSMapInfo);
+ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psMapDevClassMemOUT->sClientMemInfo));
+ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
+ 0,
+ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
+
+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
+
+ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ psMapDevClassMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psMemInfo->sMemBlk.hOSMemHandle != 0)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevClassMemIN->hDeviceClassBuffer);
+ }
+ else
+ {
+ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
+#else
+ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psMapDevClassMemIN->hDeviceClassBuffer);
+
+ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
+
+
+ if(psMemInfo->psKernelSyncInfo)
+ {
+#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
+ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapDevClassMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != 0)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
+#else
+ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+#endif
+#endif
+
+ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvKernelMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUnmapDevClassMemIN->hKernelMemInfo,
+#else
+ psUnmapDevClassMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUnmapDevClassMemIN->hKernelMemInfo,
+#else
+ psUnmapDevClassMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ return 0;
+}
+
+
+#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
+IMG_INT
+PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
+ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+#else
+static IMG_INT
+PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
+ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
+ IMG_UINT32 ui32PageTableSize = 0;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2)
+
+
+ psWrapExtMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psWrapExtMemIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psWrapExtMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
+ psWrapExtMemIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if(psWrapExtMemIN->ui32NumPageTableEntries)
+ {
+ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
+ * sizeof(IMG_SYS_PHYADDR);
+
+ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32PageTableSize,
+ (IMG_VOID **)&psSysPAddr, 0,
+ "Page Table"));
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ psSysPAddr,
+ psWrapExtMemIN->psSysPAddr,
+ ui32PageTableSize) != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
+
+ return -EFAULT;
+ }
+ }
+
+ psWrapExtMemOUT->eError =
+ PVRSRVWrapExtMemoryKM(hDevCookieInt,
+ psPerProc,
+ hDevMemContextInt,
+ psWrapExtMemIN->ui32ByteSize,
+ psWrapExtMemIN->ui32PageOffset,
+ psWrapExtMemIN->bPhysContig,
+ psSysPAddr,
+ psWrapExtMemIN->pvLinAddr,
+ psWrapExtMemIN->ui32Flags,
+ &psMemInfo);
+
+ if(psWrapExtMemIN->ui32NumPageTableEntries)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32PageTableSize,
+ (IMG_VOID *)psSysPAddr, 0);
+
+ }
+
+ if(psWrapExtMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
+ psMemInfo->pvLinAddrKM;
+
+
+ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+ psWrapExtMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+#else
+ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+#if defined (SUPPORT_SID_INTERFACE)
+
+ if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientMemInfo.hMappingInfo,
+ psMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
+#endif
+
+
+#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
+ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
+ psMemInfo->psKernelSyncInfo->psSyncData;
+ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psWrapExtMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr =
+ psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+
+ if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientSyncInfo.hMappingInfo,
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
+#else
+ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
+ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+#endif
+#endif
+
+ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc)
+
+ return 0;
+}
+#endif
+
+static IMG_INT
+PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+ psUnwrapExtMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psUnwrapExtMemIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
+ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
+
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psGetFreeDeviceMemOUT->eError =
+ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
+ &psGetFreeDeviceMemOUT->ui32Total,
+ &psGetFreeDeviceMemOUT->ui32Free,
+ &psGetFreeDeviceMemOUT->ui32LargestBlock);
+
+ return 0;
+}
+
+static IMG_INT
+PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
+ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
+
+#if defined (__linux__)
+ psMMapDataOUT->eError =
+ PVRMMapOSMemHandleToMMapData(psPerProc,
+ psMMapDataIN->hMHandle,
+ &psMMapDataOUT->ui32MMapOffset,
+ &psMMapDataOUT->ui32ByteOffset,
+ &psMMapDataOUT->ui32RealByteSize,
+ &psMMapDataOUT->ui32UserVAddr);
+#else
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
+
+ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+ return 0;
+}
+
+
+static IMG_INT
+PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
+ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
+
+#if defined (__linux__)
+ psMMapDataOUT->eError =
+ PVRMMapReleaseMMapData(psPerProc,
+ psMMapDataIN->hMHandle,
+ &psMMapDataOUT->bMUnmap,
+ &psMMapDataOUT->ui32RealByteSize,
+ &psMMapDataOUT->ui32UserVAddr);
+#else
+
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
+
+ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+ return 0;
+}
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+static IMG_INT
+PVRSRVChangeDeviceMemoryAttributesBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS *psChgMemAttribIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hKernelMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hKernelMemInfo,
+ psChgMemAttribIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVChangeDeviceMemoryAttributesKM(hKernelMemInfo, psChgMemAttribIN->ui32Attribs);
+
+ return 0;
+}
+#else
+static IMG_INT
+PVRSRVChangeDeviceMemoryAttributesBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS *psChgMemAttribIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_UNREFERENCED_PARAMETER(psChgMemAttribIN);
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ return 0;
+}
+#endif
+
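+ /* PDump bridge wrappers. These entry points are compiled in only when the
+  * driver is built with PDUMP support and forward capture requests to the
+  * corresponding PDump*KM helpers.
+  */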
+#ifdef PDUMP
+static IMG_INT
+PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
+ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
+static IMG_INT
+PDumpCommentBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
+ psPDumpCommentIN->ui32Flags);
+ return 0;
+}
+
+static IMG_INT
+PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpRegDumpIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PDumpRegWithFlagsKM (psPDumpRegDumpIN->szRegRegion,
+ psPDumpRegDumpIN->sHWReg.ui32RegAddr,
+ psPDumpRegDumpIN->sHWReg.ui32RegVal,
+ psPDumpRegDumpIN->ui32Flags);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpRegPolIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psRetOUT->eError =
+ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->szRegRegion,
+ psPDumpRegPolIN->sHWReg.ui32RegAddr,
+ psPDumpRegPolIN->sHWReg.ui32RegVal,
+ psPDumpRegPolIN->ui32Mask,
+ psPDumpRegPolIN->ui32Flags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psPDumpMemPolIN->hKernelMemInfo,
+#else
+ psPDumpMemPolIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
+ psPDumpMemPolIN->ui32Offset,
+ psPDumpMemPolIN->ui32Value,
+ psPDumpMemPolIN->ui32Mask,
+ psPDumpMemPolIN->eOperator,
+ psPDumpMemPolIN->ui32Flags,
+ MAKEUNIQUETAG(pvMemInfo));
+
+ return 0;
+}
+
+static IMG_INT
+PDumpMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psPDumpMemDumpIN->hKernelMemInfo,
+#else
+ psPDumpMemDumpIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemUM(psPerProc,
+ psPDumpMemDumpIN->pvAltLinAddr,
+ psPDumpMemDumpIN->pvLinAddr,
+ pvMemInfo,
+ psPDumpMemDumpIN->ui32Offset,
+ psPDumpMemDumpIN->ui32Bytes,
+ psPDumpMemDumpIN->ui32Flags,
+ MAKEUNIQUETAG(pvMemInfo));
+
+ return 0;
+}
+
+static IMG_INT
+PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContextInt;
+
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
+ psPDumpBitmapIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ /* Bail out here so the second lookup below cannot mask a device node error. */
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle( psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psPDumpBitmapIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpBitmapKM(psDeviceNode,
+ &psPDumpBitmapIN->szFileName[0],
+ psPDumpBitmapIN->ui32FileOffset,
+ psPDumpBitmapIN->ui32Width,
+ psPDumpBitmapIN->ui32Height,
+ psPDumpBitmapIN->ui32StrideInBytes,
+ psPDumpBitmapIN->sDevBaseAddr,
+ hDevMemContextInt,
+ psPDumpBitmapIN->ui32Size,
+ psPDumpBitmapIN->ePixelFormat,
+ psPDumpBitmapIN->eMemFormat,
+ psPDumpBitmapIN->ui32Flags);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode,
+ psPDumpReadRegIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ /* The device node lookup only validates hDevCookie; fail here rather
+  * than letting the result be overwritten below. */
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpReadRegKM(&psPDumpReadRegIN->szRegRegion[0],
+ &psPDumpReadRegIN->szFileName[0],
+ psPDumpReadRegIN->ui32FileOffset,
+ psPDumpReadRegIN->ui32Address,
+ psPDumpReadRegIN->ui32Size,
+ psPDumpReadRegIN->ui32Flags);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpMemPagesBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES *psPDumpMemPagesIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPAGES);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpMemPagesIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
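+ /* No PDump operation is issued here; the lookup above only validates the
+  * device cookie handle. */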
+
+ return 0;
+}
+
+static IMG_INT
+PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32PDumpFlags;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ ui32PDumpFlags = 0;
+ if(psPDumpDriverInfoIN->bContinuous)
+ {
+ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
+ }
+ psRetOUT->eError =
+ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
+ ui32PDumpFlags);
+
+ return 0;
+}
+
+static IMG_INT
+PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
+ IMG_VOID *pvSyncInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psPDumpSyncDumpIN->hKernelSyncInfo,
+#else
+ psPDumpSyncDumpIN->psKernelSyncInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpMemUM(psPerProc,
+ psPDumpSyncDumpIN->pvAltLinAddr,
+ IMG_NULL,
+ ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
+ psPDumpSyncDumpIN->ui32Offset,
+ ui32Bytes,
+ 0,
+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
+
+ return 0;
+}
+
+static IMG_INT
+PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32Offset;
+ IMG_VOID *pvSyncInfo;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSyncInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psPDumpSyncPolIN->hKernelSyncInfo,
+#else
+ psPDumpSyncPolIN->psKernelSyncInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if(psPDumpSyncPolIN->bIsRead)
+ {
+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
+ }
+ else
+ {
+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
+ }
+
+
+ if (psPDumpSyncPolIN->bUseLastOpDumpVal)
+ {
+ if(psPDumpSyncPolIN->bIsRead)
+ {
+ ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastReadOpDumpVal;
+ }
+ else
+ {
+ ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastOpDumpVal;
+ }
+ ui32Mask = 0xffffffff;
+ }
+ else
+ {
+ ui32Value = psPDumpSyncPolIN->ui32Value;
+ ui32Mask = psPDumpSyncPolIN->ui32Mask;
+ }
+
+ psRetOUT->eError =
+ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
+
+ return 0;
+}
+
+
+static IMG_INT
+PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psDeviceNode,
+ psPDumpCycleCountRegReadIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PDumpCycleCountRegRead(&psDeviceNode->sDevId,
+ psPDumpCycleCountRegReadIN->ui32RegOffset,
+ psPDumpCycleCountRegReadIN->bLastFrame);
+
+ psRetOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
+static IMG_INT
+PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
+ psPDumpPDDevPAddrIN->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
+ psPDumpPDDevPAddrIN->ui32Offset,
+ psPDumpPDDevPAddrIN->sPDDevPAddr,
+ MAKEUNIQUETAG(pvMemInfo),
+ PDUMP_PD_UNIQUETAG);
+ return 0;
+}
+
+static IMG_INT
+PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psRetOUT->eError = PDumpStartInitPhaseKM();
+
+ return 0;
+}
+
+static IMG_INT
+PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ psRetOUT->eError = PDumpStopInitPhaseKM();
+
+ return 0;
+}
+
+#endif
+
+
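+ /* Returns miscellaneous driver information. The request is copied into a
+  * kernel-side structure (a separate PVRSRV_MISC_INFO_KM on SID builds); if
+  * memory stats, DDK version or free-memory strings are requested, a
+  * temporary buffer is allocated, filled by PVRSRVGetMiscInfoKM and copied
+  * back to user space. Global event object and SOC timer handles are wrapped
+  * into shared per-process handles before returning.
+  */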
+static IMG_INT
+PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_MISC_INFO_KM sMiscInfo = {0};
+#endif
+ PVRSRV_ERROR eError;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sMiscInfo.ui32StateRequest = psGetMiscInfoIN->sMiscInfo.ui32StateRequest;
+ sMiscInfo.ui32StatePresent = psGetMiscInfoIN->sMiscInfo.ui32StatePresent;
+ sMiscInfo.ui32MemoryStrLen = psGetMiscInfoIN->sMiscInfo.ui32MemoryStrLen;
+ sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
+
+ OSMemCopy(&sMiscInfo.sCacheOpCtl,
+ &psGetMiscInfoIN->sMiscInfo.sCacheOpCtl,
+ sizeof(sMiscInfo.sCacheOpCtl));
+ OSMemCopy(&sMiscInfo.sGetRefCountCtl,
+ &psGetMiscInfoIN->sMiscInfo.sGetRefCountCtl,
+ sizeof(sMiscInfo.sGetRefCountCtl));
+#else
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
+ &psGetMiscInfoIN->sMiscInfo,
+ sizeof(PVRSRV_MISC_INFO));
+#endif
+
+ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) &&
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
+ {
+
+ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) ||
+ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0))
+ {
+
+#if defined (SUPPORT_SID_INTERFACE)
+ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID **)&sMiscInfo.pszMemoryStr, 0,
+ "Output string buffer"));
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
+
+
+ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
+ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
+ sMiscInfo.pszMemoryStr,
+ sMiscInfo.ui32MemoryStrLen);
+#else
+ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
+ "Output string buffer"));
+
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
+
+
+ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
+ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
+ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
+#endif
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID *)sMiscInfo.pszMemoryStr, 0);
+#else
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
+ (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
+#endif
+
+
+ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
+
+ if(eError != PVRSRV_OK)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
+ return -EFAULT;
+ }
+ }
+ else
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo);
+#else
+ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
+#endif
+ }
+
+
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
+#else
+ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
+#endif
+ {
+ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
+#if defined (SUPPORT_SID_INTERFACE)
+ sMiscInfo.sGlobalEventObject.hOSEventKM,
+#else
+ psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
+#endif
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.szName,
+ sMiscInfo.sGlobalEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH);
+#endif
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (sMiscInfo.hSOCTimerRegisterOSMemHandle)
+#else
+ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
+#endif
+ {
+
+ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ sMiscInfo.hSOCTimerRegisterOSMemHandle,
+#else
+ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
+#endif
+ PVRSRV_HANDLE_TYPE_SOC_TIMER,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle = 0;
+ }
+
+
+ psGetMiscInfoOUT->sMiscInfo.ui32StateRequest = sMiscInfo.ui32StateRequest;
+ psGetMiscInfoOUT->sMiscInfo.ui32StatePresent = sMiscInfo.ui32StatePresent;
+
+ psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterKM = sMiscInfo.pvSOCTimerRegisterKM;
+ psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterUM = sMiscInfo.pvSOCTimerRegisterUM;
+ psGetMiscInfoOUT->sMiscInfo.pvSOCClockGateRegs = sMiscInfo.pvSOCClockGateRegs;
+
+ psGetMiscInfoOUT->sMiscInfo.ui32SOCClockGateRegsSize = sMiscInfo.ui32SOCClockGateRegsSize;
+
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion,
+ &sMiscInfo.aui32DDKVersion,
+ sizeof(psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion));
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl,
+ &sMiscInfo.sCacheOpCtl,
+ sizeof(psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl));
+ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGetRefCountCtl,
+ &sMiscInfo.sGetRefCountCtl,
+ sizeof(psGetMiscInfoOUT->sMiscInfo.sGetRefCountCtl));
+#endif
+
+ return 0;
+}
+
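+ /* Connection management wrappers. Connect records the PDump persistence and
+  * (for multi-process PDump builds) active flags from the caller's connect
+  * flags and hands back the per-process data handle; Disconnect does nothing
+  * beyond returning PVRSRV_OK.
+  */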
+static IMG_INT
+PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CONNECT_SERVICES *psConnectServicesIN,
+ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
+
+#if defined(PDUMP)
+
+ if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PERSIST) != 0)
+ {
+ psPerProc->bPDumpPersistent = IMG_TRUE;
+ }
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PDUMP_ACTIVE) != 0)
+ {
+ psPerProc->bPDumpActive = IMG_TRUE;
+ }
+#endif
+#else
+ PVR_UNREFERENCED_PARAMETER(psConnectServicesIN);
+#endif
+ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
+ psConnectServicesOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
+
+
+ psRetOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
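+ /* Display class (DC) bridge wrappers: enumerate DC devices, open and close
+  * them, and query their formats, dimensions and capabilities, with all
+  * kernel objects exposed through per-process handles.
+  */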
+static IMG_INT
+PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
+ PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
+
+ psEnumDispClassOUT->eError =
+ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
+ &psEnumDispClassOUT->ui32NumDevices,
+ &psEnumDispClassOUT->ui32DevID[0]);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
+ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDispClassInfoInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1)
+
+ psOpenDispClassDeviceOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psOpenDispClassDeviceIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psOpenDispClassDeviceOUT->eError =
+ PVRSRVOpenDCDeviceKM(psPerProc,
+ psOpenDispClassDeviceIN->ui32DeviceID,
+ hDevCookieInt,
+ &hDispClassInfoInt);
+
+ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psOpenDispClassDeviceOUT->hDeviceKM,
+ hDispClassInfoInt,
+ PVRSRV_HANDLE_TYPE_DISP_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfoInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psCloseDispClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psCloseDispClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ return 0;
+}
+
+static IMG_INT
+PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfoInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
+
+ psEnumDispClassFormatsOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psEnumDispClassFormatsIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psEnumDispClassFormatsOUT->eError =
+ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
+ &psEnumDispClassFormatsOUT->ui32Count,
+ psEnumDispClassFormatsOUT->asFormat);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
+ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfoInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
+
+ psEnumDispClassDimsOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psEnumDispClassDimsIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psEnumDispClassDimsOUT->eError =
+ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
+ &psEnumDispClassDimsIN->sFormat,
+ &psEnumDispClassDimsOUT->ui32Count,
+ psEnumDispClassDimsOUT->asDim);
+
+ return 0;
+}
+
+#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
+static IMG_INT
+PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hBufferInt;
+ IMG_VOID *pvDispClassInfoInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1)
+
+ psGetDispClassSysBufferOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfoInt,
+ psGetDispClassSysBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassSysBufferOUT->eError =
+ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
+ &hBufferInt);
+
+ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psGetDispClassSysBufferOUT->hBuffer,
+ hBufferInt,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassSysBufferIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc)
+
+ return 0;
+}
+#endif
+
+static IMG_INT
+PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
+
+ psGetDispClassInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psGetDispClassInfoIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassInfoOUT->eError =
+ PVRSRVGetDCInfoKM(pvDispClassInfo,
+ &psGetDispClassInfoOUT->sDisplayInfo);
+
+ return 0;
+}
+
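+ /* Creates a display class swap chain via PVRSRVCreateDCSwapChainKM and
+  * returns it as a sub-handle of the DC device handle, together with the
+  * allocated swap chain ID.
+  */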
+static IMG_INT
+PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
+ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_HANDLE hSwapChainInt;
+ IMG_UINT32 ui32SwapChainID;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1)
+
+ psCreateDispClassSwapChainOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psCreateDispClassSwapChainIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+
+ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
+
+ psCreateDispClassSwapChainOUT->eError =
+ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
+ psCreateDispClassSwapChainIN->ui32Flags,
+ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
+ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
+ psCreateDispClassSwapChainIN->ui32BufferCount,
+ psCreateDispClassSwapChainIN->ui32OEMFlags,
+ &hSwapChainInt,
+ &ui32SwapChainID);
+
+ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psCreateDispClassSwapChainOUT->hSwapChain,
+ hSwapChainInt,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psCreateDispClassSwapChainIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
+ psDestroyDispClassSwapChainIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVDestroyDCSwapChainKM(pvSwapChain);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroyDispClassSwapChainIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassDstRectIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassDstRectIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCDstRectKM(pvDispClassInfo,
+ pvSwapChain,
+ &psSetDispClassDstRectIN->sRect);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassSrcRectIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassSrcRectIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
+ pvSwapChain,
+ &psSetDispClassSrcRectIN->sRect);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassColKeyIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassColKeyIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
+ pvSwapChain,
+ psSetDispClassColKeyIN->ui32CKColour);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSetDispClassColKeyIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSetDispClassColKeyIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
+ pvSwapChain,
+ psSetDispClassColKeyIN->ui32CKColour);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
+ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+ IMG_UINT32 i;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE *pahBuffer;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
+
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psGetDispClassBuffersIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psGetDispClassBuffersIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
+ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ psGetDispClassBuffersOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
+ (IMG_PVOID *)&pahBuffer, 0,
+ "Temp Swapchain Buffers");
+
+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+#endif
+
+ psGetDispClassBuffersOUT->eError =
+ PVRSRVGetDCBuffersKM(pvDispClassInfo,
+ pvSwapChain,
+ &psGetDispClassBuffersOUT->ui32BufferCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ pahBuffer,
+#else
+ psGetDispClassBuffersOUT->ahBuffer,
+#endif
+ psGetDispClassBuffersOUT->asPhyAddr);
+ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
+
+ for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hBufferExt;
+#else
+ IMG_HANDLE hBufferExt;
+#endif
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hBufferExt,
+ pahBuffer[i],
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassBuffersIN->hSwapChain);
+#else
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &hBufferExt,
+ psGetDispClassBuffersOUT->ahBuffer[i],
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetDispClassBuffersIN->hSwapChain);
+#endif
+
+ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS,
+ (IMG_PVOID)pahBuffer, 0);
+#endif
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChainBuf;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE hPrivateTag;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSwapDispClassBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &pvSwapChainBuf,
+ psSwapDispClassBufferIN->hBuffer,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ psSwapDispClassBufferIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psSwapDispClassBufferIN->hPrivateTag != 0)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &hPrivateTag,
+ psSwapDispClassBufferIN->hPrivateTag,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ psSwapDispClassBufferIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ hPrivateTag = IMG_NULL;
+ }
+#endif
+
+ psRetOUT->eError =
+ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
+ pvSwapChainBuf,
+ psSwapDispClassBufferIN->ui32SwapInterval,
+#if defined (SUPPORT_SID_INTERFACE)
+ hPrivateTag,
+#else
+ psSwapDispClassBufferIN->hPrivateTag,
+#endif
+ psSwapDispClassBufferIN->ui32ClipRectCount,
+ psSwapDispClassBufferIN->sClipRect);
+
+ return 0;
+}
+
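+ /* Second swap-to-buffer entry point: verifies write access to the caller's
+  * meminfo/syncinfo handle arrays, resolves each handle in place, optionally
+  * copies caller-supplied private data into a kernel buffer, and then queues
+  * the swap through PVRSRVSwapToDCBuffer2KM (freeing the private data copy if
+  * that call fails).
+  */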
+static IMG_INT
+PVRSRVSwapToDCBuffer2BW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2 *psSwapDispClassBufferIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvPrivData = IMG_NULL;
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+ IMG_UINT32 i;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSwapDispClassBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_INFO handle"));
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSwapDispClassBufferIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ psSwapDispClassBufferIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_BUFFER handle"));
+ return 0;
+ }
+
+ if(!OSAccessOK(PVR_VERIFY_WRITE,
+ psSwapDispClassBufferIN->ppsKernelMemInfos,
+ sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelMemInfos"));
+ return -EFAULT;
+ }
+
+ if(!OSAccessOK(PVR_VERIFY_WRITE,
+ psSwapDispClassBufferIN->ppsKernelSyncInfos,
+ sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelSyncInfos"));
+ return -EFAULT;
+ }
+
+ for (i = 0; i < psSwapDispClassBufferIN->ui32NumMemInfos; i++)
+ {
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelMemInfo,
+ psSwapDispClassBufferIN->ppsKernelMemInfos[i],
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up MEM_INFO handle"));
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelSyncInfo,
+ psSwapDispClassBufferIN->ppsKernelSyncInfos[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up SYNC_INFO handle"));
+ return 0;
+ }
+
+ psSwapDispClassBufferIN->ppsKernelMemInfos[i] = psKernelMemInfo;
+ psSwapDispClassBufferIN->ppsKernelSyncInfos[i] = psKernelSyncInfo;
+ }
+
+ if(psSwapDispClassBufferIN->ui32PrivDataLength > 0)
+ {
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psSwapDispClassBufferIN->ui32PrivDataLength,
+ (IMG_VOID **)&pvPrivData, IMG_NULL,
+ "Swap Command Private Data") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2BW: Failed to allocate private data space"));
+ return -ENOMEM;
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ pvPrivData,
+ psSwapDispClassBufferIN->pvPrivData,
+ psSwapDispClassBufferIN->ui32PrivDataLength) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to copy private data"));
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psSwapDispClassBufferIN->ui32PrivDataLength,
+ pvPrivData, IMG_NULL);
+ return -EFAULT;
+ }
+ }
+
+ psRetOUT->eError =
+ PVRSRVSwapToDCBuffer2KM(pvDispClassInfo,
+ pvSwapChain,
+ psSwapDispClassBufferIN->ui32SwapInterval,
+ psSwapDispClassBufferIN->ppsKernelMemInfos,
+ psSwapDispClassBufferIN->ppsKernelSyncInfos,
+ psSwapDispClassBufferIN->ui32NumMemInfos,
+ pvPrivData,
+ psSwapDispClassBufferIN->ui32PrivDataLength);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psSwapDispClassBufferIN->ui32PrivDataLength,
+ pvPrivData, IMG_NULL);
+ }
+
+ return 0;
+}
+
+
+
+static IMG_INT
+PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvDispClassInfo;
+ IMG_VOID *pvSwapChain;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvDispClassInfo,
+ psSwapDispClassSystemIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_DISP_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+ &pvSwapChain,
+ psSwapDispClassSystemIN->hSwapChain,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ psSwapDispClassSystemIN->hDeviceKM);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ psRetOUT->eError =
+ PVRSRVSwapToDCSystemKM(pvDispClassInfo,
+ pvSwapChain);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
+ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hBufClassInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1)
+
+ psOpenBufferClassDeviceOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psOpenBufferClassDeviceIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psOpenBufferClassDeviceOUT->eError =
+ PVRSRVOpenBCDeviceKM(psPerProc,
+ psOpenBufferClassDeviceIN->ui32DeviceID,
+ hDevCookieInt,
+ &hBufClassInfo);
+ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psOpenBufferClassDeviceOUT->hDeviceKM,
+ hBufClassInfo,
+ PVRSRV_HANDLE_TYPE_BUF_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvBufClassInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psCloseBufferClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psCloseBufferClassDeviceIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvBufClassInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
+
+ psGetBufferClassInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psGetBufferClassInfoIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetBufferClassInfoOUT->eError =
+ PVRSRVGetBCInfoKM(pvBufClassInfo,
+ &psGetBufferClassInfoOUT->sBufferInfo);
+ return 0;
+}
+
+static IMG_INT
+PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
+ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_VOID *pvBufClassInfo;
+ IMG_HANDLE hBufferInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1)
+
+ psGetBufferClassBufferOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvBufClassInfo,
+ psGetBufferClassBufferIN->hDeviceKM,
+ PVRSRV_HANDLE_TYPE_BUF_INFO);
+ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetBufferClassBufferOUT->eError =
+ PVRSRVGetBCBufferKM(pvBufClassInfo,
+ psGetBufferClassBufferIN->ui32BufferIndex,
+ &hBufferInt);
+
+ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psGetBufferClassBufferOUT->hBuffer,
+ hBufferInt,
+ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
+ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
+ psGetBufferClassBufferIN->hDeviceKM);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
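+ /* Shared system memory wrappers: allocate or free system memory that can be
+  * shared between processes, exposing the kernel meminfo (and its mapping
+  * info) through SHARED_SYS_MEM_INFO handles.
+  */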
+static IMG_INT
+PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
+ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1)
+
+ psAllocSharedSysMemOUT->eError =
+ PVRSRVAllocSharedSysMemoryKM(psPerProc,
+ psAllocSharedSysMemIN->ui32Flags,
+ psAllocSharedSysMemIN->ui32Size,
+ &psKernelMemInfo);
+ if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
+
+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
+ psKernelMemInfo->pvLinAddrKM;
+
+ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
+ psKernelMemInfo->ui32Flags;
+ psAllocSharedSysMemOUT->sClientMemInfo.uAllocSize =
+ psKernelMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ }
+ else
+ {
+ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
+#else
+ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc)
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
+ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
+
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psKernelMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psFreeSharedSysMemIN->hKernelMemInfo,
+#else
+ psFreeSharedSysMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ return 0;
+
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ return 0;
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psFreeSharedSysMemIN->hMappingInfo != 0)
+ {
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psFreeSharedSysMemIN->hMappingInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#endif
+
+ psFreeSharedSysMemOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ psFreeSharedSysMemIN->hKernelMemInfo,
+#else
+ psFreeSharedSysMemIN->psKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ return 0;
+}
+
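+ /* Maps an existing kernel meminfo into the calling process. The handle type
+  * is validated (MEM_INFO, MEM_INFO_REF or SHARED_SYS_MEM_INFO on secure
+  * handle builds), new sub-handles are allocated under the original parent
+  * handle, and the sync object is exposed as well unless the allocation was
+  * made with PVRSRV_MEM_NO_SYNCOBJ.
+  */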
+static IMG_INT
+PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
+ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_HANDLE_TYPE eHandleType;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hParent;
+#else
+ IMG_HANDLE hParent;
+#endif
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2)
+
+ psMapMemInfoMemOUT->eError =
+ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
+ (IMG_VOID **)&psKernelMemInfo,
+ &eHandleType,
+ psMapMemInfoMemIN->hKernelMemInfo);
+ if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ switch (eHandleType)
+ {
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ case PVRSRV_HANDLE_TYPE_MEM_INFO:
+ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
+ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
+#else
+ case PVRSRV_HANDLE_TYPE_NONE:
+#endif
+ break;
+ default:
+ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE;
+ return 0;
+ }
+
+
+ psMapMemInfoMemOUT->eError =
+ PVRSRVGetParentHandle(psPerProc->psHandleBase,
+ &hParent,
+ psMapMemInfoMemIN->hKernelMemInfo,
+ eHandleType);
+ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ if (hParent == 0)
+#else
+ if (hParent == IMG_NULL)
+#endif
+ {
+ hParent = psMapMemInfoMemIN->hKernelMemInfo;
+ }
+
+ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
+ 0,
+ sizeof(psMapMemInfoMemOUT->sClientMemInfo));
+
+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
+ psKernelMemInfo->pvLinAddrKM;
+
+ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
+ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
+ psKernelMemInfo->sDevVAddr;
+ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
+ psKernelMemInfo->ui32Flags;
+ psMapMemInfoMemOUT->sClientMemInfo.uAllocSize =
+ psKernelMemInfo->uAllocSize;
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ hParent);
+ }
+ else
+ {
+ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = 0;
+ }
+#else
+ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+#endif
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
+ psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ hParent);
+
+ if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
+ {
+
+ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
+ 0,
+ sizeof (PVRSRV_CLIENT_SYNC_INFO));
+ }
+ else
+ {
+
+#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS)
+ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
+ psKernelMemInfo->psKernelSyncInfo->psSyncData;
+ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
+ psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
+ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
+ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
+ psMapMemInfoMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr =
+ psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL)
+ {
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo,
+ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+ else
+ {
+ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = 0;
+ }
+#else
+ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
+ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
+#endif
+#endif
+
+ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
+ psKernelMemInfo->psKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
+
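+ /* Placeholder dispatch entry. Reaching this function means a bridge ID was
+  * not wired up to a real wrapper, so it logs an error and fails the ioctl
+  * with -ENOTTY.
+  */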
+IMG_INT
+DummyBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+#if !defined(DEBUG)
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+#endif
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32BridgeID));
+#endif
+ return -ENOTTY;
+}
+
+
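+ /* Fills one slot of g_BridgeDispatchTable, warning if an existing entry
+  * would be clobbered or if the index leaves a gap (or goes backwards)
+  * relative to the previously set entry, which usually indicates a mismatch
+  * in the bridge ID numbering.
+  */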
+IMG_VOID
+_SetDispatchTableEntry(IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName)
+{
+ static IMG_UINT32 ui32PrevIndex = ~0UL;
+#if !defined(DEBUG)
+ PVR_UNREFERENCED_PARAMETER(pszIOCName);
+#endif
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+#endif
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
+#endif
+
+
+ if(g_BridgeDispatchTable[ui32Index].pfFunction)
+ {
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
+ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)",
+ __FUNCTION__, pszIOCName, ui32Index));
+#endif
+ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+ }
+
+
+ if((ui32PrevIndex != ~0UL) &&
+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+ (ui32Index <= ui32PrevIndex)))
+ {
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+ ui32Index, pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+ }
+
+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+#endif
+
+ ui32PrevIndex = ui32Index;
+}
+
+static IMG_INT
+PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
+
+
+ if((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ psRetOUT->eError = PVRSRV_ERROR_SRV_CONNECT_FAILED;
+ return 0;
+ }
+
+#if defined (__linux__)
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
+#endif
+ psPerProc->bInitProcess = IMG_TRUE;
+
+ psRetOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
+
+ if(!psPerProc->bInitProcess)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_SRV_DISCONNECT_FAILED;
+ return 0;
+ }
+
+ psPerProc->bInitProcess = IMG_FALSE;
+
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
+ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
+
+ psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
+
+ PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL ,
+ ((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful))
+ ? IMG_TRUE : IMG_FALSE);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hOSEventKM;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
+
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hOSEventKM,
+ psEventObjectWaitIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = OSEventObjectWaitKM(hOSEventKM);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
+ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_EVENTOBJECT_KM sEventObject;
+ IMG_HANDLE hOSEvent;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1)
+
+ psEventObjectOpenOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sEventObject.hOSEventKM,
+#else
+ &psEventObjectOpenIN->sEventObject.hOSEventKM,
+#endif
+ psEventObjectOpenIN->sEventObject.hOSEventKM,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSMemCopy(&sEventObject.szName,
+ &psEventObjectOpenIN->sEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH);
+
+ psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&sEventObject, &hOSEvent);
+#else
+ psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
+#endif
+
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+#if !defined (WINXP) && !defined(SUPPORT_VISTA)
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ hOSEvent,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
+#endif
+#else
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ psEventObjectOpenOUT->hOSEvent,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
+#endif
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hOSEventKM;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_EVENTOBJECT_KM sEventObject;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sEventObject.hOSEventKM,
+#else
+ &psEventObjectCloseIN->sEventObject.hOSEventKM,
+#endif
+ psEventObjectCloseIN->sEventObject.hOSEventKM,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+ &hOSEventKM,
+ psEventObjectCloseIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if(CopyFromUserWrapper(psPerProc, ui32BridgeID,
+ &sEventObject.szName,
+ &psEventObjectCloseIN->sEventObject.szName,
+ EVENTOBJNAME_MAXLENGTH) != PVRSRV_OK)
+ {
+
+ return -EFAULT;
+ }
+
+ psRetOUT->eError = OSEventObjectCloseKM(&sEventObject, hOSEventKM);
+#else
+ psRetOUT->eError = OSEventObjectCloseKM(&psEventObjectCloseIN->sEventObject, hOSEventKM);
+#endif
+
+ return 0;
+}
+
+
+typedef struct _MODIFY_SYNC_OP_INFO
+{
+ IMG_HANDLE hResItem;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ IMG_UINT32 ui32ModifyFlags;
+ IMG_UINT32 ui32ReadOpsPendingSnapShot;
+ IMG_UINT32 ui32WriteOpsPendingSnapShot;
+ IMG_UINT32 ui32ReadOps2PendingSnapShot;
+} MODIFY_SYNC_OP_INFO;
+
+
+static PVRSRV_ERROR DoQuerySyncOpsSatisfied(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ IMG_UINT32 ui32ReadOpsPendingSnapShot,
+ IMG_UINT32 ui32WriteOpsPendingSnapShot,
+ IMG_UINT32 ui32ReadOps2PendingSnapShot)
+{
+ IMG_UINT32 ui32WriteOpsPending;
+ IMG_UINT32 ui32ReadOpsPending;
+ IMG_UINT32 ui32ReadOps2Pending;
+
+
+ if (!psKernelSyncInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+
+
+
+
+
+
+
+
+
+ ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ if((ui32WriteOpsPending - ui32WriteOpsPendingSnapShot >=
+ ui32WriteOpsPending - psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) &&
+ (ui32ReadOpsPending - ui32ReadOpsPendingSnapShot >=
+ ui32ReadOpsPending - psKernelSyncInfo->psSyncData->ui32ReadOpsComplete) &&
+ (ui32ReadOps2Pending - ui32ReadOps2PendingSnapShot >=
+ ui32ReadOps2Pending - psKernelSyncInfo->psSyncData->ui32ReadOps2Complete))
+ {
+#if defined(PDUMP) && !defined(SUPPORT_VGX)
+
+ PDumpComment("Poll for read ops complete to reach value (pdump: %u, actual snapshot: %u)",
+ psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ ui32ReadOpsPendingSnapShot);
+ PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
+
+
+ PDumpComment("Poll for write ops complete to reach value (pdump: %u, actual snapshot: %u)",
+ psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
+ ui32WriteOpsPendingSnapShot);
+ PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ psKernelSyncInfo->psSyncData->ui32LastOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
+
+
+#endif
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+}
+
+
+static PVRSRV_ERROR DoModifyCompleteSyncOps(MODIFY_SYNC_OP_INFO *psModSyncOpInfo)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+
+ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
+
+ if (!psKernelSyncInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
+ || (psModSyncOpInfo->ui32ReadOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
+ {
+ return PVRSRV_ERROR_BAD_SYNC_STATE;
+ }
+
+
+ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
+ }
+
+
+ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ if (!pvParam)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
+
+ if (psModSyncOpInfo->psKernelSyncInfo)
+ {
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+ psModSyncOpInfo->ui32ReadOps2PendingSnapShot) == PVRSRV_OK)
+ {
+ goto OpFlushedComplete;
+ }
+ PVR_DPF((PVR_DBG_WARNING, "ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush"));
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush."));
+ PVR_DPF((PVR_DBG_ERROR, " Write ops pending snapshot = %d, write ops complete = %d",
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+ psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32WriteOpsComplete));
+ PVR_DPF((PVR_DBG_ERROR, " Read ops pending snapshot = %d, read ops complete = %d",
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOpsComplete));
+	PVR_DPF((PVR_DBG_ERROR, "        Read ops2 pending snapshot = %d, read ops2 complete = %d",
+ psModSyncOpInfo->ui32ReadOps2PendingSnapShot,
+ psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOps2Complete));
+ return PVRSRV_ERROR_TIMEOUT;
+
+OpFlushedComplete:
+ DoModifyCompleteSyncOps(psModSyncOpInfo);
+ PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL);
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
+
+
+
+ PVRSRVScheduleDeviceCallbacks();
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_INT
+PVRSRVCreateSyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ *psCreateSyncInfoModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc, 1)
+
+ ASSIGN_AND_EXIT_ON_ERROR(psCreateSyncInfoModObjOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(MODIFY_SYNC_OP_INFO),
+ (IMG_VOID **)&psModSyncOpInfo, 0,
+ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
+
+ psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
+
+ psCreateSyncInfoModObjOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psCreateSyncInfoModObjOUT->hKernelSyncInfoModObj,
+ psModSyncOpInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ,
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+
+ if (psCreateSyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psModSyncOpInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_MODIFY_SYNC_OPS,
+ psModSyncOpInfo,
+ 0,
+ &ModifyCompleteSyncOpsCallBack);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc)
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVDestroySyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ *psDestroySyncInfoModObjIN,
+ PVRSRV_BRIDGE_RETURN *psDestroySyncInfoModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ);
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo != IMG_NULL)
+ {
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL);
+
+ psDestroySyncInfoModObjOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVReleaseHandle failed"));
+ return 0;
+ }
+
+ psDestroySyncInfoModObjOUT->eError = ResManFreeResByPtr(psModSyncOpInfo->hResItem, CLEANUP_WITH_POLL);
+ if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: ResManFreeResByPtr failed"));
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
+ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
+
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psModifySyncOpsIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psModifySyncOpsIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo)
+ {
+
+ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo Modification object is not empty"));
+ return 0;
+ }
+
+
+ if (psKernelSyncInfo == IMG_NULL)
+ {
+ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo bad handle"));
+ return 0;
+ }
+
+ PVRSRVKernelSyncInfoIncRef(psKernelSyncInfo, IMG_NULL);
+
+ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
+ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ psModSyncOpInfo->ui32ReadOps2PendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+
+
+ psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ psModifySyncOpsOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+ {
+ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+
+
+ psModifySyncOpsOUT->eError = ResManDissociateRes(psModSyncOpInfo->hResItem,
+ psPerProc->hResManContext);
+
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: ResManDissociateRes failed"));
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
+ PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
+
+ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psModifySyncOpsIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+ {
+
+ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ psModifySyncOpsOUT->eError = DoModifyCompleteSyncOps(psModSyncOpInfo);
+
+ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: DoModifyCompleteSyncOps failed"));
+ return 0;
+ }
+
+ PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL);
+ psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
+
+
+ PVRSRVScheduleDeviceCallbacks();
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVSyncOpsTakeTokenBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenIN,
+ PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN);
+
+ psSyncOpsTakeTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psSyncOpsTakeTokenIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsTakeTokenOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsTakeTokenBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+
+
+ psSyncOpsTakeTokenOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+ psSyncOpsTakeTokenOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSyncOpsTakeTokenOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVSyncOpsFlushToTokenBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN *psSyncOpsFlushToTokenIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToTokenOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ IMG_UINT32 ui32ReadOpsPendingSnapshot;
+ IMG_UINT32 ui32WriteOpsPendingSnapshot;
+ IMG_UINT32 ui32ReadOps2PendingSnapshot;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN);
+
+ psSyncOpsFlushToTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psKernelSyncInfo,
+ psSyncOpsFlushToTokenIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ ui32ReadOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOpsPendingSnapshot;
+ ui32WriteOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32WriteOpsPendingSnapshot;
+ ui32ReadOps2PendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOps2PendingSnapshot;
+
+ psSyncOpsFlushToTokenOUT->eError = DoQuerySyncOpsSatisfied(psKernelSyncInfo,
+ ui32ReadOpsPendingSnapshot,
+ ui32WriteOpsPendingSnapshot,
+ ui32ReadOps2PendingSnapshot);
+
+ if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK && psSyncOpsFlushToTokenOUT->eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: DoQuerySyncOpsSatisfied failed"));
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVSyncOpsFlushToModObjBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ *psSyncOpsFlushToModObjIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToModObjOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ);
+
+ psSyncOpsFlushToModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psModSyncOpInfo,
+ psSyncOpsFlushToModObjIN->hKernelSyncInfoModObj,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+ if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+ if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+ {
+
+ psSyncOpsFlushToModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+ psSyncOpsFlushToModObjOUT->eError = DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+ psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+ psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+ psModSyncOpInfo->ui32ReadOps2PendingSnapShot);
+
+ if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK && psSyncOpsFlushToModObjOUT->eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: DoQuerySyncOpsSatisfied failed"));
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVSyncOpsFlushToDeltaBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA *psSyncOpsFlushToDeltaIN,
+ PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToDeltaOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ IMG_UINT32 ui32DeltaRead;
+ IMG_UINT32 ui32DeltaWrite;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA);
+
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psSyncInfo,
+ psSyncOpsFlushToDeltaIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (psSyncOpsFlushToDeltaOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToDeltaBW: PVRSRVLookupHandle failed"));
+ return 0;
+ }
+
+
+ ui32DeltaRead = psSyncInfo->psSyncData->ui32ReadOpsPending - psSyncInfo->psSyncData->ui32ReadOpsComplete;
+ ui32DeltaWrite = psSyncInfo->psSyncData->ui32WriteOpsPending - psSyncInfo->psSyncData->ui32WriteOpsComplete;
+
+ if (ui32DeltaRead <= psSyncOpsFlushToDeltaIN->ui32Delta && ui32DeltaWrite <= psSyncOpsFlushToDeltaIN->ui32Delta)
+ {
+#if defined(PDUMP) && !defined(SUPPORT_VGX)
+
+ PDumpComment("Poll for read ops complete to delta (%u)",
+ psSyncOpsFlushToDeltaIN->ui32Delta);
+ psSyncOpsFlushToDeltaOUT->eError =
+ PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL,
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+
+ PDumpComment("Poll for write ops complete to delta (%u)",
+ psSyncOpsFlushToDeltaIN->ui32Delta);
+ psSyncOpsFlushToDeltaOUT->eError =
+ PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL,
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+#endif
+
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRV_OK;
+ }
+ else
+ {
+ psSyncOpsFlushToDeltaOUT->eError = PVRSRV_ERROR_RETRY;
+ }
+
+ return 0;
+}
+
+
+static PVRSRV_ERROR
+FreeSyncInfoCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pvParam;
+
+ PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL);
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_INT
+PVRSRVAllocSyncInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO *psAllocSyncInfoIN,
+ PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO *psAllocSyncInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContext;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SYNC_INFO);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psAllocSyncInfoOUT->eError, psPerProc, 1)
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_HANDLE *)&psDeviceNode,
+ psAllocSyncInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit;
+ }
+
+ hDevMemContext = psDeviceNode->sDevMemoryInfo.pBMKernelContext;
+
+ eError = PVRSRVAllocSyncInfoKM(psDeviceNode,
+ hDevMemContext,
+ &psSyncInfo);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit;
+ }
+
+ eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psAllocSyncInfoOUT->hKernelSyncInfo,
+ psSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+
+ if(eError != PVRSRV_OK)
+ {
+ goto allocsyncinfo_errorexit_freesyncinfo;
+ }
+
+ psSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SYNC_INFO,
+ psSyncInfo,
+ 0,
+ FreeSyncInfoCallback);
+
+
+ goto allocsyncinfo_commit;
+
+
+ allocsyncinfo_errorexit_freesyncinfo:
+ PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL);
+
+ allocsyncinfo_errorexit:
+
+
+ allocsyncinfo_commit:
+ psAllocSyncInfoOUT->eError = eError;
+ COMMIT_HANDLE_BATCH_OR_ERROR(eError, psPerProc);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVFreeSyncInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_FREE_SYNC_INFO *psFreeSyncInfoIN,
+ PVRSRV_BRIDGE_RETURN *psFreeSyncInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SYNC_INFO);
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psSyncInfo,
+ psFreeSyncInfoIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVLookupHandle failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psFreeSyncInfoIN->hKernelSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVReleaseHandle failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
+
+ eError = ResManFreeResByPtr(psSyncInfo->hResItem, CLEANUP_WITH_POLL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: ResManFreeResByPtr failed"));
+ psFreeSyncInfoOUT->eError = eError;
+ return 0;
+ }
+
+ return 0;
+}
+
+
+PVRSRV_ERROR
+CommonBridgeInit(IMG_VOID)
+{
+ IMG_UINT32 i;
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS, PVRSRVChangeDeviceMemoryAttributesBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2, PVRSRVMapDeviceMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2, PVRSRVExportDeviceMemBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
+
+
+#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
+#endif
+
+
+
+#if defined(PDUMP)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PDumpMemPagesBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
+#endif
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
+#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
+#else
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, DummyBW);
+#endif
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2, PVRSRVSwapToDCBuffer2BW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, &PVRSRVInitSrvConnectBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, &PVRSRVInitSrvDisconnectBW);
+
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, &PVRSRVEventObjectWaitBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, &PVRSRVEventObjectOpenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, &PVRSRVEventObjectCloseBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRVCreateSyncInfoModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRVDestroySyncInfoModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRVSyncOpsTakeTokenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRVSyncOpsFlushToTokenBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRVSyncOpsFlushToModObjBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRVSyncOpsFlushToDeltaBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRVAllocSyncInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRVFreeSyncInfoBW);
+
+#if defined (SUPPORT_SGX)
+ SetSGXDispatchTableEntry();
+#endif
+#if defined (SUPPORT_VGX)
+ SetVGXDispatchTableEntry();
+#endif
+#if defined (SUPPORT_MSVDX)
+ SetMSVDXDispatchTableEntry();
+#endif
+
+
+
+ for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
+ {
+ if(!g_BridgeDispatchTable[i].pfFunction)
+ {
+ g_BridgeDispatchTable[i].pfFunction = &DummyBW;
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
+ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
+ g_BridgeDispatchTable[i].ui32CallCount = 0;
+ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+#endif
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
+{
+ IMG_VOID * psBridgeIn;
+ IMG_VOID * psBridgeOut;
+ BridgeWrapperFunction pfBridgeHandler;
+ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
+ IMG_INT err = -EFAULT;
+
+#if defined(DEBUG_TRACE_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
+ __FUNCTION__,
+ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
+ g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+
+ if(!psPerProc->bInitProcess)
+ {
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+ {
+ if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ }
+ else
+ {
+ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ else
+ {
+
+ switch(ui32BridgeID)
+ {
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
+ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
+ __FUNCTION__));
+ goto return_fault;
+ }
+ }
+ }
+ }
+
+#if defined(__linux__)
+ {
+
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+
+ psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
+ psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
+
+
+ if((psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) ||
+ (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE))
+ {
+ goto return_fault;
+ }
+
+
+ if(psBridgePackageKM->ui32InBufferSize > 0)
+ {
+ if(!OSAccessOK(PVR_VERIFY_READ,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ psBridgeIn,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize)
+ != PVRSRV_OK)
+ {
+ goto return_fault;
+ }
+ }
+ }
+#else
+ psBridgeIn = psBridgePackageKM->pvParamIn;
+ psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+ if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out of range!",
+ __FUNCTION__, ui32BridgeID));
+ goto return_fault;
+ }
+ pfBridgeHandler =
+ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
+ err = pfBridgeHandler(ui32BridgeID,
+ psBridgeIn,
+ psBridgeOut,
+ psPerProc);
+ if(err < 0)
+ {
+ goto return_fault;
+ }
+
+#if defined(__linux__)
+
+ if(CopyToUserWrapper(psPerProc,
+ ui32BridgeID,
+ psBridgePackageKM->pvParamOut,
+ psBridgeOut,
+ psBridgePackageKM->ui32OutBufferSize)
+ != PVRSRV_OK)
+ {
+ goto return_fault;
+ }
+#endif
+
+ err = 0;
+return_fault:
+
+ ReleaseHandleBatch(psPerProc);
+ return err;
+}
+
diff --git a/drivers/gpu/pvr/bridged_pvr_bridge.h b/drivers/gpu/pvr/bridged_pvr_bridge.h
new file mode 100644
index 0000000..6b0dd88
--- /dev/null
+++ b/drivers/gpu/pvr/bridged_pvr_bridge.h
@@ -0,0 +1,232 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "pvr_bridge.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__linux__)
+#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
+#else
+#define PVRSRV_GET_BRIDGE_ID(X) ((X) - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
+#endif
+
+#ifndef ENOMEM
+#define ENOMEM 12
+#endif
+#ifndef EFAULT
+#define EFAULT 14
+#endif
+#ifndef ENOTTY
+#define ENOTTY 25
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
+ IMG_UINT32 ui32BridgeID,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Size);
+#else
+#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
+ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
+#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
+ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
+#endif
+
+
+#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
+ do \
+ { \
+ (error) = (src); \
+ if ((error) != PVRSRV_OK) \
+ { \
+ return (res); \
+ } \
+ } while ((error) != PVRSRV_OK);
+
+#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
+ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
+
+#if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NewHandleBatch)
+#endif
+static INLINE PVRSRV_ERROR
+NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32BatchSize)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(!psPerProc->bHandlesBatched);
+
+ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
+
+ if (eError == PVRSRV_OK)
+ {
+ psPerProc->bHandlesBatched = IMG_TRUE;
+ }
+
+ return eError;
+}
+
+#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
+ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize))
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(CommitHandleBatch)
+#endif
+static INLINE PVRSRV_ERROR
+CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVR_ASSERT(psPerProc->bHandlesBatched);
+
+ psPerProc->bHandlesBatched = IMG_FALSE;
+
+ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
+}
+
+
+#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
+ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ReleaseHandleBatch)
+#endif
+static INLINE IMG_VOID
+ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ if (psPerProc->bHandlesBatched)
+ {
+ psPerProc->bHandlesBatched = IMG_FALSE;
+
+ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
+ }
+}
+#else
+#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize)
+#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc)
+#define ReleaseHandleBatch(psPerProc)
+#endif
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
+ IMG_VOID *psBridgeIn,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+ BridgeWrapperFunction pfFunction;
+#if defined(DEBUG_BRIDGE_KM)
+ const IMG_CHAR *pszIOCName;
+ const IMG_CHAR *pszFunctionName;
+ IMG_UINT32 ui32CallCount;
+ IMG_UINT32 ui32CopyFromUserTotalBytes;
+ IMG_UINT32 ui32CopyToUserTotalBytes;
+#endif
+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX)
+ #if defined(SUPPORT_VGX)
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1)
+ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD
+ #else
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1)
+ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD
+ #endif
+#else
+ #if defined(SUPPORT_SGX)
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
+ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
+ #else
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
+ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD
+ #endif
+#endif
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+IMG_VOID
+_SetDispatchTableEntry(IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName);
+
+
+
+#define SetDispatchTableEntry(ui32Index, pfFunction) \
+ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+#if defined(DEBUG)
+#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
+#else
+#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
+#endif
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+ IMG_UINT32 ui32IOCTLCount;
+ IMG_UINT32 ui32TotalCopyFromUserBytes;
+ IMG_UINT32 ui32TotalCopyToUserBytes;
+}PVRSRV_BRIDGE_GLOBAL_STATS;
+
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+
+PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
+
+IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/bridged_support.c b/drivers/gpu/pvr/bridged_support.c
new file mode 100644
index 0000000..dad0800
--- /dev/null
+++ b/drivers/gpu/pvr/bridged_support.c
@@ -0,0 +1,89 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_defs.h"
+#include "servicesint.h"
+#include "bridged_support.h"
+
+
+PVRSRV_ERROR
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_SID hMHandle)
+#else
+PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle)
+#endif
+{
+ IMG_HANDLE hMHandleInt;
+ PVRSRV_HANDLE_TYPE eHandleType;
+ PVRSRV_ERROR eError;
+
+
+ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
+ &eHandleType,
+ hMHandle);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ switch(eHandleType)
+ {
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ case PVRSRV_HANDLE_TYPE_MEM_INFO:
+ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
+ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
+ {
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt;
+
+ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
+
+ break;
+ }
+ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
+ {
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM;
+
+ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
+
+ break;
+ }
+ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
+ {
+ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
+ break;
+ }
+#else
+ case PVRSRV_HANDLE_TYPE_NONE:
+ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
+ break;
+#endif
+ default:
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/pvr/bridged_support.h b/drivers/gpu/pvr/bridged_support.h
new file mode 100644
index 0000000..d027290
--- /dev/null
+++ b/drivers/gpu/pvr/bridged_support.h
@@ -0,0 +1,47 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __BRIDGED_SUPPORT_H__
+#define __BRIDGED_SUPPORT_H__
+
+#include "handle.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_SID hMHandle);
+#else
+PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/buffer_manager.c b/drivers/gpu/pvr/buffer_manager.c
new file mode 100644
index 0000000..ee65df0
--- /dev/null
+++ b/drivers/gpu/pvr/buffer_manager.c
@@ -0,0 +1,2549 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+
+#include "sysconfig.h"
+#include "hash.h"
+#include "ra.h"
+#include "pdump_km.h"
+#include "lists.h"
+
+static IMG_BOOL
+ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
+static IMG_VOID
+BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
+static IMG_BOOL
+BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags, IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase);
+
+static IMG_BOOL
+DevMemoryAlloc (BM_CONTEXT *pBMContext,
+ BM_MAPPING *pMapping,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 dev_vaddr_alignment,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+static IMG_VOID
+DevMemoryFree (BM_MAPPING *pMapping);
+
+static IMG_BOOL
+AllocMemory (BM_CONTEXT *pBMContext,
+ BM_HEAP *psBMHeap,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ BM_BUF *pBuf)
+{
+ BM_MAPPING *pMapping;
+ IMG_UINTPTR_T uOffset;
+ RA_ARENA *pArena = IMG_NULL;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)",
+ uSize, uFlags, uDevVAddrAlignment));
+
+
+
+
+ if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
+ {
+ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
+ {
+
+ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
+ return IMG_FALSE;
+ }
+
+
+
+
+ if(psBMHeap->ui32Attribs
+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
+ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
+ {
+
+ pArena = psBMHeap->pImportArena;
+ PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION);
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
+ return IMG_FALSE;
+ }
+
+
+ if (!RA_Alloc(pArena,
+ uSize,
+ IMG_NULL,
+ (IMG_VOID*) &pMapping,
+ uFlags,
+ uDevVAddrAlignment,
+ 0,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
+ return IMG_FALSE;
+ }
+
+ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
+ if(pMapping->CpuVAddr)
+ {
+ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
+ }
+ else
+ {
+ pBuf->CpuVAddr = IMG_NULL;
+ }
+
+ if(uSize == pMapping->uSize)
+ {
+ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
+ }
+ else
+ {
+ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
+ uOffset,
+ uSize,
+ psBMHeap->ui32Attribs,
+ &pBuf->hOSMemHandle)!=PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
+ return IMG_FALSE;
+ }
+ }
+
+
+ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
+
+ if(uFlags & PVRSRV_MEM_ZERO)
+ {
+ if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
+ {
+ return IMG_FALSE;
+ }
+ }
+ }
+ else
+ {
+ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
+ {
+
+ PVR_ASSERT(psDevVAddr != IMG_NULL);
+
+ if (psDevVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
+ return IMG_FALSE;
+ }
+
+
+ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
+ uSize,
+ IMG_NULL,
+ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
+ uDevVAddrAlignment,
+ psDevVAddr);
+
+
+ pBuf->DevVAddr = *psDevVAddr;
+ }
+ else
+ {
+ IMG_BOOL bResult;
+
+
+
+ bResult = pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
+ uSize,
+ IMG_NULL,
+ 0,
+ uDevVAddrAlignment,
+ &pBuf->DevVAddr);
+
+ if(!bResult)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: MMUAlloc failed"));
+ return IMG_FALSE;
+ }
+ }
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (struct _BM_MAPPING_),
+ (IMG_PVOID *)&pMapping, IMG_NULL,
+ "Buffer Manager Mapping") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping)));
+ return IMG_FALSE;
+ }
+
+
+ pBuf->CpuVAddr = IMG_NULL;
+ pBuf->hOSMemHandle = 0;
+ pBuf->CpuPAddr.uiAddr = 0;
+
+
+ pMapping->CpuVAddr = IMG_NULL;
+ pMapping->CpuPAddr.uiAddr = 0;
+ pMapping->DevVAddr = pBuf->DevVAddr;
+ pMapping->psSysAddr = IMG_NULL;
+ pMapping->uSize = uSize;
+ pMapping->hOSMemHandle = 0;
+ }
+
+
+ pMapping->pArena = pArena;
+
+
+ pMapping->pBMHeap = psBMHeap;
+ pBuf->pMapping = pMapping;
+
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
+ (IMG_UINTPTR_T)pMapping,
+ pMapping->DevVAddr.uiAddr,
+ (IMG_UINTPTR_T)pMapping->CpuVAddr,
+ pMapping->CpuPAddr.uiAddr,
+ pMapping->uSize));
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "AllocMemory: pBuf=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
+ (IMG_UINTPTR_T)pBuf,
+ pBuf->DevVAddr.uiAddr,
+ (IMG_UINTPTR_T)pBuf->CpuVAddr,
+ pBuf->CpuPAddr.uiAddr,
+ uSize));
+
+
+ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
+
+ return IMG_TRUE;
+}
+
+
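+/* WrapMemory: wrap caller-supplied physical pages (contiguous or scattered,
+ * with or without an existing CPU mapping) in a new BM_MAPPING, register or
+ * reserve them with the OS layer, then map them into the device MMU via
+ * DevMemoryAlloc.
+ */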
+static IMG_BOOL
+WrapMemory (BM_HEAP *psBMHeap,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T ui32BaseOffset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psAddr,
+ IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 uFlags,
+ BM_BUF *pBuf)
+{
+ IMG_DEV_VIRTADDR DevVAddr = {0};
+ BM_MAPPING *pMapping;
+ IMG_BOOL bResult;
+ IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%08x, flags=0x%x)",
+ (IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
+
+ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
+
+ PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
+
+ uSize += ui32BaseOffset;
+ uSize = HOST_PAGEALIGN (uSize);
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*pMapping),
+ (IMG_PVOID *)&pMapping, IMG_NULL,
+ "Mocked-up mapping") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
+ return IMG_FALSE;
+ }
+
+ OSMemSet(pMapping, 0, sizeof (*pMapping));
+
+ pMapping->uSize = uSize;
+ pMapping->pBMHeap = psBMHeap;
+
+ if(pvCPUVAddr)
+ {
+ pMapping->CpuVAddr = pvCPUVAddr;
+
+ if (bPhysContig)
+ {
+ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
+
+ if(OSRegisterMem(pMapping->CpuPAddr,
+ pMapping->CpuVAddr,
+ pMapping->uSize,
+ uFlags,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, Size=%d) failed",
+ pMapping->CpuPAddr.uiAddr, pMapping->uSize));
+ goto fail_cleanup;
+ }
+ }
+ else
+ {
+ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
+ pMapping->psSysAddr = psAddr;
+
+ if(OSRegisterDiscontigMem(pMapping->psSysAddr,
+ pMapping->CpuVAddr,
+ pMapping->uSize,
+ uFlags,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem Size=%d) failed",
+ pMapping->uSize));
+ goto fail_cleanup;
+ }
+ }
+ }
+ else
+ {
+ if (bPhysContig)
+ {
+ pMapping->eCpuMemoryOrigin = hm_wrapped;
+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
+
+ if(OSReservePhys(pMapping->CpuPAddr,
+ pMapping->uSize,
+ uFlags,
+ &pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
+ pMapping->CpuPAddr.uiAddr, pMapping->uSize));
+ goto fail_cleanup;
+ }
+ }
+ else
+ {
+ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
+ pMapping->psSysAddr = psAddr;
+
+ if(OSReserveDiscontigPhys(pMapping->psSysAddr,
+ pMapping->uSize,
+ uFlags,
+ &pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
+ pMapping->uSize));
+ goto fail_cleanup;
+ }
+ }
+ }
+
+
+ bResult = DevMemoryAlloc(psBMHeap->pBMContext,
+ pMapping,
+ IMG_NULL,
+ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
+ IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
+ &DevVAddr);
+ if (!bResult)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "WrapMemory: DevMemoryAlloc(0x%x) failed",
+ pMapping->uSize));
+ goto fail_cleanup;
+ }
+
+
+ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
+ if(!ui32BaseOffset)
+ {
+ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
+ }
+ else
+ {
+ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
+ ui32BaseOffset,
+ (pMapping->uSize-ui32BaseOffset),
+ uFlags,
+ &pBuf->hOSMemHandle)!=PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
+ goto fail_cleanup;
+ }
+ }
+ if(pMapping->CpuVAddr)
+ {
+ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
+ }
+ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
+
+ if(uFlags & PVRSRV_MEM_ZERO)
+ {
+ if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
+ {
+ return IMG_FALSE;
+ }
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
+ pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
+ pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize));
+
+ pBuf->pMapping = pMapping;
+ return IMG_TRUE;
+
+fail_cleanup:
+ if(ui32BaseOffset && pBuf->hOSMemHandle)
+ {
+ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
+ }
+
+ if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
+ {
+ switch(pMapping->eCpuMemoryOrigin)
+ {
+ case hm_wrapped:
+ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_virtaddr:
+ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_scatter:
+ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_scatter_virtaddr:
+ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
+
+
+ return IMG_FALSE;
+}
+
+
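+/* ZeroBuf: zero a buffer's contents. Uses the existing CPU mapping when one
+ * is available, maps contiguous memory through a single temporary kernel
+ * mapping, or walks non-contiguous memory page by page.
+ */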
+static IMG_BOOL
+ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
+{
+ IMG_VOID *pvCpuVAddr;
+
+ if(pBuf->CpuVAddr)
+ {
+ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
+ }
+ else if(pMapping->eCpuMemoryOrigin == hm_contiguous
+ || pMapping->eCpuMemoryOrigin == hm_wrapped)
+ {
+ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
+ ui32Bytes,
+ PVRSRV_HAP_KERNEL_ONLY
+ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
+ IMG_NULL);
+ if(!pvCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
+ return IMG_FALSE;
+ }
+ OSMemSet(pvCpuVAddr, 0, ui32Bytes);
+ OSUnMapPhysToLin(pvCpuVAddr,
+ ui32Bytes,
+ PVRSRV_HAP_KERNEL_ONLY
+ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
+ IMG_NULL);
+ }
+ else
+ {
+ IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
+ IMG_SIZE_T ui32CurrentOffset = 0;
+ IMG_CPU_PHYADDR CpuPAddr;
+
+
+ PVR_ASSERT(pBuf->hOSMemHandle);
+
+ while(ui32BytesRemaining > 0)
+ {
+ IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
+ CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
+
+ if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
+ {
+ ui32BlockBytes =
+ MIN(ui32BytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr));
+ }
+
+ pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
+ ui32BlockBytes,
+ PVRSRV_HAP_KERNEL_ONLY
+ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
+ IMG_NULL);
+ if(!pvCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
+ return IMG_FALSE;
+ }
+ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
+ OSUnMapPhysToLin(pvCpuVAddr,
+ ui32BlockBytes,
+ PVRSRV_HAP_KERNEL_ONLY
+ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
+ IMG_NULL);
+
+ ui32BytesRemaining -= ui32BlockBytes;
+ ui32CurrentOffset += ui32BlockBytes;
+ }
+ }
+
+ return IMG_TRUE;
+}
+
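+/* FreeBuf: release a buffer once its reference and export counts reach zero.
+ * Releases any sub-memory handle, returns RAM-backed space to the arena or
+ * unreserves/unregisters wrapped pages, unmaps the device virtual range and
+ * frees the mapping and buffer structures.
+ */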
+static IMG_VOID
+FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
+{
+ BM_MAPPING *pMapping;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "FreeBuf: pBuf=0x%x: DevVAddr=%08X CpuVAddr=0x%x CpuPAddr=%08X",
+ (IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
+ (IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
+
+
+ pMapping = pBuf->pMapping;
+
+ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
+ if (psDeviceNode->pfnCacheInvalidate)
+ {
+ psDeviceNode->pfnCacheInvalidate(psDeviceNode);
+ }
+
+ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
+ {
+
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+
+ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
+ {
+
+ PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
+ }
+ else
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
+ pBuf->pMapping = IMG_NULL;
+ }
+ }
+ }
+ else
+ {
+
+ if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
+ {
+
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+
+ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
+ }
+ }
+ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
+ {
+
+
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+
+
+
+ PVR_ASSERT(pBuf->ui32ExportCount == 0);
+ RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
+ }
+ }
+ else
+ {
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+ switch (pMapping->eCpuMemoryOrigin)
+ {
+ case hm_wrapped:
+ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_virtaddr:
+ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_scatter:
+ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
+ break;
+ case hm_wrapped_scatter_virtaddr:
+ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
+ break;
+ default:
+ break;
+ }
+ }
+ if (bFromAllocator)
+ DevMemoryFree (pMapping);
+
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
+ pBuf->pMapping = IMG_NULL;
+ }
+ }
+ }
+
+
+ if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
+
+ }
+}
+
+static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
+{
+ if(psBMHeap->ui32Attribs
+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
+ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
+ {
+ if (psBMHeap->pImportArena)
+ {
+ IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
+ if (!bTestDelete)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP;
+ }
+ }
+ }
+ return PVRSRV_OK;
+}
+
+
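+/* BM_DestroyContext: drop a reference on a buffer manager context. When the
+ * count reaches zero, check that every heap's import arena can be deleted
+ * before destroying the context through the resource manager.
+ */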
+PVRSRV_ERROR
+BM_DestroyContext(IMG_HANDLE hBMContext,
+ IMG_BOOL *pbDestroyed)
+{
+ PVRSRV_ERROR eError;
+ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
+
+ if (pbDestroyed != IMG_NULL)
+ {
+ *pbDestroyed = IMG_FALSE;
+ }
+
+
+
+ if (pBMContext == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ pBMContext->ui32RefCount--;
+
+ if (pBMContext->ui32RefCount > 0)
+ {
+
+ return PVRSRV_OK;
+ }
+
+
+
+
+ eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, &BM_DestroyContext_AnyCb);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
+#if 0
+
+
+
+
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
+ if(ResManFreeSpecial() != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeSpecial failed %d",eError));
+ }
+
+#endif
+ return eError;
+ }
+ else
+ {
+
+ eError = ResManFreeResByPtr(pBMContext->hResItem, CLEANUP_WITH_POLL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
+ return eError;
+ }
+
+
+ if (pbDestroyed != IMG_NULL)
+ {
+ *pbDestroyed = IMG_TRUE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
+
+
+ if(psBMHeap->ui32Attribs
+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
+ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
+ {
+ if (psBMHeap->pImportArena)
+ {
+ RA_Delete (psBMHeap->pImportArena);
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
+ return PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE;
+ }
+
+
+ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ BM_CONTEXT *pBMContext = pvParam;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+
+
+ psDeviceNode = pBMContext->psDeviceNode;
+
+
+
+ eError = List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
+ &BM_DestroyContextCallBack_AnyVaCb,
+ psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+
+ if (pBMContext->psMMUContext)
+ {
+ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
+ }
+
+
+
+ if (pBMContext->pBufferHash)
+ {
+ HASH_Delete(pBMContext->pBufferHash);
+ }
+
+ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
+ {
+
+ psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
+ }
+ else
+ {
+ if (pBMContext->ppsThis != IMG_NULL)
+ {
+
+ List_BM_CONTEXT_Remove(pBMContext);
+ }
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
+{
+ PRESMAN_CONTEXT hResManContext;
+ hResManContext = va_arg(va, PRESMAN_CONTEXT);
+ if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
+ {
+
+ pBMContext->ui32RefCount++;
+ return pBMContext;
+ }
+ return IMG_NULL;
+}
+
+static IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ BM_CONTEXT *pBMContext;
+ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
+ pBMContext = va_arg(va, BM_CONTEXT*);
+ switch(psBMHeap->sDevArena.DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED:
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+
+ psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
+ break;
+ }
+ }
+}
+
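+/* BM_CreateContext: create (or re-reference) a buffer manager context.
+ * Allocates the context and its buffer hash table, initialises an MMU
+ * context, shares the kernel context's shared/exported heaps with
+ * per-process contexts and registers the context with the resource manager.
+ */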
+IMG_HANDLE
+BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_PHYADDR *psPDDevPAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_BOOL *pbCreated)
+{
+ BM_CONTEXT *pBMContext;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ IMG_BOOL bKernelContext;
+ PRESMAN_CONTEXT hResManContext;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));
+
+ if (psPerProc == IMG_NULL)
+ {
+ bKernelContext = IMG_TRUE;
+ hResManContext = psDeviceNode->hResManContext;
+ }
+ else
+ {
+ bKernelContext = IMG_FALSE;
+ hResManContext = psPerProc->hResManContext;
+ }
+
+ if (pbCreated != IMG_NULL)
+ {
+ *pbCreated = IMG_FALSE;
+ }
+
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+ if (bKernelContext == IMG_FALSE)
+ {
+ IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
+ &BM_CreateContext_IncRefCount_AnyVaCb,
+ hResManContext);
+ if (res)
+ {
+ return res;
+ }
+ }
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (struct _BM_CONTEXT_),
+ (IMG_PVOID *)&pBMContext, IMG_NULL,
+ "Buffer Manager Context") != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
+ return IMG_NULL;
+ }
+ OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));
+
+
+ pBMContext->psDeviceNode = psDeviceNode;
+
+
+
+ pBMContext->pBufferHash = HASH_Create(32);
+ if (pBMContext->pBufferHash==IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
+ goto cleanup;
+ }
+
+ if((IMG_NULL == psDeviceNode->pfnMMUInitialise) || (psDeviceNode->pfnMMUInitialise(psDeviceNode,
+ &pBMContext->psMMUContext,
+ psPDDevPAddr) != PVRSRV_OK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
+ goto cleanup;
+ }
+
+ if(bKernelContext)
+ {
+
+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
+ psDevMemoryInfo->pBMKernelContext = pBMContext;
+ }
+ else
+ {
+
+
+
+
+
+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
+
+ if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
+ goto cleanup;
+ }
+
+ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
+
+
+
+
+
+ pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
+
+
+
+
+ List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
+ &BM_CreateContext_InsertHeap_ForEachVaCb,
+ psDeviceNode,
+ pBMContext);
+
+
+ List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
+ }
+
+
+ pBMContext->ui32RefCount++;
+
+
+ pBMContext->hResItem = ResManRegisterRes(hResManContext,
+ RESMAN_TYPE_DEVICEMEM_CONTEXT,
+ pBMContext,
+ 0,
+ &BM_DestroyContextCallBack);
+ if (pBMContext->hResItem == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
+ goto cleanup;
+ }
+
+ if (pbCreated != IMG_NULL)
+ {
+ *pbCreated = IMG_TRUE;
+ }
+ return (IMG_HANDLE)pBMContext;
+
+cleanup:
+ (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0, CLEANUP_WITH_POLL);
+
+ return IMG_NULL;
+}
+
+
+static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
+{
+ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
+ psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
+ if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
+ {
+
+ return psBMHeap;
+ }
+ else
+ {
+ return IMG_NULL;
+ }
+}
+
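+/* BM_CreateHeap: create a device memory heap within a context, or return an
+ * existing heap with the same heap ID. Sets up the device arena description,
+ * the MMU heap and the import arena (backed by BM_ImportMemory/BM_FreeMemory)
+ * used for backing-store allocations.
+ */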
+IMG_HANDLE
+BM_CreateHeap (IMG_HANDLE hBMContext,
+ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
+{
+ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ BM_HEAP *psBMHeap;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
+
+ if(!pBMContext)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: BM_CONTEXT null"));
+ return IMG_NULL;
+ }
+
+ psDeviceNode = pBMContext->psDeviceNode;
+
+
+
+ PVR_ASSERT((psDevMemHeapInfo->ui32HeapSize & (psDevMemHeapInfo->ui32DataPageSize - 1)) == 0);
+ PVR_ASSERT(psDevMemHeapInfo->ui32HeapSize > 0);
+
+
+
+
+
+
+ if(pBMContext->ui32RefCount > 0)
+ {
+ psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
+ &BM_CreateHeap_AnyVaCb,
+ psDevMemHeapInfo);
+
+ if (psBMHeap)
+ {
+ return psBMHeap;
+ }
+ }
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (BM_HEAP),
+ (IMG_PVOID *)&psBMHeap, IMG_NULL,
+ "Buffer Manager Heap") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
+ return IMG_NULL;
+ }
+
+ OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
+
+ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
+ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
+ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
+ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
+ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
+ psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
+ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
+ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
+#if defined(SUPPORT_MEMORY_TILING)
+ psBMHeap->ui32XTileStride = psDevMemHeapInfo->ui32XTileStride;
+#endif
+
+
+ psBMHeap->pBMContext = pBMContext;
+
+ psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
+ &psBMHeap->sDevArena,
+ &psBMHeap->pVMArena,
+ &psBMHeap->psMMUAttrib);
+ if (!psBMHeap->pMMUHeap)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
+ goto ErrorExit;
+ }
+
+
+ psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
+ 0, 0, IMG_NULL,
+ MAX(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize),
+ &BM_ImportMemory,
+ &BM_FreeMemory,
+ IMG_NULL,
+ psBMHeap);
+ if(psBMHeap->pImportArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
+ goto ErrorExit;
+ }
+
+ if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
+ {
+
+
+
+
+ psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
+ if(psBMHeap->pLocalDevMemArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
+ goto ErrorExit;
+ }
+ }
+
+
+ List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
+
+ return (IMG_HANDLE)psBMHeap;
+
+
+ErrorExit:
+
+
+ if (psBMHeap->pMMUHeap != IMG_NULL)
+ {
+ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
+
+ }
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
+
+
+ return IMG_NULL;
+}
+
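+/* BM_DestroyHeap: tear down a heap by deleting its import arena and MMU heap,
+ * unlinking it from its context and freeing the heap structure.
+ */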
+IMG_VOID
+BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
+{
+ BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
+
+ if(psBMHeap)
+ {
+ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
+
+ if(psBMHeap->ui32Attribs
+ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
+ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
+ {
+ if (psBMHeap->pImportArena)
+ {
+ RA_Delete (psBMHeap->pImportArena);
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
+ return;
+ }
+
+
+ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
+
+
+ List_BM_HEAP_Remove(psBMHeap);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
+
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
+ }
+}
+
+
+IMG_BOOL
+BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+
+ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+
+ return IMG_TRUE;
+}
+
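+/* BM_Alloc: public allocation entry point. Allocates a BM_BUF, obtains the
+ * memory through AllocMemory and returns the buffer handle together with the
+ * effective flags (heap attributes merged with any caller cache type).
+ */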
+IMG_BOOL
+BM_Alloc ( IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ BM_HANDLE *phBuf)
+{
+ BM_BUF *pBuf;
+ BM_CONTEXT *pBMContext;
+ BM_HEAP *psBMHeap;
+ SYS_DATA *psSysData;
+ IMG_UINT32 uFlags;
+
+ if (pui32Flags == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
+ PVR_DBG_BREAK;
+ return IMG_FALSE;
+ }
+
+ uFlags = *pui32Flags;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
+ uSize, uFlags, uDevVAddrAlignment));
+
+ SysAcquireData(&psSysData);
+
+ psBMHeap = (BM_HEAP*)hDevMemHeap;
+ pBMContext = psBMHeap->pBMContext;
+
+ if(uDevVAddrAlignment == 0)
+ {
+ uDevVAddrAlignment = 1;
+ }
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (BM_BUF),
+ (IMG_PVOID *)&pBuf, IMG_NULL,
+ "Buffer Manager buffer") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
+ return IMG_FALSE;
+ }
+ OSMemSet(pBuf, 0, sizeof (BM_BUF));
+
+
+ if (AllocMemory(pBMContext,
+ psBMHeap,
+ psDevVAddr,
+ uSize,
+ uFlags,
+ uDevVAddrAlignment,
+ pvPrivData,
+ ui32PrivDataLength,
+ pBuf) != IMG_TRUE)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
+
+ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
+ return IMG_FALSE;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_Alloc (uSize=0x%x, uFlags=0x%x)",
+ uSize, uFlags));
+
+
+ pBuf->ui32RefCount = 1;
+ *phBuf = (BM_HANDLE)pBuf;
+ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
+
+
+ if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
+ }
+
+ return IMG_TRUE;
+}
+
+
+
+#if defined(PVR_LMA)
+static IMG_BOOL
+ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32PageCount; i++)
+ {
+ IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
+ IMG_SYS_PHYADDR sEndSysPAddr;
+
+ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
+ {
+ return IMG_FALSE;
+ }
+
+ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
+
+ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
+ {
+ return IMG_FALSE;
+ }
+ }
+
+ return IMG_TRUE;
+}
+
+static IMG_BOOL
+ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
+{
+ IMG_SYS_PHYADDR sEndSysPAddr;
+
+ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
+ {
+ return IMG_FALSE;
+ }
+
+ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
+
+ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
+ {
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
+
+#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
+
+#endif
+
+
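+/* BM_Wrap: wrap existing system pages into device memory. Previously wrapped
+ * pages are found via the per-context hash of wrapped physical addresses and
+ * simply re-referenced; otherwise a new buffer is created with WrapMemory and
+ * (for contiguously wrapped memory) inserted into the hash.
+ */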
+IMG_BOOL
+BM_Wrap ( IMG_HANDLE hDevMemHeap,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Offset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 *pui32Flags,
+ BM_HANDLE *phBuf)
+{
+ BM_BUF *pBuf;
+ BM_CONTEXT *psBMContext;
+ BM_HEAP *psBMHeap;
+ SYS_DATA *psSysData;
+ IMG_SYS_PHYADDR sHashAddress;
+ IMG_UINT32 uFlags;
+
+ psBMHeap = (BM_HEAP*)hDevMemHeap;
+ psBMContext = psBMHeap->pBMContext;
+
+ uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
+
+ if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
+ {
+ uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
+ ui32Size, ui32Offset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
+
+ SysAcquireData(&psSysData);
+
+#if defined(PVR_LMA)
+ if (bPhysContig)
+ {
+ if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
+ return IMG_FALSE;
+ }
+ }
+ else
+ {
+ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
+
+ if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
+ return IMG_FALSE;
+ }
+ }
+#endif
+
+ sHashAddress = psSysAddr[0];
+
+
+ sHashAddress.uiAddr += ui32Offset;
+
+
+ pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);
+
+ if(pBuf)
+ {
+ IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
+
+
+ if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
+ pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
+ ui32Size, ui32Offset, sHashAddress.uiAddr));
+
+ PVRSRVBMBufIncRef(pBuf);
+ *phBuf = (BM_HANDLE)pBuf;
+ if(pui32Flags)
+ *pui32Flags = uFlags;
+
+ return IMG_TRUE;
+ }
+ else
+ {
+
+ HASH_Remove(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
+ }
+ }
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (BM_BUF),
+ (IMG_PVOID *)&pBuf, IMG_NULL,
+ "Buffer Manager buffer") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
+ return IMG_FALSE;
+ }
+ OSMemSet(pBuf, 0, sizeof (BM_BUF));
+
+
+ if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
+
+ return IMG_FALSE;
+ }
+
+
+ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
+ {
+
+ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
+
+ if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
+ {
+ FreeBuf (pBuf, uFlags, IMG_TRUE);
+ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
+ return IMG_FALSE;
+ }
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
+ ui32Size, uFlags, pBuf->DevVAddr.uiAddr));
+
+
+ pBuf->ui32RefCount = 1;
+ *phBuf = (BM_HANDLE)pBuf;
+ if(pui32Flags)
+ {
+
+ *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
+ }
+
+ return IMG_TRUE;
+}
+
+IMG_VOID
+BM_Export (BM_HANDLE hBuf)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVRSRVBMBufIncExport(pBuf);
+}
+
+IMG_VOID
+BM_FreeExport(BM_HANDLE hBuf,
+ IMG_UINT32 ui32Flags)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVRSRVBMBufDecExport(pBuf);
+ FreeBuf (pBuf, ui32Flags, IMG_FALSE);
+}
+
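+/* BM_Free: drop a reference on a buffer; at zero, remove any wrap-hash entry
+ * and release the buffer with FreeBuf.
+ */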
+IMG_VOID
+BM_Free (BM_HANDLE hBuf,
+ IMG_UINT32 ui32Flags)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+ SYS_DATA *psSysData;
+ IMG_SYS_PHYADDR sHashAddr;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%x)", (IMG_UINTPTR_T)hBuf));
+ PVR_ASSERT (pBuf!=IMG_NULL);
+
+ if (pBuf == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
+ return;
+ }
+
+ SysAcquireData(&psSysData);
+
+ PVRSRVBMBufDecRef(pBuf);
+ if(pBuf->ui32RefCount == 0)
+ {
+ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
+ {
+ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
+
+ HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr);
+ }
+ FreeBuf (pBuf, ui32Flags, IMG_TRUE);
+ }
+}
+
+
+IMG_CPU_VIRTADDR
+BM_HandleToCpuVaddr (BM_HANDLE hBuf)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVR_ASSERT (pBuf != IMG_NULL);
+ if (pBuf == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
+ return IMG_NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_HandleToCpuVaddr(h=0x%x)=0x%x",
+ (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->CpuVAddr));
+ return pBuf->CpuVAddr;
+}
+
+
+IMG_DEV_VIRTADDR
+BM_HandleToDevVaddr (BM_HANDLE hBuf)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVR_ASSERT (pBuf != IMG_NULL);
+ if (pBuf == IMG_NULL)
+ {
+ IMG_DEV_VIRTADDR DevVAddr = {0};
+ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter"));
+ return DevVAddr;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->DevVAddr.uiAddr));
+ return pBuf->DevVAddr;
+}
+
+
+IMG_SYS_PHYADDR
+BM_HandleToSysPaddr (BM_HANDLE hBuf)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVR_ASSERT (pBuf != IMG_NULL);
+
+ if (pBuf == IMG_NULL)
+ {
+ IMG_SYS_PHYADDR PhysAddr = {0};
+ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
+ return PhysAddr;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->CpuPAddr.uiAddr));
+ return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
+}
+
+IMG_HANDLE
+BM_HandleToOSMemHandle(BM_HANDLE hBuf)
+{
+ BM_BUF *pBuf = (BM_BUF *)hBuf;
+
+ PVR_ASSERT (pBuf != IMG_NULL);
+
+ if (pBuf == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter"));
+ return IMG_NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_HandleToOSMemHandle(h=0x%x)=0x%x",
+ (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->hOSMemHandle));
+ return pBuf->hOSMemHandle;
+}
+
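+/* DevMemoryAlloc: reserve device virtual address space from the MMU heap and
+ * map the mapping's pages into it, selecting MMUMapPages, MMUMapShadow or
+ * MMUMapScatter according to where the CPU memory came from.
+ */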
+static IMG_BOOL
+DevMemoryAlloc (BM_CONTEXT *pBMContext,
+ BM_MAPPING *pMapping,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 dev_vaddr_alignment,
+ IMG_DEV_VIRTADDR *pDevVAddr)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+#ifdef PDUMP
+ IMG_UINT32 ui32PDumpSize = (IMG_UINT32)pMapping->uSize;
+#endif
+
+ psDeviceNode = pBMContext->psDeviceNode;
+
+ if(uFlags & PVRSRV_MEM_INTERLEAVED)
+ {
+
+ pMapping->uSize *= 2;
+ }
+
+#ifdef PDUMP
+ if(uFlags & PVRSRV_MEM_DUMMY)
+ {
+
+ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
+ }
+#endif
+
+
+ if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
+ pMapping->uSize,
+ pActualSize,
+ 0,
+ dev_vaddr_alignment,
+ &(pMapping->DevVAddr)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
+ return IMG_FALSE;
+ }
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(pBMContext->psMMUContext);
+#endif
+
+#if defined(PDUMP)
+
+ PDUMPMALLOCPAGES(&psDeviceNode->sDevId,
+ pMapping->DevVAddr.uiAddr,
+ pMapping->CpuVAddr,
+ pMapping->hOSMemHandle,
+ ui32PDumpSize,
+ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap),
+#else
+ IMG_FALSE,
+#endif
+ (IMG_HANDLE)pMapping);
+#endif
+
+ switch (pMapping->eCpuMemoryOrigin)
+ {
+ case hm_wrapped:
+ case hm_wrapped_virtaddr:
+ case hm_contiguous:
+ {
+ psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
+ pMapping->uSize,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+
+ *pDevVAddr = pMapping->DevVAddr;
+ break;
+ }
+ case hm_env:
+ {
+ psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ pMapping->uSize,
+ pMapping->CpuVAddr,
+ pMapping->hOSMemHandle,
+ pDevVAddr,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+ break;
+ }
+ case hm_wrapped_scatter:
+ case hm_wrapped_scatter_virtaddr:
+ {
+ psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ pMapping->psSysAddr,
+ pMapping->uSize,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+
+ *pDevVAddr = pMapping->DevVAddr;
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "Illegal value %d for pMapping->eCpuMemoryOrigin",
+ pMapping->eCpuMemoryOrigin));
+ return IMG_FALSE;
+ }
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(pBMContext->psMMUContext);
+#endif
+
+ return IMG_TRUE;
+}
+
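+/* DevMemoryFree: unmap a mapping's device virtual range and return it to the
+ * MMU heap, with PDUMP bookkeeping when enabled.
+ */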
+static IMG_VOID
+DevMemoryFree (BM_MAPPING *pMapping)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_DEV_PHYADDR sDevPAddr;
+#ifdef PDUMP
+ IMG_UINT32 ui32PSize;
+#endif
+
+ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
+ sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr);
+
+ if (sDevPAddr.uiAddr != 0)
+ {
+#ifdef PDUMP
+
+ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
+ {
+
+ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
+ }
+ else
+ {
+ ui32PSize = (IMG_UINT32)pMapping->uSize;
+ }
+
+ PDUMPFREEPAGES(pMapping->pBMHeap,
+ pMapping->DevVAddr,
+ ui32PSize,
+ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
+ (IMG_HANDLE)pMapping,
+ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE);
+#endif
+ }
+ psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
+}
+
+#ifndef XPROC_WORKAROUND_NUM_SHAREABLES
+#define XPROC_WORKAROUND_NUM_SHAREABLES 200
+#endif
+
+#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734
+
+#define XPROC_WORKAROUND_UNKNOWN 0
+#define XPROC_WORKAROUND_ALLOC 1
+#define XPROC_WORKAROUND_MAP 2
+
+static IMG_UINT32 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
+static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
+
+
+XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}};
+
+PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index)
+{
+
+
+
+ if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "No, it's already set!"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ gXProcWorkaroundShareIndex = ui32Index;
+ gXProcWorkaroundState = XPROC_WORKAROUND_MAP;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index)
+{
+
+
+
+ if (gXProcWorkaroundShareIndex == XPROC_WORKAROUND_BAD_SHAREINDEX)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "huh? how can it be bad??"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (gXProcWorkaroundShareIndex != ui32Index)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareIndex == 0x%08x != 0x%08x == ui32Index", gXProcWorkaroundShareIndex, ui32Index));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
+ gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index)
+{
+
+
+
+ if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ for (*pui32Index = 0; *pui32Index < XPROC_WORKAROUND_NUM_SHAREABLES; (*pui32Index)++)
+ {
+ if (gXProcWorkaroundShareData[*pui32Index].ui32RefCount == 0)
+ {
+ gXProcWorkaroundShareIndex = *pui32Index;
+ gXProcWorkaroundState = XPROC_WORKAROUND_ALLOC;
+ return PVRSRV_OK;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "ran out of shared buffers"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
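+/* XProcWorkaroundAllocShareable: allocate, or re-use, a cross-process
+ * shareable block for the currently selected share index. An occupied slot
+ * must match in flags, size and page size and just gains a reference;
+ * otherwise new pages come from the supplied local memory arena, or from the
+ * OS when no arena is given.
+ */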
+static PVRSRV_ERROR
+XProcWorkaroundAllocShareable(RA_ARENA *psArena,
+ IMG_UINT32 ui32AllocFlags,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PageSize,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_VOID **ppvCpuVAddr,
+ IMG_HANDLE *phOSMemHandle)
+{
+ if ((ui32AllocFlags & PVRSRV_MEM_XPROC) == 0)
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "XProcWorkaroundAllocShareable: bad flags"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32RefCount > 0)
+ {
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "XProcWorkaroundAllocShareable: re-using previously allocated pages"));
+
+ ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
+ ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
+
+ if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Can't! Flags don't match! (I had 0x%08x, you gave 0x%08x)",
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
+ ui32AllocFlags));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Can't! Size doesn't match!"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Can't! Page Size doesn't match!"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
+ *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
+
+ BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
+
+ return PVRSRV_OK;
+ }
+ else
+ {
+ if (gXProcWorkaroundState != XPROC_WORKAROUND_ALLOC)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "XPROC workaround in bad state! About to allocate memory from non-alloc state! (%d)",
+ gXProcWorkaroundState));
+ }
+ PVR_ASSERT(gXProcWorkaroundState == XPROC_WORKAROUND_ALLOC);
+
+ if (psArena != IMG_NULL)
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "XProcWorkaroundAllocShareable: making a NEW allocation from local mem"));
+
+ if (!RA_Alloc (psArena,
+ ui32Size,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ ui32PageSize,
+ 0,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ if(OSReservePhys(sCpuPAddr,
+ ui32Size,
+ ui32AllocFlags,
+ (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
+ &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: OSReservePhys failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].sSysPAddr = sSysPAddr;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "XProcWorkaroundAllocShareable: making a NEW allocation from OS"));
+
+ ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
+ ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
+
+
+ if (OSAllocPages(ui32AllocFlags,
+ ui32Size,
+ ui32PageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
+ &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "XProcWorkaroundAllocShareable: OSAllocPages(0x%x) failed",
+ ui32PageSize));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].psArena = psArena;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags = ui32AllocFlags;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size = ui32Size;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize = ui32PageSize;
+
+ *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
+ *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
+
+ BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
+
+ return PVRSRV_OK;
+ }
+}
+
+static PVRSRV_ERROR XProcWorkaroundHandleToSI(IMG_HANDLE hOSMemHandle, IMG_UINT32 *pui32SI)
+{
+
+ IMG_UINT32 ui32SI;
+ IMG_BOOL bFound;
+ IMG_BOOL bErrorDups;
+
+ bFound = IMG_FALSE;
+ bErrorDups = IMG_FALSE;
+
+ for (ui32SI = 0; ui32SI < XPROC_WORKAROUND_NUM_SHAREABLES; ui32SI++)
+ {
+ if (gXProcWorkaroundShareData[ui32SI].ui32RefCount>0 && gXProcWorkaroundShareData[ui32SI].hOSMemHandle == hOSMemHandle)
+ {
+ if (bFound)
+ {
+ bErrorDups = IMG_TRUE;
+ }
+ else
+ {
+ *pui32SI = ui32SI;
+ bFound = IMG_TRUE;
+ }
+ }
+ }
+
+ if (bErrorDups || !bFound)
+ {
+ return PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+#else
+IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index)
+#endif
+{
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+ PVRSRVBMXProcIncRef2(pszFile, iLine, ui32Index);
+#else
+ PVRSRVBMXProcIncRef(ui32Index);
+#endif
+}
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+#else
+IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index)
+#endif
+{
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+ PVRSRVBMXProcDecRef2(pszFile, iLine, ui32Index);
+#else
+ PVRSRVBMXProcDecRef(ui32Index);
+#endif
+
+ PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of SI[%d] from %d to %d",
+ ui32Index, gXProcWorkaroundShareData[ui32Index].ui32RefCount+1, gXProcWorkaroundShareData[ui32Index].ui32RefCount));
+
+ if (gXProcWorkaroundShareData[ui32Index].ui32RefCount == 0)
+ {
+ if (gXProcWorkaroundShareData[ui32Index].psArena != IMG_NULL)
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ if (gXProcWorkaroundShareData[ui32Index].pvCpuVAddr != IMG_NULL)
+ {
+ OSUnReservePhys(gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
+ gXProcWorkaroundShareData[ui32Index].ui32Size,
+ gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
+ }
+ sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr;
+ RA_Free (gXProcWorkaroundShareData[ui32Index].psArena,
+ sSysPAddr.uiAddr,
+ IMG_FALSE);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "freeing OS memory"));
+ OSFreePages(gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
+ gXProcWorkaroundShareData[ui32Index].ui32PageSize,
+ gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
+ }
+ }
+}
+
+static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle)
+{
+ IMG_UINT32 ui32SI = (IMG_UINT32)((IMG_UINTPTR_T)hOSMemHandle & 0xffffU);
+ PVRSRV_ERROR eError;
+
+ eError = XProcWorkaroundHandleToSI(hOSMemHandle, &ui32SI);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "bad handle"));
+ return;
+ }
+
+ BM_XProcIndexRelease(ui32SI);
+}
+
+
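+/* BM_ImportMemory: RA import callback for a heap's import arena. Allocates
+ * page-aligned backing (cross-process shareable, OS pages or local contiguous
+ * device memory, depending on the flags and heap attributes), maps it into
+ * device virtual space via DevMemoryAlloc and returns the device base address.
+ */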
+static IMG_BOOL
+BM_ImportMemory (IMG_VOID *pH,
+ IMG_SIZE_T uRequestSize,
+ IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *pBase)
+{
+ BM_MAPPING *pMapping;
+ BM_HEAP *pBMHeap = pH;
+ BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
+ IMG_BOOL bResult;
+ IMG_SIZE_T uSize;
+ IMG_SIZE_T uPSize;
+ IMG_SIZE_T uDevVAddrAlignment = 0;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
+ (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
+
+ PVR_ASSERT (ppsMapping != IMG_NULL);
+ PVR_ASSERT (pBMContext != IMG_NULL);
+
+ if (ppsMapping == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
+ goto fail_exit;
+ }
+
+ uSize = HOST_PAGEALIGN (uRequestSize);
+ PVR_ASSERT (uSize >= uRequestSize);
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (BM_MAPPING),
+ (IMG_PVOID *)&pMapping, IMG_NULL,
+ "Buffer Manager Mapping") != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
+ goto fail_exit;
+ }
+
+ pMapping->hOSMemHandle = 0;
+ pMapping->CpuVAddr = 0;
+ pMapping->DevVAddr.uiAddr = 0;
+ pMapping->CpuPAddr.uiAddr = 0;
+ pMapping->uSize = uSize;
+ pMapping->pBMHeap = pBMHeap;
+ pMapping->ui32Flags = uFlags;
+
+
+ if (pActualSize)
+ {
+ *pActualSize = uSize;
+ }
+
+
+ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
+ {
+ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
+ }
+ else
+ {
+ uPSize = pMapping->uSize;
+ }
+
+ if (uFlags & PVRSRV_MEM_XPROC)
+ {
+ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC;
+ IMG_BOOL bBadBackingStoreType;
+
+
+ if(uFlags & PVRSRV_MEM_ION)
+ {
+ ui32Attribs |= PVRSRV_MEM_ION;
+ }
+
+ bBadBackingStoreType = IMG_TRUE;
+
+ if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0)
+ {
+#ifndef MAX
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+ uDevVAddrAlignment = MAX(pBMHeap->sDevArena.ui32DataPageSize, HOST_PAGESIZE());
+
+
+ if (uPSize % uDevVAddrAlignment != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated"));
+ goto fail_mapping_alloc;
+ }
+ uDevVAddrAlignment = 0;
+
+
+ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
+ }
+
+
+ if (XProcWorkaroundAllocShareable(IMG_NULL,
+ ui32Attribs,
+ (IMG_UINT32)uPSize,
+ pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_VOID **)&pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
+ uPSize));
+ goto fail_mapping_alloc;
+ }
+
+
+
+
+ pMapping->eCpuMemoryOrigin = hm_env;
+ bBadBackingStoreType = IMG_FALSE;
+ }
+
+ if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0)
+ {
+ uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize;
+
+ if (uPSize % uDevVAddrAlignment != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated"));
+ goto fail_mapping_alloc;
+ }
+ uDevVAddrAlignment = 0;
+
+
+ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
+ }
+
+
+ if (XProcWorkaroundAllocShareable(pBMHeap->pLocalDevMemArena,
+ ui32Attribs,
+ (IMG_UINT32)uPSize,
+ pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_VOID **)&pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
+ uPSize));
+ goto fail_mapping_alloc;
+ }
+
+
+
+
+ pMapping->eCpuMemoryOrigin = hm_env;
+ bBadBackingStoreType = IMG_FALSE;
+ }
+
+ if (bBadBackingStoreType)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with this type of backing store"));
+ goto fail_mapping_alloc;
+ }
+ }
+ else
+
+
+
+ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
+ {
+ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
+
+
+ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
+ }
+
+ if (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM)
+ {
+ ui32Attribs &= ~PVRSRV_MEM_ALLOCATENONCACHEDMEM;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM);
+ }
+
+
+ if (OSAllocPages(ui32Attribs,
+ uPSize,
+ pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_VOID **)&pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "BM_ImportMemory: OSAllocPages(0x%x) failed",
+ uPSize));
+ goto fail_mapping_alloc;
+ }
+
+
+ pMapping->eCpuMemoryOrigin = hm_env;
+ }
+ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
+
+
+ PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
+
+
+ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
+ }
+
+ if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
+ uPSize,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ pBMHeap->sDevArena.ui32DataPageSize,
+ 0,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
+ goto fail_mapping_alloc;
+ }
+
+
+ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ if(OSReservePhys(pMapping->CpuPAddr,
+ uPSize,
+ ui32Attribs,
+ &pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
+ goto fail_dev_mem_alloc;
+ }
+
+
+ pMapping->eCpuMemoryOrigin = hm_contiguous;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
+ goto fail_mapping_alloc;
+ }
+
+
+ bResult = DevMemoryAlloc (pBMContext,
+ pMapping,
+ IMG_NULL,
+ uFlags,
+ (IMG_UINT32)uDevVAddrAlignment,
+ &pMapping->DevVAddr);
+ if (!bResult)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
+ pMapping->uSize));
+ goto fail_dev_mem_alloc;
+ }
+
+
+
+ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
+
+ *pBase = pMapping->DevVAddr.uiAddr;
+ *ppsMapping = pMapping;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
+ return IMG_TRUE;
+
+fail_dev_mem_alloc:
+ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
+ {
+
+ if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
+ {
+ pMapping->uSize /= 2;
+ }
+
+ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
+ {
+ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
+ }
+ else
+ {
+ uPSize = pMapping->uSize;
+ }
+
+ if (uFlags & PVRSRV_MEM_XPROC)
+ {
+ XProcWorkaroundFreeShareable(pMapping->hOSMemHandle);
+ }
+ else
+ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
+ {
+ OSFreePages(pBMHeap->ui32Attribs,
+ uPSize,
+ (IMG_VOID *)pMapping->CpuVAddr,
+ pMapping->hOSMemHandle);
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ if(pMapping->CpuVAddr)
+ {
+ OSUnReservePhys(pMapping->CpuVAddr,
+ uPSize,
+ pBMHeap->ui32Attribs,
+ pMapping->hOSMemHandle);
+ }
+ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
+ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+ }
+fail_mapping_alloc:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
+
+fail_exit:
+ return IMG_FALSE;
+}
+
+
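+/* BM_FreeMemory: RA free callback. Unmaps the device virtual range and
+ * returns the backing pages to the OS, the local device memory arena or the
+ * cross-process share pool.
+ */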
+static IMG_VOID
+BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
+{
+ BM_HEAP *pBMHeap = h;
+ IMG_SIZE_T uPSize;
+
+ PVR_UNREFERENCED_PARAMETER (_base);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)",
+ (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping));
+
+ PVR_ASSERT (psMapping != IMG_NULL);
+
+ if (psMapping == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
+ return;
+ }
+
+ DevMemoryFree (psMapping);
+
+
+ if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
+ {
+ psMapping->uSize /= 2;
+ }
+
+ if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
+ {
+ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
+ }
+ else
+ {
+ uPSize = psMapping->uSize;
+ }
+
+ if (psMapping->ui32Flags & PVRSRV_MEM_XPROC)
+ {
+ XProcWorkaroundFreeShareable(psMapping->hOSMemHandle);
+ }
+ else
+ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
+ {
+ OSFreePages(pBMHeap->ui32Attribs,
+ uPSize,
+ (IMG_VOID *) psMapping->CpuVAddr,
+ psMapping->hOSMemHandle);
+ }
+ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+ OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
+
+ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
+
+ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
+
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "..BM_FreeMemory (h=0x%x, base=0x%x)",
+ (IMG_UINTPTR_T)h, _base));
+}
+
+IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_DEV_VIRTADDR sDevVPageAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
+
+ PVR_ASSERT (psMemInfo && psDevPAddr);
+
+
+ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
+
+
+ psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
+
+ *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
+ sDevVPageAddr);
+}
+
+
+MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
+{
+ BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
+
+ return pBMHeap->pBMContext->psMMUContext;
+}
+
+MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
+{
+ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
+
+ PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
+
+ return pBMContext->psMMUContext;
+}
+
+IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
+{
+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
+
+ return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
+}
+
+
+PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
+{
+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
+
+ return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
+}
+
+
+IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
+
+ return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
+}
+
diff --git a/drivers/gpu/pvr/buffer_manager.h b/drivers/gpu/pvr/buffer_manager.h
new file mode 100644
index 0000000..b78b0ae
--- /dev/null
+++ b/drivers/gpu/pvr/buffer_manager.h
@@ -0,0 +1,248 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _BUFFER_MANAGER_H_
+#define _BUFFER_MANAGER_H_
+
+#include "img_types.h"
+#include "ra.h"
+#include "perproc.h"
+
+#if defined(__cplusplus)
+extern "C"{
+#endif
+
+typedef struct _BM_HEAP_ BM_HEAP;
+
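+/* A BM_MAPPING describes one contiguous import of backing memory: where the
+ * pages came from (eCpuMemoryOrigin), their CPU/device addresses, size, OS
+ * memory handle and the arena and heap they were imported for. Buffers
+ * (BM_BUF) are suballocated from, or wrap, a mapping.
+ */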
+struct _BM_MAPPING_
+{
+ enum
+ {
+ hm_wrapped = 1,
+ hm_wrapped_scatter,
+ hm_wrapped_virtaddr,
+ hm_wrapped_scatter_virtaddr,
+ hm_env,
+ hm_contiguous
+ } eCpuMemoryOrigin;
+
+ BM_HEAP *pBMHeap;
+ RA_ARENA *pArena;
+
+ IMG_CPU_VIRTADDR CpuVAddr;
+ IMG_CPU_PHYADDR CpuPAddr;
+ IMG_DEV_VIRTADDR DevVAddr;
+ IMG_SYS_PHYADDR *psSysAddr;
+ IMG_SIZE_T uSize;
+ IMG_HANDLE hOSMemHandle;
+ IMG_UINT32 ui32Flags;
+};
+
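+/* BM_BUF: a buffer allocated or wrapped through the buffer manager, with its CPU/device addresses, the BM_MAPPING that backs it, and reference/export counts. */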
+typedef struct _BM_BUF_
+{
+ IMG_CPU_VIRTADDR *CpuVAddr;
+ IMG_VOID *hOSMemHandle;
+ IMG_CPU_PHYADDR CpuPAddr;
+ IMG_DEV_VIRTADDR DevVAddr;
+
+ BM_MAPPING *pMapping;
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32ExportCount;
+} BM_BUF;
+
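+/* BM_HEAP: per device-memory heap - attributes, owning context, the RA arenas it allocates from, the device arena descriptor and the MMU heap that maps it. */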
+struct _BM_HEAP_
+{
+ IMG_UINT32 ui32Attribs;
+ BM_CONTEXT *pBMContext;
+ RA_ARENA *pImportArena;
+ RA_ARENA *pLocalDevMemArena;
+ RA_ARENA *pVMArena;
+ DEV_ARENA_DESCRIPTOR sDevArena;
+ MMU_HEAP *pMMUHeap;
+ PDUMP_MMU_ATTRIB *psMMUAttrib;
+
+ struct _BM_HEAP_ *psNext;
+ struct _BM_HEAP_ **ppsThis;
+
+ IMG_UINT32 ui32XTileStride;
+};
+
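+/* BM_CONTEXT: per device-memory context - its MMU context, heap lists, owning device node, a hash table of buffers and resource-manager bookkeeping. */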
+struct _BM_CONTEXT_
+{
+ MMU_CONTEXT *psMMUContext;
+
+
+ BM_HEAP *psBMHeap;
+
+
+ BM_HEAP *psBMSharedHeap;
+
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+
+ HASH_TABLE *pBufferHash;
+
+
+ IMG_HANDLE hResItem;
+
+ IMG_UINT32 ui32RefCount;
+
+
+
+ struct _BM_CONTEXT_ *psNext;
+ struct _BM_CONTEXT_ **ppsThis;
+};
+
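+/* XPROC_DATA: per share-index record for the cross-process shared-allocation workaround - refcount plus the flags, size and handles of the shared allocation. */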
+typedef struct _XPROC_DATA_{
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32AllocFlags;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32PageSize;
+ RA_ARENA *psArena;
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_VOID *pvCpuVAddr;
+ IMG_HANDLE hOSMemHandle;
+} XPROC_DATA;
+
+extern XPROC_DATA gXProcWorkaroundShareData[];
+typedef IMG_VOID *BM_HANDLE;
+
+#define BP_POOL_MASK 0x7
+
+#define BP_CONTIGUOUS (1 << 3)
+#define BP_PARAMBUFFER (1 << 4)
+
+#define BM_MAX_DEVMEM_ARENAS 2
+
+IMG_HANDLE
+BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_PHYADDR *psPDDevPAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_BOOL *pbCreated);
+
+
+PVRSRV_ERROR
+BM_DestroyContext (IMG_HANDLE hBMContext,
+ IMG_BOOL *pbCreated);
+
+
+IMG_HANDLE
+BM_CreateHeap (IMG_HANDLE hBMContext,
+ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
+
+IMG_VOID
+BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
+
+
+IMG_BOOL
+BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_BOOL
+BM_Alloc (IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ BM_HANDLE *phBuf);
+
+IMG_BOOL
+BM_Wrap ( IMG_HANDLE hDevMemHeap,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Offset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 *pui32Flags,
+ BM_HANDLE *phBuf);
+
+IMG_VOID
+BM_Free (BM_HANDLE hBuf,
+ IMG_UINT32 ui32Flags);
+
+
+IMG_CPU_VIRTADDR
+BM_HandleToCpuVaddr (BM_HANDLE hBuf);
+
+IMG_DEV_VIRTADDR
+BM_HandleToDevVaddr (BM_HANDLE hBuf);
+
+IMG_SYS_PHYADDR
+BM_HandleToSysPaddr (BM_HANDLE hBuf);
+
+IMG_HANDLE
+BM_HandleToOSMemHandle (BM_HANDLE hBuf);
+
+IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_DEV_VIRTADDR sDevVPageAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
+
+MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
+
+IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
+
+PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
+
+
+IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_VOID BM_Export(BM_HANDLE hBuf);
+
+IMG_VOID BM_FreeExport(BM_HANDLE hBuf, IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index);
+PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index);
+PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index);
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index);
+IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index);
+
+#define BM_XProcIndexAcquire(x...) \
+ _BM_XProcIndexAcquireDebug(__FILE__, __LINE__, x)
+#define BM_XProcIndexRelease(x...) \
+ _BM_XProcIndexReleaseDebug(__FILE__, __LINE__, x)
+
+#else
+IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index);
+IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index);
+
+#define BM_XProcIndexAcquire(x...) \
+ _BM_XProcIndexAcquire( x)
+#define BM_XProcIndexRelease(x...) \
+ _BM_XProcIndexRelease( x)
+#endif
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/dbgdrv/dbgdriv.c b/drivers/gpu/pvr/dbgdrv/dbgdriv.c
new file mode 100644
index 0000000..386aca4
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/dbgdriv.c
@@ -0,0 +1,2354 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+
+
+
+
+#define LAST_FRAME_BUF_SIZE 1024
+
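+/* Per-stream buffer holding the data written for the most recent frame (filled by DBGDrivWriteLF, read back by DBGDrivReadLF). */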
+typedef struct _DBG_LASTFRAME_BUFFER_
+{
+ PDBG_STREAM psStream;
+ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE];
+ IMG_UINT32 ui32BufLen;
+ struct _DBG_LASTFRAME_BUFFER_ *psNext;
+} *PDBG_LASTFRAME_BUFFER;
+
+
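+/* Module-wide state: the list of debug streams and their last-frame buffers, the mono console cursor, the hotkey mid-dump window and the API serialisation mutex. */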
+static PDBG_STREAM g_psStreamList = 0;
+static PDBG_LASTFRAME_BUFFER g_psLFBufferList;
+
+static IMG_UINT32 g_ui32LOff = 0;
+static IMG_UINT32 g_ui32Line = 0;
+static IMG_UINT32 g_ui32MonoLines = 25;
+
+static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
+static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff;
+static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
+
+IMG_VOID * g_pvAPIMutex=IMG_NULL;
+
+extern IMG_UINT32 g_ui32HotKeyFrame;
+extern IMG_BOOL g_bHotKeyPressed;
+extern IMG_BOOL g_bHotKeyRegistered;
+
+IMG_BOOL gbDumpThisFrame = IMG_FALSE;
+
+
+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream);
+
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+ sizeof (DBGKM_SERVICE_TABLE),
+ ExtDBGDrivCreateStream,
+ ExtDBGDrivDestroyStream,
+ ExtDBGDrivFindStream,
+ ExtDBGDrivWriteString,
+ ExtDBGDrivReadString,
+ ExtDBGDrivWrite,
+ ExtDBGDrivRead,
+ ExtDBGDrivSetCaptureMode,
+ ExtDBGDrivSetOutputMode,
+ ExtDBGDrivSetDebugLevel,
+ ExtDBGDrivSetFrame,
+ ExtDBGDrivGetFrame,
+ ExtDBGDrivOverrideMode,
+ ExtDBGDrivDefaultMode,
+ ExtDBGDrivWrite2,
+ ExtDBGDrivWriteStringCM,
+ ExtDBGDrivWriteCM,
+ ExtDBGDrivSetMarker,
+ ExtDBGDrivGetMarker,
+ ExtDBGDrivStartInitPhase,
+ ExtDBGDrivStopInitPhase,
+ ExtDBGDrivIsCaptureFrame,
+ ExtDBGDrivWriteLF,
+ ExtDBGDrivReadLF,
+ ExtDBGDrivGetStreamOffset,
+ ExtDBGDrivSetStreamOffset,
+ ExtDBGDrivIsLastCaptureFrame,
+ ExtDBGDrivWaitForEvent,
+ ExtDBGDrivSetConnectNotifier,
+ ExtDBGDrivWritePersist
+};
+
+
+static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+static IMG_VOID InvalidateAllStreams(IMG_VOID);
+
+
+
+
+DBGKM_CONNECT_NOTIFIER g_fnDBGKMNotifier;
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier)
+{
+
+ g_fnDBGKMNotifier = fn_notifier;
+}
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size)
+{
+ IMG_VOID * pvRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDestroyStream(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ IMG_VOID * pvRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivFindStream(pszName, bResetStream);
+ if(g_fnDBGKMNotifier.pfnConnectNotifier)
+ {
+ g_fnDBGKMNotifier.pfnConnectNotifier();
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pfnConnectNotifier not initialised.\n"));
+ }
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetOutputMode(psStream, ui32OutMode);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetFrame(psStream, ui32Frame);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivGetFrame(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
+{
+ IMG_BOOL bRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ bRet = DBGDrivIsLastCaptureFrame(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return bRet;
+}
+
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
+{
+ IMG_BOOL bRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return bRet;
+}
+
+void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivOverrideMode(psStream, ui32Mode);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDefaultMode(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWritePersist(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+ if(ui32Ret==0xFFFFFFFFU)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in DBGDrivWritePersist."));
+ }
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetMarker(psStream, ui32Marker);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Marker;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Marker = DBGDrivGetMarker(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Marker;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivStartInitPhase(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivStopInitPhase(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivGetStreamOffset(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ DBGDrivWaitForEvent(eEvent);
+#else
+ PVR_UNREFERENCED_PARAMETER(eEvent);
+#endif
+}
+
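+/* AtoI: minimal ASCII-to-integer conversion - decimal by default, hexadecimal with an 0x/0X prefix; returns 0 on any invalid character. */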
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+ IMG_INT iLen = 0;
+ IMG_UINT32 ui32Value = 0;
+ IMG_UINT32 ui32Digit=1;
+ IMG_UINT32 ui32Base=10;
+ IMG_INT iPos;
+ IMG_CHAR bc;
+
+
+ while (szIn[iLen] > 0)
+ {
+ iLen ++;
+ }
+
+
+ if (iLen == 0)
+ {
+ return (0);
+ }
+
+
+ iPos=0;
+ while (szIn[iPos] == '0')
+ {
+ iPos++;
+ }
+ if (szIn[iPos] == '\0')
+ {
+ return 0;
+ }
+ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+ {
+ ui32Base=16;
+ szIn[iPos]='0';
+ }
+
+
+ for (iPos = iLen - 1; iPos >= 0; iPos --)
+ {
+ bc = szIn[iPos];
+
+ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)
+ {
+ bc -= 'a' - 0xa;
+ }
+ else
+ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)
+ {
+ bc -= 'A' - 0xa;
+ }
+ else
+ if ((bc >= '0') && (bc <= '9'))
+ {
+ bc -= '0';
+ }
+ else
+ return (0);
+
+ ui32Value += (IMG_UINT32)bc * ui32Digit;
+
+ ui32Digit = ui32Digit * ui32Base;
+ }
+ return (ui32Value);
+}
+
+
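+/* StreamValid: returns IMG_TRUE only if psStream is still present in the global stream list. */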
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psThis;
+
+ psThis = g_psStreamList;
+
+ while (psThis)
+ {
+ if (psStream && (psThis == psStream) )
+ {
+ return(IMG_TRUE);
+ }
+ else
+ {
+ psThis = psThis->psNext;
+ }
+ }
+
+ return(IMG_FALSE);
+}
+
+
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+
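+/* Write: raw copy into the stream buffer, split into two copies when the data wraps past the end of a circular buffer. */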
+static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+
+
+ if (!psStream->bCircularAllowed)
+ {
+
+ }
+
+ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+ {
+
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+
+ HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+ (IMG_PVOID) pui8Data,
+ ui32B1);
+
+
+ HostMemCopy(psStream->pvBase,
+ (IMG_PVOID)(pui8Data + ui32B1),
+ ui32B2);
+
+
+ psStream->ui32WPtr = ui32B2;
+ }
+ else
+ {
+ HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+ (IMG_PVOID) pui8Data,
+ ui32InBuffSize);
+
+ psStream->ui32WPtr += ui32InBuffSize;
+
+ if (psStream->ui32WPtr == psStream->ui32Size)
+ {
+ psStream->ui32WPtr = 0;
+ }
+ }
+ psStream->ui32DataWritten += ui32InBuffSize;
+}
+
+
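+/* MonoOut: writes a string to the legacy monochrome text console at DBGDRIV_MONOBASE, scrolling once the last line is reached (no-op on _WIN64). */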
+void MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine)
+{
+#if defined (_WIN64)
+ PVR_UNREFERENCED_PARAMETER(pszString);
+ PVR_UNREFERENCED_PARAMETER(bNewLine);
+
+#else
+ IMG_UINT32 i;
+ IMG_CHAR * pScreen;
+
+ pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE;
+
+ pScreen += g_ui32Line * 160;
+
+
+
+ i=0;
+ do
+ {
+ pScreen[g_ui32LOff + (i*2)] = pszString[i];
+ pScreen[g_ui32LOff + (i*2)+1] = 127;
+ i++;
+ }
+ while ((pszString[i] != 0) && (i < 4096));
+
+ g_ui32LOff += i * 2;
+
+ if (bNewLine)
+ {
+ g_ui32LOff = 0;
+ g_ui32Line++;
+ }
+
+
+
+ if (g_ui32Line == g_ui32MonoLines)
+ {
+ g_ui32Line = g_ui32MonoLines - 1;
+
+ HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1));
+
+ HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160);
+ }
+#endif
+}
+
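+/* WriteExpandingBuffer: write to a stream, doubling the buffer when space runs low unless expansion is disabled; an out-of-memory failure on a non-circular stream invalidates all streams. */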
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT ui32Space;
+
+
+
+ ui32Space = SpaceInStream(psStream);
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %x is disabled", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+
+
+ if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+ {
+
+
+
+ if (ui32Space < 32)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %x is full and isn't expandable", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+ }
+ else
+ {
+ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+ {
+ IMG_UINT32 ui32NewBufSize;
+
+
+
+ ui32NewBufSize = 2 * psStream->ui32Size;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+ psStream->ui32Size, ui32NewBufSize));
+
+ if (ui32InBuffSize > psStream->ui32Size)
+ {
+ ui32NewBufSize += ui32InBuffSize;
+ }
+
+
+
+ if (!ExpandStreamBuffer(psStream,ui32NewBufSize))
+ {
+ if (ui32Space < 32)
+ {
+ if(psStream->bCircularAllowed)
+ {
+ return(0);
+ }
+ else
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: Unable to expand %x. Out of memory.", (IMG_UINTPTR_T) psStream));
+ InvalidateAllStreams();
+ return (0xFFFFFFFFUL);
+ }
+ }
+ }
+
+
+
+ ui32Space = SpaceInStream(psStream);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+ ui32Space));
+ }
+ }
+
+
+
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 4;
+ }
+
+
+
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
+
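+/* DBGDrivCreateStream: returns the existing stream if the name is already registered; otherwise allocates the control block, the main and "_Init" stream buffers and a last-frame buffer, then links the stream into the global lists. */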
+IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
+ IMG_UINT32 ui32CapMode,
+ IMG_UINT32 ui32OutMode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size)
+{
+ PDBG_STREAM psStream;
+ PDBG_STREAM psInitStream;
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ PDBG_STREAM_CONTROL psCtrl;
+ IMG_UINT32 ui32Off;
+ IMG_VOID * pvBase;
+ static IMG_CHAR pszNameInitSuffix[] = "_Init";
+ IMG_UINT32 ui32OffSuffix;
+
+
+
+
+ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+
+ if (psStream)
+ {
+ return ((IMG_VOID *) psStream);
+ }
+
+
+
+ psStream = HostNonPageablePageAlloc(1);
+ psInitStream = HostNonPageablePageAlloc(1);
+ psLFBuffer = HostNonPageablePageAlloc(1);
+ psCtrl = HostNonPageablePageAlloc(1);
+ if (
+ (!psStream) ||
+ (!psInitStream) ||
+ (!psLFBuffer) ||
+ (!psCtrl)
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
+ return((IMG_VOID *) 0);
+ }
+
+
+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
+ HostNonPageablePageFree(psStream);
+ return((IMG_VOID *) 0);
+ }
+
+
+ psCtrl->ui32Flags = ui32Flags;
+ psCtrl->ui32CapMode = ui32CapMode;
+ psCtrl->ui32OutMode = ui32OutMode;
+ psCtrl->ui32DebugLevel = DEBUG_LEVEL_0;
+ psCtrl->ui32DefaultMode = ui32CapMode;
+ psCtrl->ui32Start = 0;
+ psCtrl->ui32End = 0;
+ psCtrl->ui32Current = 0;
+ psCtrl->ui32SampleRate = 1;
+ psCtrl->bInitPhaseComplete = IMG_FALSE;
+
+
+
+ psStream->psNext = 0;
+ psStream->pvBase = pvBase;
+ psStream->psCtrl = psCtrl;
+ psStream->ui32Size = ui32Size * 4096UL;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32DataWritten = 0;
+ psStream->ui32Marker = 0;
+ psStream->bCircularAllowed = IMG_TRUE;
+ psStream->ui32InitPhaseWOff = 0;
+
+
+
+
+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ }
+ HostNonPageablePageFree(psStream);
+ return((IMG_VOID *) 0);
+ }
+
+
+ psInitStream->psNext = 0;
+ psInitStream->pvBase = pvBase;
+ psInitStream->psCtrl = psCtrl;
+ psInitStream->ui32Size = ui32Size * 4096UL;
+ psInitStream->ui32RPtr = 0;
+ psInitStream->ui32WPtr = 0;
+ psInitStream->ui32DataWritten = 0;
+ psInitStream->ui32Marker = 0;
+ psInitStream->bCircularAllowed = IMG_FALSE;
+ psInitStream->ui32InitPhaseWOff = 0;
+
+
+
+ psStream->psInitStream = psInitStream;
+
+
+ psLFBuffer->psStream = psStream;
+ psLFBuffer->ui32BufLen = 0UL;
+
+ g_bHotkeyMiddump = IMG_FALSE;
+ g_ui32HotkeyMiddumpStart = 0xffffffffUL;
+ g_ui32HotkeyMiddumpEnd = 0xffffffffUL;
+
+
+
+ ui32Off = 0;
+
+ do
+ {
+ psStream->szName[ui32Off] = pszName[ui32Off];
+ psInitStream->szName[ui32Off] = pszName[ui32Off];
+ ui32Off++;
+ }
+ while ((pszName[ui32Off] != 0) && (ui32Off < (4096UL - sizeof(DBG_STREAM))));
+ psStream->szName[ui32Off] = pszName[ui32Off];
+
+
+
+ ui32OffSuffix = 0;
+ do
+ {
+ psInitStream->szName[ui32Off] = pszNameInitSuffix[ui32OffSuffix];
+ ui32Off++;
+ ui32OffSuffix++;
+ }
+ while ( (pszNameInitSuffix[ui32OffSuffix] != 0) &&
+ (ui32Off < (4096UL - sizeof(DBG_STREAM))));
+ psInitStream->szName[ui32Off] = pszNameInitSuffix[ui32OffSuffix];
+
+
+
+ psStream->psNext = g_psStreamList;
+ g_psStreamList = psStream;
+
+ psLFBuffer->psNext = g_psLFBufferList;
+ g_psLFBufferList = psLFBuffer;
+
+ AddSIDEntry(psStream);
+
+ return((IMG_VOID *) psStream);
+}
+
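+/* DBGDrivDestroyStream: unlinks the stream and its last-frame buffer from the global lists, deactivates hotkeys for hotkey-capture streams and frees all associated buffers. */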
+void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psStreamThis;
+ PDBG_STREAM psStreamPrev;
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ PDBG_LASTFRAME_BUFFER psLFThis;
+ PDBG_LASTFRAME_BUFFER psLFPrev;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ RemoveSIDEntry(psStream);
+
+ psLFBuffer = FindLFBuf(psStream);
+
+
+
+ psStreamThis = g_psStreamList;
+ psStreamPrev = 0;
+
+ while (psStreamThis)
+ {
+ if (psStreamThis == psStream)
+ {
+ if (psStreamPrev)
+ {
+ psStreamPrev->psNext = psStreamThis->psNext;
+ }
+ else
+ {
+ g_psStreamList = psStreamThis->psNext;
+ }
+
+ psStreamThis = 0;
+ }
+ else
+ {
+ psStreamPrev = psStreamThis;
+ psStreamThis = psStreamThis->psNext;
+ }
+ }
+
+ psLFThis = g_psLFBufferList;
+ psLFPrev = 0;
+
+ while (psLFThis)
+ {
+ if (psLFThis == psLFBuffer)
+ {
+ if (psLFPrev)
+ {
+ psLFPrev->psNext = psLFThis->psNext;
+ }
+ else
+ {
+ g_psLFBufferList = psLFThis->psNext;
+ }
+
+ psLFThis = 0;
+ }
+ else
+ {
+ psLFPrev = psLFThis;
+ psLFThis = psLFThis->psNext;
+ }
+ }
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
+ {
+ DeactivateHotKeys();
+ }
+
+
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->psCtrl);
+ HostNonPageablePageFree(psStream->pvBase);
+ HostNonPageablePageFree(psStream->psInitStream->pvBase);
+ }
+ else
+ {
+ HostNonPageablePageFree(psStream->psCtrl);
+ HostPageablePageFree(psStream->pvBase);
+ HostPageablePageFree(psStream->psInitStream->pvBase);
+ }
+
+ HostNonPageablePageFree(psStream->psInitStream);
+ HostNonPageablePageFree(psStream);
+ HostNonPageablePageFree(psLFBuffer);
+
+ if (g_psStreamList == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+ }
+
+ return;
+}
+
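+/* DBGDrivFindStream: exact-name lookup of a stream; when bResetStream is set the read/write pointers are reset and the init phase is marked complete. */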
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ PDBG_STREAM psStream;
+ PDBG_STREAM psThis;
+ IMG_UINT32 ui32Off;
+ IMG_BOOL bAreSame;
+
+ psStream = 0;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+ pszName,
+ (bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+
+
+ for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
+ {
+ bAreSame = IMG_TRUE;
+ ui32Off = 0;
+
+ if (strlen(psThis->szName) == strlen(pszName))
+ {
+ while ((psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && (ui32Off < 128) && bAreSame)
+ {
+ if (psThis->szName[ui32Off] != pszName[ui32Off])
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ ui32Off++;
+ }
+ }
+ else
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ if (bAreSame)
+ {
+ psStream = psThis;
+ break;
+ }
+ }
+
+ if(bResetStream && psStream)
+ {
+ static IMG_CHAR szComment[] = "-- Init phase terminated\r\n";
+ psStream->psInitStream->ui32RPtr = 0;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten;
+ if (psStream->psCtrl->bInitPhaseComplete == IMG_FALSE)
+ {
+ if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
+ {
+ DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01);
+ }
+ psStream->psCtrl->bInitPhaseComplete = IMG_TRUE;
+ }
+
+ {
+
+
+ psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker bo %x, total bw %x",
+ psStream->szName,
+ psStream->psInitStream->ui32InitPhaseWOff,
+ psStream->psInitStream->ui32DataWritten ));
+ }
+ }
+
+ return((IMG_VOID *) psStream);
+}
+
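+/* DBGDrivInvalidateStream: appends an "**OUTOFMEM" marker to the stream and flags it read-only so no further writes are accepted. */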
+static void IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+ IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32Off = 0;
+ IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+ IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s\r\n", psStream->szName ));
+
+
+
+
+
+
+
+
+
+ ui32Space = SpaceInStream(psStream);
+
+
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+ }
+
+ while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+ ui32Off++;
+ ui32WPtr++;
+ }
+ pui8Buffer[ui32WPtr++] = '\0';
+ psStream->ui32WPtr = ui32WPtr;
+
+
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+static IMG_VOID InvalidateAllStreams(IMG_VOID)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ while (psStream != IMG_NULL)
+ {
+ DBGDrivInvalidateStream(psStream);
+ psStream = psStream->psNext;
+ }
+ return;
+}
+
+
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+ return(0);
+ }
+ }
+ else
+ {
+ if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+ return(0);
+ }
+ }
+ }
+
+ return(DBGDrivWriteString(psStream,pszString,ui32Level));
+
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Len;
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32WPtr;
+ IMG_UINT8 * pui8Buffer;
+
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0)
+ {
+ if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString));
+ }
+
+
+
+ if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_MONO)
+ {
+ MonoOut(psStream->szName,IMG_FALSE);
+ MonoOut(": ",IMG_FALSE);
+ MonoOut(pszString,IMG_TRUE);
+ }
+ }
+
+
+
+ if (
+ !(
+ ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) ||
+ ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0)
+ )
+ )
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ ui32Space=SpaceInStream(psStream);
+
+
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+
+ ui32Len = 0;
+ ui32WPtr = psStream->ui32WPtr;
+ pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ while((pszString[ui32Len] != 0) && (ui32Len < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
+ ui32Len++;
+ ui32WPtr++;
+ if (ui32WPtr == psStream->ui32Size)
+ {
+ ui32WPtr = 0;
+ }
+ }
+
+ if (ui32Len < ui32Space)
+ {
+
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
+ ui32Len++;
+ ui32WPtr++;
+ if (ui32WPtr == psStream->ui32Size)
+ {
+ ui32WPtr = 0;
+ }
+
+
+ psStream->ui32WPtr = ui32WPtr;
+ psStream->ui32DataWritten+= ui32Len;
+ } else
+ {
+ ui32Len = 0;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32Len)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+
+ return(ui32Len);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
+{
+ IMG_UINT32 ui32OutLen;
+ IMG_UINT32 ui32Len;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT8 *pui8Buff;
+
+
+
+ if (!StreamValidForRead(psStream))
+ {
+ return(0);
+ }
+
+
+
+ pui8Buff = (IMG_UINT8 *)psStream->pvBase;
+ ui32Offset = psStream->ui32RPtr;
+
+ if (psStream->ui32RPtr == psStream->ui32WPtr)
+ {
+ return(0);
+ }
+
+
+
+ ui32Len = 0;
+ while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr))
+ {
+ ui32Offset++;
+ ui32Len++;
+
+
+
+ if (ui32Offset == psStream->ui32Size)
+ {
+ ui32Offset = 0;
+ }
+ }
+
+ ui32OutLen = ui32Len + 1;
+
+
+
+ if (ui32Len > ui32Limit)
+ {
+ return(0);
+ }
+
+
+
+ ui32Offset = psStream->ui32RPtr;
+ ui32Len = 0;
+
+ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit))
+ {
+ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
+ ui32Offset++;
+ ui32Len++;
+
+
+
+ if (ui32Offset == psStream->ui32Size)
+ {
+ ui32Offset = 0;
+ }
+ }
+
+ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
+
+ psStream->ui32RPtr = ui32Offset + 1;
+
+ if (psStream->ui32RPtr == psStream->ui32Size)
+ {
+ psStream->ui32RPtr = 0;
+ }
+
+ return(ui32OutLen);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Space;
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psMainStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psMainStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else if (psMainStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psMainStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+
+
+ ui32Space=SpaceInStream(psStream);
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %x is disabled", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+ if (ui32Space < 8)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %x is full", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+
+
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 8;
+ }
+
+
+
+ Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4);
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else
+ {
+ if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ }
+
+ return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level));
+}
+
+
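+/* DBGDrivWritePersist: writes into the init-phase buffer so the data is kept with the initialisation stream rather than the per-frame capture buffer. */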
+static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ DBG_STREAM *psStream;
+ PVR_UNREFERENCED_PARAMETER(ui32Level);
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+ psStream = psMainStream->psInitStream;
+ if(psStream->bCircularAllowed == IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "DBGDrivWritePersist: Init phase is a circular buffer, some data may be lost"));
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Append %x b to %s: Roff = %x, WOff = %x [bw = %x]",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr,
+ psStream->ui32DataWritten));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0);
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
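+/* DBGDrivRead: copies up to ui32OutBuffSize bytes from the selected (main or init) stream, honouring the init-phase write offset and the circular-buffer wrap, and advances the read pointer. */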
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Data;
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForRead(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %x is invalid", (IMG_UINTPTR_T) psMainStream));
+ return(0);
+ }
+
+ if(bReadInitBuffer)
+ {
+ psStream = psMainStream->psInitStream;
+ }
+ else
+ {
+ psStream = psMainStream;
+ }
+
+
+ if (psStream->ui32RPtr == psStream->ui32WPtr ||
+ ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+ {
+ return(0);
+ }
+
+
+
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+ }
+ else
+ {
+ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+ }
+
+
+
+ if ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+ {
+ ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+ }
+
+
+
+ if (ui32Data > ui32OutBuffSize)
+ {
+ ui32Data = ui32OutBuffSize;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+ ui32Data,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+
+
+ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+ {
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+
+ HostMemCopy((IMG_VOID *) pui8OutBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32B1);
+
+
+ HostMemCopy((IMG_VOID *)(pui8OutBuf + ui32B1),
+ psStream->pvBase,
+ ui32B2);
+
+
+ psStream->ui32RPtr = ui32B2;
+ }
+ else
+ {
+ HostMemCopy((IMG_VOID *) pui8OutBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32Data);
+
+
+ psStream->ui32RPtr += ui32Data;
+
+
+ if (psStream->ui32RPtr == psStream->ui32Size)
+ {
+ psStream->ui32RPtr = 0;
+ }
+ }
+
+ return(ui32Data);
+}
+
+void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = ui32Mode;
+ psStream->psCtrl->ui32DefaultMode = ui32Mode;
+ psStream->psCtrl->ui32Start = ui32Start;
+ psStream->psCtrl->ui32End = ui32End;
+ psStream->psCtrl->ui32SampleRate = ui32SampleRate;
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
+ {
+ ActivateHotKeys(psStream);
+ }
+}
+
+void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32OutMode = ui32OutMode;
+}
+
+void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32DebugLevel = ui32DebugLevel;
+}
+
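+/* DBGDrivSetFrame: records the current frame, recomputes the ENABLESAMPLE flag against the capture range and sample rate, and services hotkey-triggered mid-dump windows. */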
+void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32Current = ui32Frame;
+
+ if ((ui32Frame >= psStream->psCtrl->ui32Start) &&
+ (ui32Frame <= psStream->psCtrl->ui32End) &&
+ (((ui32Frame - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
+ }
+ else
+ {
+ psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
+ }
+
+ if (g_bHotkeyMiddump)
+ {
+ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
+ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
+ (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
+ }
+ else
+ {
+ psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
+ if (psStream->psCtrl->ui32Current > g_ui32HotkeyMiddumpEnd)
+ {
+ g_bHotkeyMiddump = IMG_FALSE;
+ }
+ }
+ }
+
+
+ if (g_bHotKeyRegistered)
+ {
+ g_bHotKeyRegistered = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%p)!\n",psStream));
+
+ if (!g_bHotKeyPressed)
+ {
+
+
+ g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2;
+
+
+
+ g_bHotKeyPressed = IMG_TRUE;
+ }
+
+
+
+ if (((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
+ ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0))
+ {
+ if (!g_bHotkeyMiddump)
+ {
+
+ g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1;
+ g_ui32HotkeyMiddumpEnd = 0xffffffff;
+ g_bHotkeyMiddump = IMG_TRUE;
+ PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->psCtrl->ui32SampleRate));
+ }
+ else
+ {
+
+ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
+ PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n"));
+ }
+ }
+
+ }
+
+
+
+ if (psStream->psCtrl->ui32Current > g_ui32HotKeyFrame)
+ {
+ g_bHotKeyPressed = IMG_FALSE;
+ }
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return(0);
+ }
+
+ return(psStream->psCtrl->ui32Current);
+}
+
+IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32NextFrame;
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return IMG_FALSE;
+ }
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ ui32NextFrame = psStream->psCtrl->ui32Current + psStream->psCtrl->ui32SampleRate;
+ if (ui32NextFrame > psStream->psCtrl->ui32End)
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
+{
+ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL;
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return IMG_FALSE;
+ }
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+
+ if (g_bHotkeyMiddump)
+ {
+ if ((psStream->psCtrl->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) &&
+ (psStream->psCtrl->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) &&
+ ((((psStream->psCtrl->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ return IMG_TRUE;
+ }
+ }
+ else
+ {
+ if ((psStream->psCtrl->ui32Current >= (psStream->psCtrl->ui32Start - ui32FrameShift)) &&
+ (psStream->psCtrl->ui32Current <= (psStream->psCtrl->ui32End - ui32FrameShift)) &&
+ ((((psStream->psCtrl->ui32Current + ui32FrameShift) - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ return IMG_TRUE;
+ }
+ }
+ }
+ else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed))
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = ui32Mode;
+}
+
+void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = psStream->psCtrl->ui32DefaultMode;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->ui32InitPhaseWOff = ui32Marker;
+}
+
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->ui32Marker = ui32Marker;
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return 0;
+ }
+
+ return psStream->ui32Marker;
+}
+
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream)
+{
+ PDBG_STREAM psStream;
+
+
+
+ if (!StreamValid(psMainStream))
+ {
+ return 0;
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ return psStream->ui32DataWritten;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset)
+{
+ PDBG_STREAM psStream;
+
+
+
+ if (!StreamValid(psMainStream))
+ {
+ return;
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "DBGDrivSetStreamOffset: %s set to %x b",
+ psStream->szName,
+ ui32StreamOffset));
+ psStream->ui32DataWritten = ui32StreamOffset;
+}
+
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID)
+{
+ return((IMG_PVOID)&g_sDBGKMServices);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+
+ psLFBuffer = FindLFBuf(psStream);
+
+ if (ui32Flags & WRITELF_FLAGS_RESETBUF)
+ {
+
+
+ ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize;
+ HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize);
+ psLFBuffer->ui32BufLen = ui32InBuffSize;
+ }
+ else
+ {
+
+
+ ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize;
+ HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize);
+ psLFBuffer->ui32BufLen += ui32InBuffSize;
+ }
+
+ return(ui32InBuffSize);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ IMG_UINT32 ui32Data;
+
+
+
+ if (!StreamValidForRead(psStream))
+ {
+ return(0);
+ }
+
+ psLFBuffer = FindLFBuf(psStream);
+
+
+
+ ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
+
+
+
+ HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data);
+
+ return ui32Data;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream)
+{
+ psStream->psCtrl->bInitPhaseComplete = IMG_FALSE;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream)
+{
+ psStream->psCtrl->bInitPhaseComplete = IMG_TRUE;
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+ HostWaitForEvent(eEvent);
+}
+#endif
+
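+/* ExpandStreamBuffer: allocates a larger page-aligned buffer, copies the existing (possibly wrapped) data to its start, frees the old buffer and fixes up the read/write offsets. */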
+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+ IMG_VOID * pvNewBuf;
+ IMG_UINT32 ui32NewSizeInPages;
+ IMG_UINT32 ui32NewWOffset;
+ IMG_UINT32 ui32NewROffset;
+ IMG_UINT32 ui32SpaceInOldBuf;
+
+
+
+ if (psStream->ui32Size >= ui32NewSize)
+ {
+ return IMG_FALSE;
+ }
+
+
+
+ ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+
+
+ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+ }
+ else
+ {
+ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+ }
+
+ if (pvNewBuf == IMG_NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ if(psStream->bCircularAllowed)
+ {
+
+
+
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+
+
+ HostMemCopy(pvNewBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ psStream->ui32WPtr - psStream->ui32RPtr);
+ }
+ else
+ {
+ IMG_UINT32 ui32FirstCopySize;
+
+
+
+ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+
+ HostMemCopy(pvNewBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32FirstCopySize);
+
+
+
+ HostMemCopy((IMG_VOID *)((IMG_UINTPTR_T)pvNewBuf + ui32FirstCopySize),
+ (IMG_VOID *)(IMG_PBYTE)psStream->pvBase,
+ psStream->ui32WPtr);
+ }
+ ui32NewROffset = 0;
+ }
+ else
+ {
+
+ HostMemCopy(pvNewBuf, psStream->pvBase, psStream->ui32WPtr);
+ ui32NewROffset = psStream->ui32RPtr;
+ }
+
+
+
+
+ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ }
+
+
+
+ psStream->pvBase = pvNewBuf;
+ psStream->ui32RPtr = ui32NewROffset;
+ psStream->ui32WPtr = ui32NewWOffset;
+ psStream->ui32Size = ui32NewSizeInPages * 4096;
+
+ return IMG_TRUE;
+}
+
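+/* SpaceInStream: free bytes left in the stream - the distance from write to read pointer for circular buffers, the remaining tail for linear ones. */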
+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Space;
+
+ if (psStream->bCircularAllowed)
+ {
+
+ if (psStream->ui32RPtr > psStream->ui32WPtr)
+ {
+ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+ }
+ else
+ {
+ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+ }
+ }
+ else
+ {
+
+ ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+ }
+
+ return ui32Space;
+}
+
+
+void DestroyAllStreams(void)
+{
+ while (g_psStreamList != IMG_NULL)
+ {
+ DBGDrivDestroyStream(g_psStreamList);
+ }
+ return;
+}
+
+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+
+ psLFBuffer = g_psLFBufferList;
+
+ while (psLFBuffer)
+ {
+ if (psLFBuffer->psStream == psStream)
+ {
+ break;
+ }
+
+ psLFBuffer = psLFBuffer->psNext;
+ }
+
+ return psLFBuffer;
+}
+
diff --git a/drivers/gpu/pvr/dbgdrv/dbgdriv.h b/drivers/gpu/pvr/dbgdrv/dbgdriv.h
new file mode 100644
index 0000000..2db4843
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/dbgdriv.h
@@ -0,0 +1,122 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+#define BUFFER_SIZE 64*PAGESIZE
+
+#define DBGDRIV_VERSION 0x100
+#define MAX_PROCESSES 2
+#define BLOCK_USED 0x01
+#define BLOCK_LOCKED 0x02
+#define DBGDRIV_MONOBASE 0x000B0000
+
+
+extern IMG_VOID * g_pvAPIMutex;
+
+IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
+ IMG_UINT32 ui32CapMode,
+ IMG_UINT32 ui32OutMode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Pages);
+IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream);
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
+IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
+IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
+IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream);
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
+IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+
+IMG_VOID DestroyAllStreams(IMG_VOID);
+
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+IMG_VOID HostMemSet(IMG_VOID *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+IMG_VOID HostMemCopy(IMG_VOID *pvDest,IMG_VOID *pvSrc,IMG_UINT32 ui32Size);
+IMG_VOID MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine);
+
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream);
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size);
+IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream);
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
+IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
+IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier);
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+
+#endif
+
diff --git a/drivers/gpu/pvr/dbgdrv/dbgdriv_ioctl.h b/drivers/gpu/pvr/dbgdrv/dbgdriv_ioctl.h
new file mode 100644
index 0000000..130c146
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/dbgdriv_ioctl.h
@@ -0,0 +1,35 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _IOCTL_
+#define _IOCTL_
+
+#define MAX_DBGVXD_W32_API 25
+
+extern IMG_UINT32 (*g_DBGDrivProc[MAX_DBGVXD_W32_API])(IMG_VOID *, IMG_VOID *);
+
+#endif
+
diff --git a/drivers/gpu/pvr/dbgdrv/handle.c b/drivers/gpu/pvr/dbgdrv/handle.c
new file mode 100644
index 0000000..ddffb3f
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/handle.c
@@ -0,0 +1,121 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_defs.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+
+#define MAX_SID_ENTRIES 8
+
+typedef struct _SID_INFO
+{
+ PDBG_STREAM psStream;
+} SID_INFO, *PSID_INFO;
+
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+
+ return (IMG_SID)iIdx+1;
+ }
+ }
+ }
+
+ return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+
+ IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+ if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+ {
+ return gaSID_Xlat_Table[iIdx].psStream;
+ }
+ else
+ {
+ return (PDBG_STREAM)IMG_NULL;
+ }
+}
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+
+ return IMG_TRUE;
+ }
+
+ if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)IMG_NULL)
+ {
+
+ gaSID_Xlat_Table[iIdx].psStream = psStream;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)IMG_NULL;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+
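
The SID table above hands user space small 1-based handles instead of raw kernel pointers: AddSIDEntry() claims one of the MAX_SID_ENTRIES (8) slots, PStream2SID() translates the pointer into its handle, SID2PStream() validates a handle coming back in through an ioctl, and RemoveSIDEntry() frees the slot. A minimal sketch of that pairing; RegisterStreamHandle() and UnregisterStreamHandle() are hypothetical wrappers, not functions in this patch:

    #include "img_defs.h"
    #include "dbgdrvif.h"
    #include "dbgdriv.h"

    /* Hypothetical wrapper: publish a freshly created stream to user space. */
    static IMG_SID RegisterStreamHandle(PDBG_STREAM psStream)
    {
        /* Claim a translation slot; fails once all MAX_SID_ENTRIES are in use. */
        if (!AddSIDEntry(psStream))
            return (IMG_SID)0;

        /* The 1-based table index becomes the user-visible handle (0 == invalid). */
        return PStream2SID(psStream);
    }

    /* Hypothetical wrapper: tear-down path for a handle received from user space. */
    static IMG_VOID UnregisterStreamHandle(IMG_SID hStream)
    {
        PDBG_STREAM psStream = SID2PStream(hStream);

        if (psStream != (PDBG_STREAM)IMG_NULL)
            RemoveSIDEntry(psStream);
    }
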
diff --git a/drivers/gpu/pvr/dbgdrv/hostfunc.c b/drivers/gpu/pvr/dbgdrv/hostfunc.c
new file mode 100644
index 0000000..18c8898
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/hostfunc.c
@@ -0,0 +1,324 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+#include <linux/mutex.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#include <linux/hardirq.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+#if defined(MODULE) && defined(DEBUG) && !defined(SUPPORT_DRI_DRM)
+IMG_UINT32 gPVRDebugLevel = (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING);
+
+#define PVR_STRING_TERMINATOR '\0'
+#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
+
+void PVRSRVDebugPrintf (
+ IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR* pszFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR* pszFormat,
+ ...
+ )
+{
+ IMG_BOOL bTrace;
+#if !defined(__sh__)
+ IMG_CHAR *pszLeafName;
+
+ pszLeafName = (char *)strrchr (pszFileName, '\\');
+
+ if (pszLeafName)
+ {
+ pszFileName = pszLeafName;
+ }
+#endif
+
+ bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE;
+
+ if (gPVRDebugLevel & ui32DebugLevel)
+ {
+ va_list vaArgs;
+ char szBuffer[256];
+ char *szBufferEnd = szBuffer;
+ char *szBufferLimit = szBuffer + sizeof(szBuffer) - 1;
+
+
+ *szBufferLimit = '\0';
+
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "PVR_K:");
+ szBufferEnd += strlen(szBufferEnd);
+
+
+ if (bTrace == IMG_FALSE)
+ {
+ switch(ui32DebugLevel)
+ {
+ case DBGPRIV_FATAL:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Fatal):");
+ break;
+ }
+ case DBGPRIV_ERROR:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Error):");
+ break;
+ }
+ case DBGPRIV_WARNING:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Warning):");
+ break;
+ }
+ case DBGPRIV_MESSAGE:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Message):");
+ break;
+ }
+ case DBGPRIV_VERBOSE:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Verbose):");
+ break;
+ }
+ default:
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Unknown message level)");
+ break;
+ }
+ }
+ szBufferEnd += strlen(szBufferEnd);
+ }
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd, " ");
+ szBufferEnd += strlen(szBufferEnd);
+
+ va_start (vaArgs, pszFormat);
+ vsnprintf(szBufferEnd, szBufferLimit - szBufferEnd, pszFormat, vaArgs);
+ va_end (vaArgs);
+ szBufferEnd += strlen(szBufferEnd);
+
+
+ if (bTrace == IMG_FALSE)
+ {
+ snprintf(szBufferEnd, szBufferLimit - szBufferEnd,
+ " [%d, %s]", (int)ui32Line, pszFileName);
+ szBufferEnd += strlen(szBufferEnd);
+ }
+
+ printk(KERN_INFO "%s\r\n", szBuffer);
+ }
+}
+#endif
+
+IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+ unsigned char *src,*dst;
+ int i;
+
+ src=(unsigned char *)pvSrc;
+ dst=(unsigned char *)pvDst;
+ for(i=0;i<ui32Size;i++)
+ {
+ dst[i]=src[i];
+ }
+#else
+ memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+
+ return 0;
+}
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
+{
+ vfree(pvBase);
+}
+
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
+{
+ vfree(pvBase);
+}
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
+{
+
+ return IMG_NULL;
+}
+
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
+{
+
+}
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
+{
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+typedef struct mutex MUTEX;
+#define INIT_MUTEX(m) mutex_init(m)
+#define DOWN_TRYLOCK(m) (!mutex_trylock(m))
+#define DOWN(m) mutex_lock(m)
+#define UP(m) mutex_unlock(m)
+#else
+typedef struct semaphore MUTEX;
+#define INIT_MUTEX(m) init_MUTEX(m)
+#define DOWN_TRYLOCK(m) down_trylock(m)
+#define DOWN(m) down(m)
+#define UP(m) up(m)
+#endif
+
+IMG_VOID *HostCreateMutex(IMG_VOID)
+{
+ MUTEX *psMutex;
+
+ psMutex = kmalloc(sizeof(*psMutex), GFP_KERNEL);
+ if (psMutex)
+ {
+ INIT_MUTEX(psMutex);
+ }
+
+ return psMutex;
+}
+
+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex)
+{
+ BUG_ON(in_interrupt());
+
+#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
+ if (DOWN_TRYLOCK((MUTEX *)pvMutex))
+ {
+ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
+ DOWN((MUTEX *)pvMutex);
+ }
+#else
+ DOWN((MUTEX *)pvMutex);
+#endif
+}
+
+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex)
+{
+ UP((MUTEX *)pvMutex);
+}
+
+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex)
+{
+ if (pvMutex)
+ {
+ kfree(pvMutex);
+ }
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+#define EVENT_WAIT_TIMEOUT_MS 500
+#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
+
+IMG_INT32 HostCreateEventObjects(IMG_VOID)
+{
+ init_waitqueue_head(&sStreamDataEvent);
+
+ return 0;
+}
+
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+
+ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+ iStreamData = 0;
+ break;
+ default:
+
+ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+ break;
+ }
+}
+
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ iStreamData = 1;
+ wake_up_interruptible(&sStreamDataEvent);
+ break;
+ default:
+ break;
+ }
+}
+
+IMG_VOID HostDestroyEventObjects(IMG_VOID)
+{
+}
+#endif
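
The event-object helpers above give the debug driver a simple blocking handoff: a writer signals DBG_EVENT_STREAM_DATA once data has been appended, and a reader parked in HostWaitForEvent() wakes up, or times out after EVENT_WAIT_TIMEOUT_MS (500 ms). They are only built when SUPPORT_DBGDRV_EVENT_OBJECTS is defined. A sketch of the intended pairing; the producer and consumer functions are hypothetical and not part of this patch:

    #include "img_types.h"
    #include "dbgdrvif.h"
    #include "hostfunc.h"

    #if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
    /* Hypothetical writer path: append to a stream, then wake any blocked reader. */
    static IMG_VOID ExampleStreamWriter(IMG_VOID)
    {
        /* ... copy data into the stream buffer ... */
        HostSignalEvent(DBG_EVENT_STREAM_DATA);
    }

    /* Hypothetical reader path: block until data is signalled or ~500 ms pass. */
    static IMG_VOID ExampleStreamReader(IMG_VOID)
    {
        HostWaitForEvent(DBG_EVENT_STREAM_DATA);
        /* ... drain whatever data is now available ... */
    }
    #endif
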
diff --git a/drivers/gpu/pvr/dbgdrv/hostfunc.h b/drivers/gpu/pvr/dbgdrv/hostfunc.h
new file mode 100644
index 0000000..70192fb
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/hostfunc.h
@@ -0,0 +1,58 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+#define HOST_PAGESIZE (4096)
+#define DBG_MEMORY_INITIALIZER (0xe2)
+
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
+
+IMG_VOID * HostCreateMutex(IMG_VOID);
+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(IMG_VOID);
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
+IMG_VOID HostDestroyEventObjects(IMG_VOID);
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/dbgdrv/hotkey.c b/drivers/gpu/pvr/dbgdrv/hotkey.c
new file mode 100644
index 0000000..a456fee
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/hotkey.c
@@ -0,0 +1,135 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+#if !defined(LINUX)
+#include <ntddk.h>
+#include <windef.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "hostfunc.h"
+
+
+
+
+
+IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF;
+IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
+IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
+
+PRIVATEHOTKEYDATA g_PrivateHotKeyData;
+
+
+IMG_VOID ReadInHotKeys(IMG_VOID)
+{
+ g_PrivateHotKeyData.ui32ScanCode = 0x58;
+ g_PrivateHotKeyData.ui32ShiftState = 0x0;
+
+
+
+#if 0
+ if (_RegOpenKey(HKEY_LOCAL_MACHINE,pszRegPath,&hKey) == ERROR_SUCCESS)
+ {
+
+
+ QueryReg(hKey,"ui32ScanCode",&g_PrivateHotKeyData.ui32ScanCode);
+ QueryReg(hKey,"ui32ShiftState",&g_PrivateHotKeyData.ui32ShiftState);
+ }
+#else
+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode);
+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState);
+#endif
+}
+
+IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo)
+{
+ PDBG_STREAM psStream;
+
+ PVR_UNREFERENCED_PARAMETER(pInfo);
+
+ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n"));
+
+ psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream;
+
+ if (!g_bHotKeyPressed)
+ {
+
+
+ g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2;
+
+
+
+ g_bHotKeyPressed = IMG_TRUE;
+ }
+ }
+}
+
+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream)
+{
+
+
+ ReadInHotKeys();
+
+
+
+ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey)
+ {
+ if (g_PrivateHotKeyData.ui32ScanCode != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n"));
+
+
+
+ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
+
+ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo);
+ }
+ else
+ {
+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
+ }
+ }
+}
+
+IMG_VOID DeactivateHotKeys(IMG_VOID)
+{
+ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n"));
+
+ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
+ }
+}
+
+
diff --git a/drivers/gpu/pvr/dbgdrv/hotkey.h b/drivers/gpu/pvr/dbgdrv/hotkey.h
new file mode 100644
index 0000000..c5d84bb
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/hotkey.h
@@ -0,0 +1,60 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _HOTKEY_
+#define _HOTKEY_
+
+
+typedef struct _hotkeyinfo
+{
+ IMG_UINT8 ui8ScanCode;
+ IMG_UINT8 ui8Type;
+ IMG_UINT8 ui8Flag;
+ IMG_UINT8 ui8Filler1;
+ IMG_UINT32 ui32ShiftState;
+ IMG_UINT32 ui32HotKeyProc;
+ IMG_VOID *pvStream;
+ IMG_UINT32 hHotKey;
+} HOTKEYINFO, *PHOTKEYINFO;
+
+typedef struct _privatehotkeydata
+{
+ IMG_UINT32 ui32ScanCode;
+ IMG_UINT32 ui32ShiftState;
+ HOTKEYINFO sHotKeyInfo;
+} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA;
+
+
+IMG_VOID ReadInHotKeys (IMG_VOID);
+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream);
+IMG_VOID DeactivateHotKeys(IMG_VOID);
+
+IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey);
+IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo);
+IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo);
+
+#endif
+
diff --git a/drivers/gpu/pvr/dbgdrv/ioctl.c b/drivers/gpu/pvr/dbgdrv/ioctl.c
new file mode 100644
index 0000000..47487b0
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/ioctl.c
@@ -0,0 +1,587 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+
+#ifdef LINUX
+#include <asm/uaccess.h>
+#include "pvr_uaccess.h"
+#endif
+
+#include "img_types.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "dbgdriv_ioctl.h"
+
+
+static IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_CREATESTREAM psIn;
+ IMG_VOID * *ppvOut;
+ #ifdef LINUX
+ static IMG_CHAR name[32];
+ #endif
+
+ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+ ppvOut = (IMG_VOID * *) pvOutBuffer;
+
+ #ifdef LINUX
+
+	if(pvr_copy_from_user(name, psIn->u.pszName, 32) != 0)
+	{
+		return IMG_FALSE;
+	}
+	/* The copy above is a fixed 32 bytes rather than a string copy, so
+	   guarantee termination before the name is used as a string. */
+	name[sizeof(name) - 1] = '\0';
+
+ *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages);
+
+ #else
+ *ppvOut = ExtDBGDrivCreateStream(psIn->u.pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages);
+ #endif
+
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM *ppsStream;
+ PDBG_STREAM psStream;
+
+ ppsStream = (PDBG_STREAM *) pvInBuffer;
+ psStream = (PDBG_STREAM) *ppsStream;
+
+ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
+
+ ExtDBGDrivDestroyStream(psStream);
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_FINDSTREAM psParams;
+ IMG_SID * phStream;
+
+ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
+ phStream = (IMG_SID *)pvOutBuffer;
+
+ *phStream = PStream2SID(ExtDBGDrivFindStream(psParams->u.pszName, psParams->bResetStream));
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITESTRING psParams;
+ IMG_UINT32 *pui32OutLen;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivWriteString(psStream,psParams->u.pszString,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITESTRING psParams;
+ IMG_UINT32 *pui32OutLen;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivWriteStringCM(psStream,psParams->u.pszString,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32OutLen;
+ PDBG_IN_READSTRING psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_READSTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivReadString(psStream,
+ psParams->u.pszString,psParams->ui32StringLen);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWrite(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWrite2(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWriteCM(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psInParams->bReadInitBuffer,
+ psInParams->ui32OutBufferSize,
+ psInParams->u.pui8OutBuffer);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetCaptureMode(psStream,
+ psParams->ui32Mode,
+ psParams->ui32Start,
+ psParams->ui32End,
+ psParams->ui32SampleRate);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGOUTMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetOutputMode(psStream,psParams->ui32Mode);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGLEVEL psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetDebugLevel(psStream,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETFRAME psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETFRAME) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetFrame(psStream,psParams->ui32Frame);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivGetFrame(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_ISCAPTUREFRAME psParams;
+ IMG_UINT32 * pui32Current;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer;
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivIsCaptureFrame(psStream,
+ psParams->bCheckPreviousFrame);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_OVERRIDEMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivOverrideMode(psStream,psParams->ui32Mode);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivDefaultMode(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETMARKER psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivGetMarker(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_PVOID * ppvOut;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ ppvOut = (IMG_PVOID *) pvOutBuffer;
+
+ *ppvOut = DBGDrivGetServiceTable();
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITE_LF psInParams;
+ IMG_UINT32 *pui32BytesCopied;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWriteLF(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32BufferSize,
+ psInParams->ui32Level,
+ psInParams->ui32Flags);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivReadLF(psStream,
+ psInParams->ui32OutBufferSize,
+ psInParams->u.pui8OutBuffer);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ ExtDBGDrivWaitForEvent(eEvent);
+
+ return(IMG_TRUE);
+}
+
+IMG_UINT32 (*g_DBGDrivProc[MAX_DBGVXD_W32_API])(IMG_VOID *, IMG_VOID *) =
+{
+ DBGDIOCDrivCreateStream,
+ DBGDIOCDrivDestroyStream,
+ DBGDIOCDrivGetStream,
+ DBGDIOCDrivWriteString,
+ DBGDIOCDrivReadString,
+ DBGDIOCDrivWrite,
+ DBGDIOCDrivRead,
+ DBGDIOCDrivSetCaptureMode,
+ DBGDIOCDrivSetOutMode,
+ DBGDIOCDrivSetDebugLevel,
+ DBGDIOCDrivSetFrame,
+ DBGDIOCDrivGetFrame,
+ DBGDIOCDrivOverrideMode,
+ DBGDIOCDrivDefaultMode,
+ DBGDIOCDrivGetServiceTable,
+ DBGDIOCDrivWrite2,
+ DBGDIOCDrivWriteStringCM,
+ DBGDIOCDrivWriteCM,
+ DBGDIOCDrivSetMarker,
+ DBGDIOCDrivGetMarker,
+ DBGDIOCDrivIsCaptureFrame,
+ DBGDIOCDrivWriteLF,
+ DBGDIOCDrivReadLF,
+ DBGDIOCDrivWaitForEvent
+};
+
diff --git a/drivers/gpu/pvr/dbgdrv/linuxsrv.h b/drivers/gpu/pvr/dbgdrv/linuxsrv.h
new file mode 100644
index 0000000..f1cb02a
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/linuxsrv.h
@@ -0,0 +1,48 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ **************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+typedef struct tagIOCTL_PACKAGE
+{
+ IMG_UINT32 ui32Cmd; // ioctl command
+ IMG_UINT32 ui32Size; // needs to be correctly set
+ IMG_VOID *pInBuffer; // input data buffer
+ IMG_UINT32 ui32InBufferSize; // size of input data buffer
+ IMG_VOID *pOutBuffer; // output data buffer
+ IMG_UINT32 ui32OutBufferSize; // size of output data buffer
+} IOCTL_PACKAGE;
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
+ IMG_UINT32 ui32ControlCode,
+ IMG_VOID *pInBuffer,
+ IMG_UINT32 ui32InBufferSize,
+ IMG_VOID *pOutBuffer,
+ IMG_UINT32 ui32OutBufferSize,
+ IMG_UINT32 *pui32BytesReturned);
+
+#endif /* _LINUXSRV_H__*/
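
IOCTL_PACKAGE is the envelope user space hands to the dbgdrv character device: a DEBUG_SERVICE_* command code (see dbgdrvif.h later in this patch) plus separate in/out buffers and their sizes, which dbgdrv_ioctl() in main.c below copies through a kernel scratch page. A user-space sketch of filling one in; it assumes the device node is /dev/dbgdrv (udev creating a node from the "dbgdrv" class registered in main.c is an assumption) and that the driver headers are available on the user side:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #include "img_types.h"
    #include "dbgdrvif.h"
    #include "linuxsrv.h"

    /* Hypothetical helper: block in the kernel until stream data is signalled. */
    static int ExampleWaitForStreamData(void)
    {
        IMG_UINT32 ui32Event = DBG_EVENT_STREAM_DATA;
        IOCTL_PACKAGE sPkg;
        int iFd = open("/dev/dbgdrv", O_RDWR);  /* node name is an assumption */
        int iRet;

        if (iFd < 0)
            return -1;

        sPkg.ui32Cmd = DEBUG_SERVICE_WAITFOREVENT;
        sPkg.ui32Size = sizeof(sPkg);
        sPkg.pInBuffer = &ui32Event;            /* copied into the scratch page */
        sPkg.ui32InBufferSize = sizeof(ui32Event);
        sPkg.pOutBuffer = (IMG_VOID *)0;        /* this command returns no data */
        sPkg.ui32OutBufferSize = 0;

        iRet = ioctl(iFd, sPkg.ui32Cmd, &sPkg);
        close(iFd);
        return iRet;
    }
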
diff --git a/drivers/gpu/pvr/dbgdrv/main.c b/drivers/gpu/pvr/dbgdrv/main.c
new file mode 100644
index 0000000..6556249
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrv/main.c
@@ -0,0 +1,317 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM)
+#include <linux/pci.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#if defined(SUPPORT_DRI_DRM)
+#include "drmP.h"
+#include "drm.h"
+#endif
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "hotkey.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+
+#if defined(SUPPORT_DRI_DRM)
+
+#include "pvr_drm_shared.h"
+#include "pvr_drm.h"
+
+#else
+
+#define DRVNAME "dbgdrv"
+MODULE_SUPPORTED_DEVICE(DRVNAME);
+
+#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
+static struct class *psDbgDrvClass;
+#endif
+
+static int AssignedMajorNumber = 0;
+
+long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
+
+static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+ return 0;
+}
+
+static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+ return 0;
+}
+
+static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
+{
+ return 0;
+}
+
+static struct file_operations dbgdrv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dbgdrv_ioctl,
+ .open = dbgdrv_open,
+ .release = dbgdrv_release,
+ .mmap = dbgdrv_mmap,
+};
+
+#endif
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table)
+{
+ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+ *fn_table = &g_sDBGKMServices;
+}
+
+#if defined(SUPPORT_DRI_DRM)
+void dbgdrv_cleanup(void)
+#else
+static void __exit dbgdrv_cleanup(void)
+#endif
+{
+#if !defined(SUPPORT_DRI_DRM)
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+ device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
+ class_destroy(psDbgDrvClass);
+#endif
+ unregister_chrdev(AssignedMajorNumber, DRVNAME);
+#endif
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ HostDestroyEventObjects();
+#endif
+ HostDestroyMutex(g_pvAPIMutex);
+ return;
+}
+
+#if defined(SUPPORT_DRI_DRM)
+IMG_INT dbgdrv_init(void)
+#else
+static int __init dbgdrv_init(void)
+#endif
+{
+#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
+ struct device *psDev;
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+ int err = -EBUSY;
+#endif
+
+
+ if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
+ {
+ return -ENOMEM;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+ (void) HostCreateEventObjects();
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+ AssignedMajorNumber =
+ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
+
+ if (AssignedMajorNumber <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
+ goto ErrDestroyEventObjects;
+ }
+
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+
+ psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
+ if (IS_ERR(psDbgDrvClass))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
+ __func__, PTR_ERR(psDbgDrvClass)));
+ goto ErrUnregisterCharDev;
+ }
+
+ psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0),
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+ NULL,
+#endif
+ DRVNAME);
+ if (IS_ERR(psDev))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
+ __func__, PTR_ERR(psDev)));
+ goto ErrDestroyClass;
+ }
+#endif
+#endif
+
+ return 0;
+
+#if !defined(SUPPORT_DRI_DRM)
+	/* Unwind in reverse order of setup: each label releases what was
+	   acquired last and falls through to release the earlier resources. */
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+ErrDestroyClass:
+	class_destroy(psDbgDrvClass);
+ErrUnregisterCharDev:
+	unregister_chrdev(AssignedMajorNumber, DRVNAME);
+#endif
+ErrDestroyEventObjects:
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	HostDestroyEventObjects();
+#endif
+	return err;
+#endif
+}
+
+#if defined(SUPPORT_DRI_DRM)
+int dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
+#else
+long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+#endif
+{
+ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
+ char *buffer, *in, *out;
+ unsigned int cmd;
+
+ if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
+ return -1;
+ }
+
+ buffer = (char *) HostPageablePageAlloc(1);
+ if(!buffer)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
+ return -EFAULT;
+ }
+
+ in = buffer;
+ out = buffer + (PAGE_SIZE >>1);
+
+ if(pvr_copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
+ {
+ goto init_failed;
+ }
+
+
+ cmd = MAKEIOCTLINDEX(pIP->ui32Cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;
+
+ if(pIP->ui32Cmd == DEBUG_SERVICE_READ)
+ {
+ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
+ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+ DBG_STREAM *psStream;
+ IMG_CHAR *ui8Tmp;
+
+ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
+
+ if(!ui8Tmp)
+ {
+ goto init_failed;
+ }
+
+		psStream = SID2PStream(psReadInParams->hStream);
+		if(!psStream)
+		{
+			/* Bad handle: release the temporary buffer before bailing out. */
+			vfree(ui8Tmp);
+			goto init_failed;
+		}
+
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psReadInParams->bReadInitBuffer,
+ psReadInParams->ui32OutBufferSize,
+ ui8Tmp);
+
+ if(pvr_copy_to_user(psReadInParams->u.pui8OutBuffer,
+ ui8Tmp,
+ *pui32BytesCopied) != 0)
+ {
+ vfree(ui8Tmp);
+ goto init_failed;
+ }
+
+ vfree(ui8Tmp);
+ }
+	else
+	{
+		/* Refuse command indices that fall outside the dispatch table. */
+		if(cmd >= MAX_DBGVXD_W32_API || !g_DBGDrivProc[cmd])
+			goto init_failed;
+
+		(g_DBGDrivProc[cmd])(in, out);
+	}
+
+ if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
+ {
+ goto init_failed;
+ }
+
+ HostPageablePageFree((IMG_VOID *)buffer);
+ return 0;
+
+init_failed:
+ HostPageablePageFree((IMG_VOID *)buffer);
+ return -EFAULT;
+}
+
+
+IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey)
+{
+ PVR_UNREFERENCED_PARAMETER(hHotKey);
+}
+
+IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ScanCode);
+ PVR_UNREFERENCED_PARAMETER(ui32ShiftState);
+ PVR_UNREFERENCED_PARAMETER(psInfo);
+}
+
+EXPORT_SYMBOL(DBGDrvGetServiceTable);
+
+#if !defined(SUPPORT_DRI_DRM)
+subsys_initcall(dbgdrv_init);
+module_exit(dbgdrv_cleanup);
+#endif
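
On Linux the CTL_CODE() macro in dbgdrvif.h (added below) collapses to its function number, so the lookup in dbgdrv_ioctl() above is pure arithmetic: MAKEIOCTLINDEX() keeps the low 12 bits and the table index is that value minus DEBUG_SERVICE_IOCTL_BASE minus one. For example, DEBUG_SERVICE_WRITE is 0x800 + 0x06, which gives index 5, the DBGDIOCDrivWrite slot in g_DBGDrivProc[]. A sketch of that lookup in isolation; ExampleDispatch() is illustrative and not part of this patch:

    #include "img_types.h"
    #include "dbgdrvif.h"
    #include "dbgdriv_ioctl.h"

    /* Hypothetical stand-alone version of the table lookup done in dbgdrv_ioctl(). */
    static IMG_UINT32 ExampleDispatch(IMG_UINT32 ui32Cmd, IMG_VOID *pvIn, IMG_VOID *pvOut)
    {
        /* e.g. DEBUG_SERVICE_WRITE == 0x800 + 0x06  ->  index 5  ->  DBGDIOCDrivWrite */
        IMG_UINT32 ui32Index = MAKEIOCTLINDEX(ui32Cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;

        if (ui32Index >= MAX_DBGVXD_W32_API || !g_DBGDrivProc[ui32Index])
            return IMG_FALSE;  /* unknown command or unpopulated table slot */

        return g_DBGDrivProc[ui32Index](pvIn, pvOut);
    }
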
diff --git a/drivers/gpu/pvr/dbgdrvif.h b/drivers/gpu/pvr/dbgdrvif.h
new file mode 100644
index 0000000..09c1608
--- /dev/null
+++ b/drivers/gpu/pvr/dbgdrvif.h
@@ -0,0 +1,358 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+
+*****************************************************************************/
+#ifndef _DBGDRVIF_
+#define _DBGDRVIF_
+
+
+#if defined(__linux__)
+
+#define FILE_DEVICE_UNKNOWN 0
+#define METHOD_BUFFERED 0
+#define FILE_ANY_ACCESS 0
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) (Function)
+#define MAKEIOCTLINDEX(i) ((i) & 0xFFF)
+
+#else
+
+#include "ioctldef.h"
+
+#endif
+
+/*****************************************************************************
+ Stream mode stuff.
+*****************************************************************************/
+#define DEBUG_CAPMODE_FRAMED 0x00000001UL
+#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL
+#define DEBUG_CAPMODE_HOTKEY 0x00000004UL
+
+#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL
+#define DEBUG_OUTMODE_MONO 0x00000002UL
+#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL
+#define DEBUG_OUTMODE_ASYNC 0x00000008UL
+#define DEBUG_OUTMODE_SGXVGA 0x00000010UL
+
+#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL
+#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
+#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL
+#define DEBUG_FLAGS_READONLY 0x00000008UL
+#define DEBUG_FLAGS_WRITEONLY 0x00000010UL
+
+#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL
+
+/*****************************************************************************
+ Debug level control. Only bothered with the first 12 levels, I suspect you
+ get the idea...
+*****************************************************************************/
+#define DEBUG_LEVEL_0 0x00000001UL
+#define DEBUG_LEVEL_1 0x00000003UL
+#define DEBUG_LEVEL_2 0x00000007UL
+#define DEBUG_LEVEL_3 0x0000000FUL
+#define DEBUG_LEVEL_4 0x0000001FUL
+#define DEBUG_LEVEL_5 0x0000003FUL
+#define DEBUG_LEVEL_6 0x0000007FUL
+#define DEBUG_LEVEL_7 0x000000FFUL
+#define DEBUG_LEVEL_8 0x000001FFUL
+#define DEBUG_LEVEL_9 0x000003FFUL
+#define DEBUG_LEVEL_10 0x000007FFUL
+#define DEBUG_LEVEL_11 0x00000FFFUL
+
+#define DEBUG_LEVEL_SEL0 0x00000001UL
+#define DEBUG_LEVEL_SEL1 0x00000002UL
+#define DEBUG_LEVEL_SEL2 0x00000004UL
+#define DEBUG_LEVEL_SEL3 0x00000008UL
+#define DEBUG_LEVEL_SEL4 0x00000010UL
+#define DEBUG_LEVEL_SEL5 0x00000020UL
+#define DEBUG_LEVEL_SEL6 0x00000040UL
+#define DEBUG_LEVEL_SEL7 0x00000080UL
+#define DEBUG_LEVEL_SEL8 0x00000100UL
+#define DEBUG_LEVEL_SEL9 0x00000200UL
+#define DEBUG_LEVEL_SEL10 0x00000400UL
+#define DEBUG_LEVEL_SEL11 0x00000800UL
+
+/*****************************************************************************
+ IOCTL values.
+*****************************************************************************/
+#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
+#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_DESTROYSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITESTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READSTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETDEBUGMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETDEBUGLEVEL CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_OVERRIDEMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_DEFAULTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITE2 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITESTRINGCM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITECM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_ISCAPTUREFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WRITELF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READLF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WAITFOREVENT CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETCONNNOTIFY CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x19, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+typedef enum _DBG_EVENT_
+{
+ DBG_EVENT_STREAM_DATA = 1
+} DBG_EVENT;
+
+
+/*****************************************************************************
+ In/Out Structures
+*****************************************************************************/
+typedef struct _DBG_IN_CREATESTREAM_
+{
+ union
+ {
+ IMG_CHAR *pszName;
+ IMG_UINT64 ui64Name;
+ } u;
+ IMG_UINT32 ui32Pages;
+ IMG_UINT32 ui32CapMode;
+ IMG_UINT32 ui32OutMode;
+}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
+
+typedef struct _DBG_IN_FINDSTREAM_
+{
+ union
+ {
+ IMG_CHAR *pszName;
+ IMG_UINT64 ui64Name;
+ }u;
+ IMG_BOOL bResetStream;
+}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
+
+typedef struct _DBG_IN_WRITESTRING_
+{
+ union
+ {
+ IMG_CHAR *pszString;
+ IMG_UINT64 ui64String;
+ } u;
+ IMG_SID hStream;
+ IMG_UINT32 ui32Level;
+}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
+
+typedef struct _DBG_IN_READSTRING_
+{
+ union
+ {
+ IMG_CHAR *pszString;
+ IMG_UINT64 ui64String;
+ } u;
+ IMG_SID hStream;
+ IMG_UINT32 ui32StringLen;
+} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
+
+typedef struct _DBG_IN_SETDEBUGMODE_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Mode;
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32End;
+ IMG_UINT32 ui32SampleRate;
+} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
+
+typedef struct _DBG_IN_SETDEBUGOUTMODE_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Mode;
+} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
+
+typedef struct _DBG_IN_SETDEBUGLEVEL_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Level;
+} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
+
+typedef struct _DBG_IN_SETFRAME_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Frame;
+} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
+
+typedef struct _DBG_IN_WRITE_
+{
+ union
+ {
+ IMG_UINT8 *pui8InBuffer;
+ IMG_UINT64 ui64InBuffer;
+ } u;
+ IMG_SID hStream;
+ IMG_UINT32 ui32Level;
+ IMG_UINT32 ui32TransferSize;
+} DBG_IN_WRITE, *PDBG_IN_WRITE;
+
+typedef struct _DBG_IN_READ_
+{
+ union
+ {
+ IMG_UINT8 *pui8OutBuffer;
+ IMG_UINT64 ui64OutBuffer;
+ } u;
+ IMG_SID hStream;
+ IMG_BOOL bReadInitBuffer;
+ IMG_UINT32 ui32OutBufferSize;
+} DBG_IN_READ, *PDBG_IN_READ;
+
+typedef struct _DBG_IN_OVERRIDEMODE_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Mode;
+} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
+
+typedef struct _DBG_IN_ISCAPTUREFRAME_
+{
+ IMG_SID hStream;
+ IMG_BOOL bCheckPreviousFrame;
+} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
+
+typedef struct _DBG_IN_SETMARKER_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Marker;
+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
+
+typedef struct _DBG_IN_WRITE_LF_
+{
+ union
+ {
+ IMG_UINT8 *pui8InBuffer;
+ IMG_UINT64 ui64InBuffer;
+ } u;
+ IMG_UINT32 ui32Flags;
+ IMG_SID hStream;
+ IMG_UINT32 ui32Level;
+ IMG_UINT32 ui32BufferSize;
+} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
+
+/*
+ Flags for above struct
+*/
+#define WRITELF_FLAGS_RESETBUF 0x00000001UL
+
+/*
+ Common control structure (don't duplicate control in main stream
+ and init phase stream).
+*/
+typedef struct _DBG_STREAM_CONTROL_
+{
+ IMG_BOOL bInitPhaseComplete; /*!< init phase has finished */
+ IMG_UINT32 ui32Flags; /*!< flags (see DEBUG_FLAGS above) */
+
+ IMG_UINT32 ui32CapMode; /*!< capturing mode framed/hot key */
+ IMG_UINT32 ui32OutMode; /*!< output mode, e.g. files */
+ IMG_UINT32 ui32DebugLevel;
+ IMG_UINT32 ui32DefaultMode;
+ IMG_UINT32 ui32Start; /*!< first capture frame */
+ IMG_UINT32 ui32End; /*!< last frame */
+ IMG_UINT32 ui32Current; /*!< current frame */
+ IMG_UINT32 ui32SampleRate; /*!< capture frequency */
+ IMG_UINT32 ui32Reserved;
+} DBG_STREAM_CONTROL, *PDBG_STREAM_CONTROL;
+/*
+ Per-buffer control structure.
+*/
+typedef struct _DBG_STREAM_
+{
+ struct _DBG_STREAM_ *psNext;
+ struct _DBG_STREAM_ *psInitStream;
+ DBG_STREAM_CONTROL *psCtrl;
+ IMG_BOOL bCircularAllowed;
+ IMG_PVOID pvBase;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32RPtr;
+ IMG_UINT32 ui32WPtr;
+ IMG_UINT32 ui32DataWritten;
+ IMG_UINT32 ui32Marker; /*!< marker for file splitting */
+ IMG_UINT32 ui32InitPhaseWOff; /*!< snapshot offset for init phase end for follow-on pdump */
+ IMG_CHAR szName[30]; /* Give this a size, some compilers don't like [] */
+} DBG_STREAM, *PDBG_STREAM;
+
+/*
+ * Allows dbgdrv to notify services when events happen, e.g. pdump.exe starts.
+ * (better than resetting psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0
+ * in SGXGetClientInfoKM.)
+ */
+typedef struct _DBGKM_CONNECT_NOTIFIER_
+{
+ IMG_VOID (IMG_CALLCONV *pfnConnectNotifier) (IMG_VOID);
+} DBGKM_CONNECT_NOTIFIER, *PDBGKM_CONNECT_NOTIFIER;
+
+/*****************************************************************************
+ Kernel mode service table
+*****************************************************************************/
+typedef struct _DBGKM_SERVICE_TABLE_
+{
+ IMG_UINT32 ui32Size;
+ IMG_VOID * (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
+ IMG_VOID (IMG_CALLCONV *pfnDestroyStream) (PDBG_STREAM psStream);
+ IMG_VOID * (IMG_CALLCONV *pfnFindStream) (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
+ IMG_UINT32 (IMG_CALLCONV *pfnWriteString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+ IMG_UINT32 (IMG_CALLCONV *pfnReadString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
+ IMG_UINT32 (IMG_CALLCONV *pfnWriteBIN) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+ IMG_UINT32 (IMG_CALLCONV *pfnReadBIN) (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+ IMG_VOID (IMG_CALLCONV *pfnSetCaptureMode) (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+ IMG_VOID (IMG_CALLCONV *pfnSetOutputMode) (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
+ IMG_VOID (IMG_CALLCONV *pfnSetDebugLevel) (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
+ IMG_VOID (IMG_CALLCONV *pfnSetFrame) (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetFrame) (PDBG_STREAM psStream);
+ IMG_VOID (IMG_CALLCONV *pfnOverrideMode) (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
+ IMG_VOID (IMG_CALLCONV *pfnDefaultMode) (PDBG_STREAM psStream);
+ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+ IMG_UINT32 (IMG_CALLCONV *pfnWriteStringCM) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+ IMG_UINT32 (IMG_CALLCONV *pfnWriteBINCM) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+ IMG_VOID (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream);
+ IMG_VOID (IMG_CALLCONV *pfnStartInitPhase) (PDBG_STREAM psStream);
+ IMG_VOID (IMG_CALLCONV *pfnStopInitPhase) (PDBG_STREAM psStream);
+ IMG_BOOL (IMG_CALLCONV *pfnIsCaptureFrame) (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
+ IMG_UINT32 (IMG_CALLCONV *pfnWriteLF) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
+ IMG_UINT32 (IMG_CALLCONV *pfnReadLF) (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetStreamOffset) (PDBG_STREAM psStream);
+ IMG_VOID (IMG_CALLCONV *pfnSetStreamOffset) (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
+ IMG_BOOL (IMG_CALLCONV *pfnIsLastCaptureFrame) (PDBG_STREAM psStream);
+ IMG_VOID (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
+ IMG_VOID (IMG_CALLCONV *pfnSetConnectNotifier) (DBGKM_CONNECT_NOTIFIER fn_notifier);
+ IMG_UINT32 (IMG_CALLCONV *pfnWritePersist) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
+
+
+#endif
+/*****************************************************************************
+ End of file (DBGDRVIF.H)
+*****************************************************************************/
diff --git a/drivers/gpu/pvr/device.h b/drivers/gpu/pvr/device.h
new file mode 100644
index 0000000..9df2c73
--- /dev/null
+++ b/drivers/gpu/pvr/device.h
@@ -0,0 +1,323 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "ra.h"
+#include "resman.h"
+
+typedef struct _BM_CONTEXT_ BM_CONTEXT;
+
+typedef struct _MMU_HEAP_ MMU_HEAP;
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
+#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
+#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
+#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
+
+typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
+#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
+#define DEVICE_MEMORY_HEAP_KERNEL 1
+#define DEVICE_MEMORY_HEAP_SHARED 2
+#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
+
+#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
+#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
+
+typedef struct _DEVICE_MEMORY_HEAP_INFO_
+{
+
+ IMG_UINT32 ui32HeapID;
+
+
+ IMG_CHAR *pszName;
+
+
+ IMG_CHAR *pszBSName;
+
+
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+
+
+ IMG_UINT32 ui32HeapSize;
+
+
+ IMG_UINT32 ui32Attribs;
+
+
+ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
+
+
+ IMG_HANDLE hDevMemHeap;
+
+
+ RA_ARENA *psLocalDevMemArena;
+
+
+ IMG_UINT32 ui32DataPageSize;
+
+ IMG_UINT32 ui32XTileStride;
+
+} DEVICE_MEMORY_HEAP_INFO;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+
+ IMG_UINT32 ui32AddressSpaceSizeLog2;
+
+
+
+
+ IMG_UINT32 ui32Flags;
+
+
+ IMG_UINT32 ui32HeapCount;
+
+
+ IMG_UINT32 ui32SyncHeapID;
+
+
+ IMG_UINT32 ui32MappingHeapID;
+
+
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+
+ BM_CONTEXT *pBMKernelContext;
+
+
+ BM_CONTEXT *pBMContext;
+
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct DEV_ARENA_DESCRIPTOR_TAG
+{
+ IMG_UINT32 ui32HeapID;
+
+ IMG_CHAR *pszName;
+
+ IMG_DEV_VIRTADDR BaseDevVAddr;
+
+ IMG_UINT32 ui32Size;
+
+ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
+
+
+ IMG_UINT32 ui32DataPageSize;
+
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
+
+} DEV_ARENA_DESCRIPTOR;
+
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+ PVRSRV_DEVICE_IDENTIFIER sDevId;
+
+ IMG_CHAR *pszPDRegRegion;
+
+
+ IMG_UINT32 ui32DataPageMask;
+
+
+ IMG_UINT32 ui32PTEValid;
+ IMG_UINT32 ui32PTSize;
+ IMG_UINT32 ui32PTEAlignShift;
+
+
+ IMG_UINT32 ui32PDEMask;
+ IMG_UINT32 ui32PDEAlignShift;
+
+} PDUMP_MMU_ATTRIB;
+
+typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+ PVRSRV_DEVICE_IDENTIFIER sDevId;
+ IMG_UINT32 ui32RefCount;
+
+
+
+
+ PVRSRV_ERROR (*pfnInitDevice) (IMG_VOID*);
+
+ PVRSRV_ERROR (*pfnDeInitDevice) (IMG_VOID*);
+
+
+ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+
+ PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
+ IMG_VOID (*pfnMMUFinalise)(MMU_CONTEXT*);
+ IMG_VOID (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
+ MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**,PDUMP_MMU_ATTRIB **ppsMMUAttrib);
+ IMG_VOID (*pfnMMUDelete)(MMU_HEAP*);
+ IMG_BOOL (*pfnMMUAlloc)(MMU_HEAP*pMMU,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+ IMG_VOID (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
+ IMG_VOID (*pfnMMUEnable)(MMU_HEAP*);
+ IMG_VOID (*pfnMMUDisable)(MMU_HEAP*);
+ IMG_VOID (*pfnMMUMapPages)(MMU_HEAP *pMMU,
+ IMG_DEV_VIRTADDR devVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+ IMG_VOID (*pfnMMUMapShadow)(MMU_HEAP *pMMU,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR *pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+ IMG_VOID (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
+ IMG_DEV_VIRTADDR dev_vaddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_VOID (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ IMG_BOOL (*pfnMMUIsHeapShared)(MMU_HEAP *);
+#endif
+ IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
+ IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
+ IMG_VOID (*pfnMMUGetCacheFlushRange)(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask);
+ IMG_VOID (*pfnMMUGetPDPhysAddr)(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr);
+
+
+ PVRSRV_ERROR (*pfnAllocMemTilingRange)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32TilingStride,
+ IMG_UINT32 *pui32RangeIndex);
+ PVRSRV_ERROR (*pfnFreeMemTilingRange)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 ui32RangeIndex);
+
+
+ IMG_BOOL (*pfnDeviceISR)(IMG_VOID*);
+
+ IMG_VOID *pvISRData;
+
+ IMG_UINT32 ui32SOCInterruptBit;
+
+ IMG_VOID (*pfnDeviceMISR)(IMG_VOID*);
+
+
+ IMG_VOID (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+ IMG_BOOL bReProcessDeviceCommandComplete;
+
+ IMG_VOID (*pfnCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+
+ DEVICE_MEMORY_INFO sDevMemoryInfo;
+
+
+ IMG_VOID *pvDevice;
+ IMG_UINT32 ui32pvDeviceSize;
+
+
+ PRESMAN_CONTEXT hResManContext;
+
+
+ PSYS_DATA psSysData;
+
+
+ RA_ARENA *psLocalDevMemArena;
+
+ IMG_UINT32 ui32Flags;
+
+ struct _PVRSRV_DEVICE_NODE_ *psNext;
+ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
+
+#if defined(PDUMP)
+
+ PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+ IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+} PVRSRV_DEVICE_NODE;
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
+ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+ IMG_UINT32 ui32SOCInterruptBit,
+ IMG_UINT32 *pui32DeviceIndex );
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
+
+#if !defined(USE_CODE)
+
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Timeoutus,
+ IMG_UINT32 ui32PollPeriodus,
+ IMG_BOOL bAllowPreemption);
+
+#endif
+
+
+#if defined (USING_ISR_INTERRUPTS)
+PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Waitus,
+ IMG_UINT32 ui32Tries);
+#endif
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
+IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
+IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
+IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/deviceclass.c b/drivers/gpu/pvr/deviceclass.c
new file mode 100644
index 0000000..233ac08
--- /dev/null
+++ b/drivers/gpu/pvr/deviceclass.c
@@ -0,0 +1,2371 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "kernelbuffer.h"
+#include "kerneldisplay.h"
+#include "pvr_bridge_km.h"
+#include "pdump_km.h"
+#include "deviceid.h"
+
+#include "lists.h"
+
+PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
+PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
+
+#if defined(SUPPORT_MISR_IN_THREAD)
+void OSVSyncMISR(IMG_HANDLE, IMG_BOOL);
+#endif
+
+#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
+IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
+ IMG_BOOL bScheduleMISR);
+#endif
+typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
+
+typedef struct PVRSRV_DC_BUFFER_TAG
+{
+
+ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
+
+ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
+ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
+} PVRSRV_DC_BUFFER;
+
+typedef struct PVRSRV_DC_SWAPCHAIN_TAG
+{
+ IMG_HANDLE hExtSwapChain;
+ IMG_UINT32 ui32SwapChainID;
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32Flags;
+ PVRSRV_QUEUE_INFO *psQueue;
+ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+ IMG_UINT32 ui32BufferCount;
+ PVRSRV_DC_BUFFER *psLastFlipBuffer;
+ IMG_UINT32 ui32MinSwapInterval;
+ IMG_UINT32 ui32MaxSwapInterval;
+#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
+ PVRSRV_KERNEL_SYNC_INFO **ppsLastSyncInfos;
+ IMG_UINT32 ui32LastNumSyncInfos;
+#endif
+ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
+ struct PVRSRV_DC_SWAPCHAIN_TAG *psNext;
+} PVRSRV_DC_SWAPCHAIN;
+
+
+typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG
+{
+ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
+ IMG_HANDLE hResItem;
+} PVRSRV_DC_SWAPCHAIN_REF;
+
+
+typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
+{
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32DeviceID;
+ IMG_HANDLE hExtDevice;
+ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable;
+ IMG_HANDLE hDevMemContext;
+ PVRSRV_DC_BUFFER sSystemBuffer;
+ struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared;
+} PVRSRV_DISPLAYCLASS_INFO;
+
+
+typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PRESMAN_ITEM hResItem;
+} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
+
+
+typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
+
+typedef struct PVRSRV_BC_BUFFER_TAG
+{
+
+ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
+
+ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
+} PVRSRV_BC_BUFFER;
+
+
+typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
+{
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32DeviceID;
+ IMG_HANDLE hExtDevice;
+ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable;
+ IMG_HANDLE hDevMemContext;
+
+ IMG_UINT32 ui32BufferCount;
+ PVRSRV_BC_BUFFER *psBuffer;
+
+} PVRSRV_BUFFERCLASS_INFO;
+
+
+typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
+{
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+ IMG_HANDLE hResItem;
+} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
+
+
+static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
+{
+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
+
+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
+
+ return psDCPerContextInfo->psDCInfo;
+}
+
+
+static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
+{
+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
+
+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
+
+ return psBCPerContextInfo->psBCInfo;
+}
+
+static IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+ IMG_UINT *pui32DevCount;
+ IMG_UINT32 **ppui32DevID;
+ PVRSRV_DEVICE_CLASS peDeviceClass;
+
+ pui32DevCount = va_arg(va, IMG_UINT*);
+ ppui32DevID = va_arg(va, IMG_UINT32**);
+ peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS);
+
+ if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass)
+ && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
+ {
+ (*pui32DevCount)++;
+ if(*ppui32DevID)
+ {
+ *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex;
+ }
+ }
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
+ IMG_UINT32 *pui32DevCount,
+ IMG_UINT32 *pui32DevID )
+{
+
+ IMG_UINT ui32DevCount = 0;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
+ &PVRSRVEnumerateDCKM_ForEachVaCb,
+ &ui32DevCount,
+ &pui32DevID,
+ DeviceClass);
+
+ if(pui32DevCount)
+ {
+ *pui32DevCount = ui32DevCount;
+ }
+ else if(pui32DevID == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
+ return (PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static
+PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
+ IMG_UINT32 *pui32DeviceID)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SysAcquireData(&psSysData);
+
+
+
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*psDCInfo),
+ (IMG_VOID **)&psDCInfo, IMG_NULL,
+ "Display Class Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
+ (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL,
+ "Function table for SRVKM->DISPLAY") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
+ goto ErrorExit;
+ }
+ OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
+
+
+ *psDCInfo->psFuncTable = *psFuncTable;
+
+
+ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DEVICE_NODE),
+ (IMG_VOID **)&psDeviceNode, IMG_NULL,
+ "Device Node") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
+ goto ErrorExit;
+ }
+ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
+
+ psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
+ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
+ psDeviceNode->ui32RefCount = 1;
+ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
+ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
+ psDeviceNode->psSysData = psSysData;
+
+
+ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed to allocate Device ID"));
+ goto ErrorExit;
+ }
+ psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
+ if (pui32DeviceID)
+ {
+ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
+ }
+
+
+ SysRegisterExternalDevice(psDeviceNode);
+
+
+ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ if(psDCInfo->psFuncTable)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
+ psDCInfo->psFuncTable = IMG_NULL;
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
+
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+static PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+
+ SysAcquireData(&psSysData);
+
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DevIndex,
+ IMG_FALSE,
+ PVRSRV_DEVICE_CLASS_DISPLAY);
+ if (!psDeviceNode)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
+ return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ }
+
+
+ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
+
+
+
+
+ if(psDCInfo->ui32RefCount == 0)
+ {
+
+
+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+
+ SysRemoveExternalDevice(psDeviceNode);
+
+
+
+
+ PVR_ASSERT(psDCInfo->ui32RefCount == 0);
+ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
+ psDCInfo->psFuncTable = IMG_NULL;
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
+
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount));
+ return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static
+PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
+ IMG_UINT32 *pui32DeviceID)
+{
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SysAcquireData(&psSysData);
+
+
+
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*psBCInfo),
+ (IMG_VOID **)&psBCInfo, IMG_NULL,
+ "Buffer Class Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
+ (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL,
+ "Function table for SRVKM->BUFFER") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
+ goto ErrorExit;
+ }
+ OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
+
+
+ *psBCInfo->psFuncTable = *psFuncTable;
+
+
+ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DEVICE_NODE),
+ (IMG_VOID **)&psDeviceNode, IMG_NULL,
+ "Device Node") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
+ goto ErrorExit;
+ }
+ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
+
+ psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
+ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
+ psDeviceNode->ui32RefCount = 1;
+ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
+ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
+ psDeviceNode->psSysData = psSysData;
+
+
+ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
+ goto ErrorExit;
+ }
+ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
+ if (pui32DeviceID)
+ {
+ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
+ }
+
+
+ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ if(psBCInfo->psFuncTable)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
+ psBCInfo->psFuncTable = IMG_NULL;
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
+
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+
+static PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+
+ SysAcquireData(&psSysData);
+
+
+ psDevNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DevIndex,
+ IMG_FALSE,
+ PVRSRV_DEVICE_CLASS_BUFFER);
+
+ if (!psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
+ return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ }
+
+
+
+ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
+
+
+
+
+ if(psBCInfo->ui32RefCount == 0)
+ {
+
+
+ List_PVRSRV_DEVICE_NODE_Remove(psDevNode);
+
+
+
+
+ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
+
+
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
+ psBCInfo->psFuncTable = IMG_NULL;
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
+
+ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL);
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount));
+ return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM,
+ IMG_BOOL bResManCallback)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
+
+ PVR_UNREFERENCED_PARAMETER(bResManCallback);
+
+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
+
+
+ eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem, CLEANUP_WITH_POLL);
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
+ psDCInfo = psDCPerContextInfo->psDCInfo;
+
+ if(psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"CloseDCDeviceCallBack: system buffer (0x%p) still mapped (refcount = %d)",
+ &psDCInfo->sSystemBuffer.sDeviceClassBuffer,
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount));
+#if 0
+
+ return PVRSRV_ERROR_STILL_MAPPED;
+#endif
+ }
+
+ psDCInfo->ui32RefCount--;
+ if(psDCInfo->ui32RefCount == 0)
+ {
+
+ psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
+
+ PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+
+ psDCInfo->hDevMemContext = IMG_NULL;
+ psDCInfo->hExtDevice = IMG_NULL;
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE hDevCookie,
+ IMG_HANDLE *phDeviceKM)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+
+ if(!phDeviceKM || !hDevCookie)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ SysAcquireData(&psSysData);
+
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DeviceID,
+ IMG_FALSE,
+ PVRSRV_DEVICE_CLASS_DISPLAY);
+ if (!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
+ return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ }
+ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
+
+
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*psDCPerContextInfo),
+ (IMG_VOID **)&psDCPerContextInfo, IMG_NULL,
+ "Display Class per Context Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
+
+ if(psDCInfo->ui32RefCount++ == 0)
+ {
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+
+
+ psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
+
+
+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
+ (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
+ &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
+ psDCInfo->ui32RefCount--;
+ return eError;
+ }
+
+
+ eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
+ &psDCInfo->hExtDevice,
+ (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
+ psDCInfo->ui32RefCount--;
+ PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ return eError;
+ }
+
+ psDCPerContextInfo->psDCInfo = psDCInfo;
+ eError = PVRSRVGetDCSystemBufferKM(psDCPerContextInfo, IMG_NULL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to get system buffer"));
+ psDCInfo->ui32RefCount--;
+ PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ return eError;
+ }
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount = 0;
+ }
+ else
+ {
+ psDCPerContextInfo->psDCInfo = psDCInfo;
+ }
+
+ psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
+ psDCPerContextInfo,
+ 0,
+ &CloseDCDeviceCallBack);
+
+
+ *phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_FORMAT *psFormat)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+
+ if(!hDeviceKM || !pui32Count || !psFormat)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat);
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
+ DISPLAY_FORMAT *psFormat,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_DIMS *psDim)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+
+ if(!hDeviceKM || !pui32Count || !psFormat)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
+ IMG_HANDLE *phBuffer)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ IMG_HANDLE hExtBuffer;
+
+ if(!hDeviceKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
+ return eError;
+ }
+
+
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
+
+ psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
+
+
+ if (phBuffer)
+ {
+ *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
+ DISPLAY_INFO *psDisplayInfo)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_ERROR eError;
+
+ if(!hDeviceKM || !psDisplayInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
+ {
+ psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
+
+ if(!hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psSwapChainRef = hSwapChainRef;
+
+ eError = ResManFreeResByPtr(psSwapChainRef->hResItem, CLEANUP_WITH_POLL);
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo;
+ IMG_UINT32 i;
+
+
+ if( psDCInfo->psDCSwapChainShared )
+ {
+ if( psDCInfo->psDCSwapChainShared == psSwapChain )
+ {
+ psDCInfo->psDCSwapChainShared = psSwapChain->psNext;
+ }
+ else
+ {
+ PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
+ psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
+ while( psCurrentSwapChain->psNext )
+ {
+ if( psCurrentSwapChain->psNext != psSwapChain )
+ {
+ psCurrentSwapChain = psCurrentSwapChain->psNext;
+ continue;
+ }
+ psCurrentSwapChain->psNext = psSwapChain->psNext;
+ break;
+ }
+ }
+ }
+
+
+ PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
+
+
+ eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChain: Failed to destroy DC swap chain"));
+ return eError;
+ }
+
+
+ for(i=0; i<psSwapChain->ui32BufferCount; i++)
+ {
+ if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ }
+ }
+
+#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
+ if (psSwapChain->ppsLastSyncInfos)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos,
+ psSwapChain->ppsLastSyncInfos, IMG_NULL);
+ }
+#endif
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
+
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 i;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ for (i = 0; i < psSwapChainRef->psSwapChain->ui32BufferCount; i++)
+ {
+ if (psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DestroyDCSwapChainRefCallBack: swapchain (0x%p) still mapped (ui32MemMapRefCount = %d)",
+ &psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer,
+ psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount));
+#if 0
+
+ return PVRSRV_ERROR_STILL_MAPPED;
+#endif
+ }
+ }
+
+ if(--psSwapChainRef->psSwapChain->ui32RefCount == 0)
+ {
+ eError = DestroyDCSwapChain(psSwapChainRef->psSwapChain);
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, IMG_NULL);
+ return eError;
+}
+
+static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo,
+ IMG_UINT32 ui32SwapChainID)
+{
+ PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
+
+ for(psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
+ psCurrentSwapChain;
+ psCurrentSwapChain = psCurrentSwapChain->psNext)
+ {
+ if(psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID)
+ return psCurrentSwapChain;
+ }
+ return IMG_NULL;
+}
+
+static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ PVRSRV_DC_SWAPCHAIN *psSwapChain,
+ PVRSRV_DC_SWAPCHAIN_REF **ppsSwapChainRef)
+{
+ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DC_SWAPCHAIN_REF),
+ (IMG_VOID **)&psSwapChainRef, IMG_NULL,
+ "Display Class Swapchain Reference") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet (psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF));
+
+
+ psSwapChain->ui32RefCount++;
+
+
+ psSwapChainRef->psSwapChain = psSwapChain;
+ psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
+ psSwapChainRef,
+ 0,
+ &DestroyDCSwapChainRefCallBack);
+ *ppsSwapChainRef = psSwapChainRef;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDeviceKM,
+ IMG_UINT32 ui32Flags,
+ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+ IMG_UINT32 ui32BufferCount,
+ IMG_UINT32 ui32OEMFlags,
+ IMG_HANDLE *phSwapChainRef,
+ IMG_UINT32 *pui32SwapChainID)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
+ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
+ PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+ PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ DISPLAY_INFO sDisplayInfo;
+
+
+ if(!hDeviceKM
+ || !psDstSurfAttrib
+ || !psSrcSurfAttrib
+ || !phSwapChainRef
+ || !pui32SwapChainID)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
+ return PVRSRV_ERROR_TOOMANYBUFFERS;
+ }
+
+ if (ui32BufferCount < 2)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too few buffers"));
+ return PVRSRV_ERROR_TOO_FEW_BUFFERS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+ if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY )
+ {
+
+ psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID );
+ if( psSwapChain )
+ {
+
+ eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
+ psSwapChain,
+ &psSwapChainRef);
+ if( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
+ return eError;
+ }
+
+ *phSwapChainRef = (IMG_HANDLE)psSwapChainRef;
+ return PVRSRV_OK;
+ }
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query"));
+ return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
+ }
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DC_SWAPCHAIN),
+ (IMG_VOID **)&psSwapChain, IMG_NULL,
+ "Display Class Swapchain") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorExit;
+ }
+ OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
+
+
+ eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
+ goto ErrorExit;
+ }
+
+
+ psSwapChain->psQueue = psQueue;
+
+
+ for(i=0; i<ui32BufferCount; i++)
+ {
+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
+ psDCInfo->hDevMemContext,
+ &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc sync info for psSwapChain"));
+ goto ErrorExit;
+ }
+
+
+ psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
+
+
+ psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
+ psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
+
+
+ apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
+ }
+
+ psSwapChain->ui32BufferCount = ui32BufferCount;
+ psSwapChain->psDCInfo = psDCInfo;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Allocate DC swap chain (SwapChainID == %u, BufferCount == %u)",
+ *pui32SwapChainID,
+ ui32BufferCount);
+ PDUMPCOMMENT(" Src surface dimensions == %u x %u",
+ psSrcSurfAttrib->sDims.ui32Width,
+ psSrcSurfAttrib->sDims.ui32Height);
+ PDUMPCOMMENT(" Dst surface dimensions == %u x %u",
+ psDstSurfAttrib->sDims.ui32Width,
+ psDstSurfAttrib->sDims.ui32Height);
+#endif
+
+ eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info"));
+ goto ErrorExit; /* free the swap chain, queue and sync infos allocated above */
+ }
+
+ psSwapChain->ui32MinSwapInterval = sDisplayInfo.ui32MinSwapInterval;
+ psSwapChain->ui32MaxSwapInterval = sDisplayInfo.ui32MaxSwapInterval;
+
+
+ eError = psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
+ ui32Flags,
+ psDstSurfAttrib,
+ psSrcSurfAttrib,
+ ui32BufferCount,
+ apsSyncData,
+ ui32OEMFlags,
+ &psSwapChain->hExtSwapChain,
+ &psSwapChain->ui32SwapChainID);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
+ PDUMPCOMMENT("Swapchain allocation failed.");
+ goto ErrorExit;
+ }
+
+
+ eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
+ psSwapChain,
+ &psSwapChainRef);
+ if( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
+ PDUMPCOMMENT("Swapchain allocation failed.");
+ goto ErrorExit;
+ }
+
+ psSwapChain->ui32RefCount = 1;
+ psSwapChain->ui32Flags = ui32Flags;
+
+
+ if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED )
+ {
+ if(! psDCInfo->psDCSwapChainShared )
+ {
+ psDCInfo->psDCSwapChainShared = psSwapChain;
+ }
+ else
+ {
+ PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared;
+ psDCInfo->psDCSwapChainShared = psSwapChain;
+ psSwapChain->psNext = psOldHead;
+ }
+ }
+
+
+ *pui32SwapChainID = psSwapChain->ui32SwapChainID;
+
+
+ *phSwapChainRef= (IMG_HANDLE)psSwapChainRef;
+
+ return eError;
+
+ErrorExit:
+
+ for(i=0; i<ui32BufferCount; i++)
+ {
+ if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ }
+ }
+
+ if(psQueue)
+ {
+ PVRSRVDestroyCommandQueueKM(psQueue);
+ }
+
+ if(psSwapChain)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
+
+ }
+
+ return eError;
+}
+
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef,
+ IMG_RECT *psRect)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+
+ if(!hDeviceKM || !hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
+
+ return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ psRect);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef,
+ IMG_RECT *psRect)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+
+ if(!hDeviceKM || !hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
+
+ return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ psRect);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef,
+ IMG_UINT32 ui32CKColour)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+
+ if(!hDeviceKM || !hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
+
+ return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ ui32CKColour);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef,
+ IMG_UINT32 ui32CKColour)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+
+ if(!hDeviceKM || !hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
+
+ return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ ui32CKColour);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef,
+ IMG_UINT32 *pui32BufferCount,
+ IMG_HANDLE *phBuffer,
+ IMG_SYS_PHYADDR *psPhyAddr)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+ IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+
+ if(!hDeviceKM || !hSwapChainRef || !phBuffer || !psPhyAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
+
+
+ eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ pui32BufferCount,
+ ahExtBuffer);
+
+ PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
+
+
+
+
+ for(i=0; i<*pui32BufferCount; i++)
+ {
+ psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
+ phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
+ }
+
+#if defined(SUPPORT_GET_DC_BUFFERS_SYS_PHYADDRS)
+ for(i = 0; i < *pui32BufferCount; i++)
+ {
+ IMG_UINT32 ui32ByteSize, ui32TilingStride;
+ IMG_SYS_PHYADDR *pPhyAddr;
+ IMG_BOOL bIsContiguous;
+ IMG_HANDLE hOSMapInfo;
+ IMG_VOID *pvVAddr;
+
+ eError = psDCInfo->psFuncTable->pfnGetBufferAddr(psDCInfo->hExtDevice,
+ ahExtBuffer[i],
+ &pPhyAddr,
+ &ui32ByteSize,
+ &pvVAddr,
+ &hOSMapInfo,
+ &bIsContiguous,
+ &ui32TilingStride);
+ if(eError != PVRSRV_OK)
+ {
+ break;
+ }
+
+ psPhyAddr[i] = *pPhyAddr;
+ }
+#endif
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hBuffer,
+ IMG_UINT32 ui32SwapInterval,
+ IMG_HANDLE hPrivateTag,
+ IMG_UINT32 ui32ClipRectCount,
+ IMG_RECT *psClipRect)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_BUFFER *psBuffer;
+ PVRSRV_QUEUE_INFO *psQueue;
+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
+ IMG_UINT32 i;
+ IMG_BOOL bAddReferenceToLast = IMG_TRUE;
+ IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND;
+ IMG_UINT32 ui32NumSrcSyncs = 1;
+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
+ PVRSRV_COMMAND *psCommand;
+ SYS_DATA *psSysData;
+
+ if(!hDeviceKM || !hBuffer || !psClipRect)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ if(ui32SwapInterval < psBuffer->psSwapChain->ui32MinSwapInterval ||
+ ui32SwapInterval > psBuffer->psSwapChain->ui32MaxSwapInterval)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid swap interval. Requested %u, Allowed range %u-%u",
+ ui32SwapInterval, psBuffer->psSwapChain->ui32MinSwapInterval, psBuffer->psSwapChain->ui32MaxSwapInterval));
+ return PVRSRV_ERROR_INVALID_SWAPINTERVAL;
+ }
+
+#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
+
+ if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL)
+ {
+ psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice,
+ psBuffer->psSwapChain->hExtSwapChain,
+ psBuffer->sDeviceClassBuffer.hExtBuffer,
+ hPrivateTag,
+ &ui16SwapCommandID,
+ &bAddReferenceToLast);
+
+ }
+
+#endif
+
+
+ psQueue = psBuffer->psSwapChain->psQueue;
+
+
+ apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+
+
+
+ if(bAddReferenceToLast && psBuffer->psSwapChain->psLastFlipBuffer &&
+ psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
+ {
+ apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+
+
+
+ ui32NumSrcSyncs++;
+ }
+
+
+ eError = PVRSRVInsertCommandKM (psQueue,
+ &psCommand,
+ psDCInfo->ui32DeviceID,
+ ui16SwapCommandID,
+ 0,
+ IMG_NULL,
+ ui32NumSrcSyncs,
+ apsSrcSync,
+ sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount),
+ IMG_NULL,
+ IMG_NULL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
+ goto Exit;
+ }
+
+
+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
+
+
+ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
+
+
+ psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
+
+
+ psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
+
+
+ psFlipCmd->hPrivateTag = hPrivateTag;
+
+
+ psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
+
+ psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
+
+ for(i=0; i<ui32ClipRectCount; i++)
+ {
+ psFlipCmd->psClipRect[i] = psClipRect[i];
+ }
+
+
+ psFlipCmd->ui32SwapInterval = ui32SwapInterval;
+
+ SysAcquireData(&psSysData);
+
+
+ {
+ if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
+ {
+ OSFlushCPUCacheKM();
+ }
+ else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+ {
+ OSCleanCPUCacheKM();
+ }
+
+ psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+ }
+
+
+ eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
+ goto Exit;
+ }
+
+
+
+ eError = OSScheduleMISR(psSysData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to schedule MISR"));
+ goto Exit;
+ }
+
+
+ psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
+
+Exit:
+
+ if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+ return eError;
+}
+
+typedef struct _CALLBACK_DATA_
+{
+ IMG_PVOID pvPrivData;
+ IMG_UINT32 ui32PrivDataLength;
+ IMG_PVOID ppvMemInfos;
+ IMG_UINT32 ui32NumMemInfos;
+} CALLBACK_DATA;
+
+static IMG_VOID FreePrivateData(IMG_HANDLE hCallbackData)
+{
+ CALLBACK_DATA *psCallbackData = hCallbackData;
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psCallbackData->ui32PrivDataLength,
+ psCallbackData->pvPrivData, IMG_NULL);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos,
+ psCallbackData->ppvMemInfos, IMG_NULL);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(CALLBACK_DATA), hCallbackData, IMG_NULL);
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSwapToDCBuffer2KM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32SwapInterval,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfos,
+ PVRSRV_KERNEL_SYNC_INFO **ppsSyncInfos,
+ IMG_UINT32 ui32NumMemSyncInfos,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength)
+{
+ PVRSRV_KERNEL_SYNC_INFO **ppsCompiledSyncInfos;
+ IMG_UINT32 i, ui32NumCompiledSyncInfos;
+ DISPLAYCLASS_FLIP_COMMAND2 *psFlipCmd;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ CALLBACK_DATA *psCallbackData;
+ PVRSRV_QUEUE_INFO *psQueue;
+ PVRSRV_COMMAND *psCommand;
+ IMG_PVOID *ppvMemInfos;
+ SYS_DATA *psSysData;
+
+ if(!hDeviceKM || !hSwapChain || !ppsMemInfos || !ppsSyncInfos || ui32NumMemSyncInfos < 1)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChain)->psSwapChain;
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+
+
+ if(ui32SwapInterval < psSwapChain->ui32MinSwapInterval ||
+ ui32SwapInterval > psSwapChain->ui32MaxSwapInterval)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid swap interval. Requested %u, Allowed range %u-%u",
+ ui32SwapInterval, psSwapChain->ui32MinSwapInterval, psSwapChain->ui32MaxSwapInterval));
+ return PVRSRV_ERROR_INVALID_SWAPINTERVAL;
+ }
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(CALLBACK_DATA),
+ (IMG_VOID **)&psCallbackData, IMG_NULL,
+ "PVRSRVSwapToDCBuffer2KM callback data");
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ psCallbackData->pvPrivData = pvPrivData;
+ psCallbackData->ui32PrivDataLength = ui32PrivDataLength;
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_VOID *) * ui32NumMemSyncInfos,
+ (IMG_VOID **)&ppvMemInfos, IMG_NULL,
+ "Swap Command Meminfos") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list"));
+ psCallbackData->ppvMemInfos = IMG_NULL;
+ goto Exit;
+ }
+
+ for(i = 0; i < ui32NumMemSyncInfos; i++)
+ {
+ ppvMemInfos[i] = ppsMemInfos[i];
+ }
+
+ psCallbackData->ppvMemInfos = ppvMemInfos;
+ psCallbackData->ui32NumMemInfos = ui32NumMemSyncInfos;
+
+
+ psQueue = psSwapChain->psQueue;
+
+#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
+ if(psSwapChain->ppsLastSyncInfos)
+ {
+ IMG_UINT32 ui32NumUniqueSyncInfos = psSwapChain->ui32LastNumSyncInfos;
+ IMG_UINT32 j;
+
+ for(j = 0; j < psSwapChain->ui32LastNumSyncInfos; j++)
+ {
+ for(i = 0; i < ui32NumMemSyncInfos; i++)
+ {
+ if(psSwapChain->ppsLastSyncInfos[j] == ppsSyncInfos[i])
+ {
+ psSwapChain->ppsLastSyncInfos[j] = IMG_NULL;
+ ui32NumUniqueSyncInfos--;
+ }
+ }
+ }
+
+ ui32NumCompiledSyncInfos = ui32NumMemSyncInfos + ui32NumUniqueSyncInfos;
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos,
+ (IMG_VOID **)&ppsCompiledSyncInfos, IMG_NULL,
+ "Compiled syncinfos") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for syncinfo list"));
+ goto Exit;
+ }
+
+ OSMemCopy(ppsCompiledSyncInfos, ppsSyncInfos, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumMemSyncInfos);
+ for(j = 0, i = ui32NumMemSyncInfos; j < psSwapChain->ui32LastNumSyncInfos; j++)
+ {
+ if(psSwapChain->ppsLastSyncInfos[j])
+ {
+ ppsCompiledSyncInfos[i] = psSwapChain->ppsLastSyncInfos[j];
+ i++;
+ }
+ }
+ }
+ else
+#endif
+ {
+ ppsCompiledSyncInfos = ppsSyncInfos;
+ ui32NumCompiledSyncInfos = ui32NumMemSyncInfos;
+ }
+
+
+ eError = PVRSRVInsertCommandKM (psQueue,
+ &psCommand,
+ psDCInfo->ui32DeviceID,
+ DC_FLIP_COMMAND,
+ 0,
+ IMG_NULL,
+ ui32NumCompiledSyncInfos,
+ ppsCompiledSyncInfos,
+ sizeof(DISPLAYCLASS_FLIP_COMMAND2),
+ FreePrivateData,
+ psCallbackData);
+
+ if (ppsCompiledSyncInfos != ppsSyncInfos)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos,
+ (IMG_VOID *)ppsCompiledSyncInfos,
+ IMG_NULL);
+ }
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to get space in queue"));
+ goto Exit;
+ }
+
+
+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND2*)psCommand->pvData;
+
+
+ psFlipCmd->hUnused = IMG_NULL;
+
+
+ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
+
+
+ psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
+
+
+ psFlipCmd->ui32SwapInterval = ui32SwapInterval;
+
+
+ psFlipCmd->pvPrivData = pvPrivData;
+ psFlipCmd->ui32PrivDataLength = ui32PrivDataLength;
+
+ psFlipCmd->ppsMemInfos = (PDC_MEM_INFO *)ppvMemInfos;
+ psFlipCmd->ui32NumMemInfos = ui32NumMemSyncInfos;
+
+
+ psFlipCmd->hUnused = IMG_NULL;
+
+ SysAcquireData(&psSysData);
+
+
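+	/* Perform any outstanding CPU cache maintenance before the flip is submitted */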
+ {
+ if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
+ {
+ OSFlushCPUCacheKM();
+ }
+ else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+ {
+ OSCleanCPUCacheKM();
+ }
+
+ psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+ }
+
+
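+	/* Submit the queued command for processing */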
+ eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to submit command"));
+ goto Exit;
+ }
+
+
+ psCallbackData = IMG_NULL;
+
+
+
+ eError = OSScheduleMISR(psSysData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to schedule MISR"));
+ goto Exit;
+ }
+
+#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
+
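+	/* Remember the sync objects used by this swap so a later swap can depend on them */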
+ if (psSwapChain->ui32LastNumSyncInfos < ui32NumMemSyncInfos)
+ {
+ if (psSwapChain->ppsLastSyncInfos)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos,
+ psSwapChain->ppsLastSyncInfos, IMG_NULL);
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumMemSyncInfos,
+ (IMG_VOID **)&psSwapChain->ppsLastSyncInfos, IMG_NULL,
+ "Last syncinfos") != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for last syncinfo list"));
+ goto Exit;
+ }
+ }
+
+ psSwapChain->ui32LastNumSyncInfos = ui32NumMemSyncInfos;
+
+ for(i = 0; i < ui32NumMemSyncInfos; i++)
+ {
+ psSwapChain->ppsLastSyncInfos[i] = ppsSyncInfos[i];
+ }
+#endif
+
+Exit:
+ if (psCallbackData)
+ {
+ if(psCallbackData->ppvMemInfos)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos,
+ psCallbackData->ppvMemInfos, IMG_NULL);
+ }
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(CALLBACK_DATA), psCallbackData, IMG_NULL);
+ }
+ if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChainRef)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_QUEUE_INFO *psQueue;
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ PVRSRV_DC_SWAPCHAIN *psSwapChain;
+ PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
+ IMG_UINT32 ui32NumSrcSyncs = 1;
+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
+ PVRSRV_COMMAND *psCommand;
+ IMG_BOOL bAddReferenceToLast = IMG_TRUE;
+ IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND;
+ SYS_DATA *psSysData;
+
+ if(!hDeviceKM || !hSwapChainRef)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
+ psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef;
+ psSwapChain = psSwapChainRef->psSwapChain;
+
+
+ psQueue = psSwapChain->psQueue;
+
+#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
+
+ if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL)
+ {
+ psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice,
+ psSwapChain->hExtSwapChain,
+ psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer,
+ 0,
+ &ui16SwapCommandID,
+ &bAddReferenceToLast);
+
+ }
+
+#endif
+
+
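+	/* The system buffer's sync object is the first source sync dependency */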
+ apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
+
+
+
+ if(bAddReferenceToLast && psSwapChain->psLastFlipBuffer)
+ {
+
+ if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo)
+ {
+ apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+
+
+
+ ui32NumSrcSyncs++;
+ }
+ }
+
+
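+	/* Get space in the queue for the swap-to-system command */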
+ eError = PVRSRVInsertCommandKM (psQueue,
+ &psCommand,
+ psDCInfo->ui32DeviceID,
+ ui16SwapCommandID,
+ 0,
+ IMG_NULL,
+ ui32NumSrcSyncs,
+ apsSrcSync,
+ sizeof(DISPLAYCLASS_FLIP_COMMAND),
+ IMG_NULL,
+ IMG_NULL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
+ goto Exit;
+ }
+
+
+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
+
+
+ psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
+
+
+ psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
+
+
+ psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
+
+
+ psFlipCmd->hPrivateTag = IMG_NULL;
+
+
+ psFlipCmd->ui32ClipRectCount = 0;
+
+ psFlipCmd->ui32SwapInterval = 1;
+
+
+ eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
+ goto Exit;
+ }
+
+
+ SysAcquireData(&psSysData);
+ eError = OSScheduleMISR(psSysData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to schedule MISR"));
+ goto Exit;
+ }
+
+
+ psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
+
+ eError = PVRSRV_OK;
+
+Exit:
+
+ if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+ return eError;
+}
+
+
+static
+PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER pfnISRHandler,
+ IMG_VOID *pvISRHandlerData,
+ IMG_UINT32 ui32ISRSourceMask,
+ IMG_UINT32 ui32DeviceID)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
+
+ SysAcquireData(&psSysData);
+
+
+ psDevNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DeviceID,
+ IMG_TRUE);
+
+ if (psDevNode == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ }
+
+
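+	/* Attach the ISR handler and its private data to the matching device node */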
+ psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
+
+
+ psDevNode->pfnDeviceISR = pfnISRHandler;
+
+ return PVRSRV_OK;
+}
+
+static
+IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
+ IMG_UINT32 ui32State;
+ ui32State = va_arg(va, IMG_UINT32);
+
+ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
+ {
+ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
+ if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
+ {
+ psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
+ }
+ }
+}
+
+
+IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
+ &PVRSRVSetDCState_ForEachVaCb,
+ ui32State);
+}
+
+static PVRSRV_ERROR
+PVRSRVDCMemInfoGetCpuVAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo,
+ IMG_CPU_VIRTADDR *pVAddr)
+{
+ *pVAddr = psKernelMemInfo->pvLinAddrKM;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PVRSRVDCMemInfoGetCpuPAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo,
+ IMG_SIZE_T uByteOffset, IMG_CPU_PHYADDR *pPAddr)
+{
+ *pPAddr = OSMemHandleToCpuPAddr(psKernelMemInfo->sMemBlk.hOSMemHandle, uByteOffset);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PVRSRVDCMemInfoGetByteSize(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo,
+ IMG_SIZE_T *uByteSize)
+{
+ *uByteSize = psKernelMemInfo->uAllocSize;
+ return PVRSRV_OK;
+}
+
+static IMG_BOOL
+PVRSRVDCMemInfoIsPhysContig(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ return OSMemHandleIsPhysContig(psKernelMemInfo->sMemBlk.hOSMemHandle);
+}
+
+IMG_EXPORT
+IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
+{
+ psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
+ psJTable->pfnPVRSRVRegisterDCDevice = &PVRSRVRegisterDCDeviceKM;
+ psJTable->pfnPVRSRVRemoveDCDevice = &PVRSRVRemoveDCDeviceKM;
+ psJTable->pfnPVRSRVOEMFunction = &SysOEMFunction;
+ psJTable->pfnPVRSRVRegisterCmdProcList = &PVRSRVRegisterCmdProcListKM;
+ psJTable->pfnPVRSRVRemoveCmdProcList = &PVRSRVRemoveCmdProcListKM;
+#if defined(SUPPORT_MISR_IN_THREAD)
+ psJTable->pfnPVRSRVCmdComplete = &OSVSyncMISR;
+#else
+ psJTable->pfnPVRSRVCmdComplete = &PVRSRVCommandCompleteKM;
+#endif
+ psJTable->pfnPVRSRVRegisterSystemISRHandler = &PVRSRVRegisterSystemISRHandler;
+ psJTable->pfnPVRSRVRegisterPowerDevice = &PVRSRVRegisterPowerDevice;
+#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
+ psJTable->pfnPVRSRVFreeCmdCompletePacket = &PVRSRVFreeCommandCompletePacketKM;
+#endif
+ psJTable->pfnPVRSRVDCMemInfoGetCpuVAddr = &PVRSRVDCMemInfoGetCpuVAddr;
+ psJTable->pfnPVRSRVDCMemInfoGetCpuPAddr = &PVRSRVDCMemInfoGetCpuPAddr;
+ psJTable->pfnPVRSRVDCMemInfoGetByteSize = &PVRSRVDCMemInfoGetByteSize;
+ psJTable->pfnPVRSRVDCMemInfoIsPhysContig = &PVRSRVDCMemInfoIsPhysContig;
+ return IMG_TRUE;
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM,
+ IMG_BOOL bResManCallback)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
+
+ PVR_UNREFERENCED_PARAMETER(bResManCallback);
+
+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
+
+
+ eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem, CLEANUP_WITH_POLL);
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+ IMG_UINT32 i;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
+
+ psBCInfo = psBCPerContextInfo->psBCInfo;
+
+ for (i = 0; i < psBCInfo->ui32BufferCount; i++)
+ {
+ if (psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CloseBCDeviceCallBack: buffer %d (0x%p) still mapped (ui32MemMapRefCount = %d)",
+ i,
+ &psBCInfo->psBuffer[i].sDeviceClassBuffer,
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount));
+ return PVRSRV_ERROR_STILL_MAPPED;
+ }
+ }
+
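+	/* Drop the device reference; tear down the external device and its buffers on the last close */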
+ psBCInfo->ui32RefCount--;
+ if(psBCInfo->ui32RefCount == 0)
+ {
+
+ psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice);
+
+
+ for(i=0; i<psBCInfo->ui32BufferCount; i++)
+ {
+ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ }
+ }
+
+
+ if(psBCInfo->psBuffer)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER) * psBCInfo->ui32BufferCount, psBCInfo->psBuffer, IMG_NULL);
+ psBCInfo->psBuffer = IMG_NULL;
+ }
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE hDevCookie,
+ IMG_HANDLE *phDeviceKM)
+{
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if(!phDeviceKM || !hDevCookie)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ SysAcquireData(&psSysData);
+
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DeviceID,
+ IMG_FALSE,
+ PVRSRV_DEVICE_CLASS_BUFFER);
+ if (!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
+ return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ }
+ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
+
+
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*psBCPerContextInfo),
+ (IMG_VOID **)&psBCPerContextInfo, IMG_NULL,
+ "Buffer Class per Context Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
+
+ if(psBCInfo->ui32RefCount++ == 0)
+ {
+ BUFFER_INFO sBufferInfo;
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+
+
+ psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
+
+
+ eError = psBCInfo->psFuncTable->pfnOpenBCDevice(ui32DeviceID, &psBCInfo->hExtDevice);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
+ return eError;
+ }
+
+
+ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
+ if(eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC Info"));
+ return eError;
+ }
+
+
+ psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
+
+
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
+ (IMG_VOID **)&psBCInfo->psBuffer,
+ IMG_NULL,
+ "Array of Buffer Class Buffer");
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
+ return eError;
+ }
+ OSMemSet (psBCInfo->psBuffer,
+ 0,
+ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
+
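+		/* Allocate a sync object for each buffer and query its external buffer handle */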
+ for(i=0; i<psBCInfo->ui32BufferCount; i++)
+ {
+
+ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
+ psBCInfo->hDevMemContext,
+ &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
+ goto ErrorExit;
+ }
+
+
+
+
+ eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
+ i,
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
+ &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
+ goto ErrorExit;
+ }
+
+
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
+ psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount = 0;
+ }
+ }
+
+ psBCPerContextInfo->psBCInfo = psBCInfo;
+ psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_BUFFERCLASS_DEVICE,
+ psBCPerContextInfo,
+ 0,
+ &CloseBCDeviceCallBack);
+
+
+ *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+
+ for(i=0; i<psBCInfo->ui32BufferCount; i++)
+ {
+ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL);
+ }
+ }
+
+
+ if(psBCInfo->psBuffer)
+ {
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER) * psBCInfo->ui32BufferCount, psBCInfo->psBuffer, IMG_NULL);
+ psBCInfo->psBuffer = IMG_NULL;
+ }
+
+ return eError;
+}
+
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
+ BUFFER_INFO *psBufferInfo)
+{
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+ PVRSRV_ERROR eError;
+
+ if(!hDeviceKM || !psBufferInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
+
+ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
+
+ if(eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Failed to get BC Info"));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
+ IMG_UINT32 ui32BufferIndex,
+ IMG_HANDLE *phBuffer)
+{
+ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
+
+ if(!hDeviceKM || !phBuffer)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
+
+ if(ui32BufferIndex < psBCInfo->ui32BufferCount)
+ {
+ *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex, psBCInfo->ui32BufferCount));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
+{
+ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
+
+ psJTable->pfnPVRSRVRegisterBCDevice = &PVRSRVRegisterBCDeviceKM;
+ psJTable->pfnPVRSRVScheduleDevices = &PVRSRVScheduleDevicesKM;
+ psJTable->pfnPVRSRVRemoveBCDevice = &PVRSRVRemoveBCDeviceKM;
+
+ return IMG_TRUE;
+}
+
diff --git a/drivers/gpu/pvr/deviceid.h b/drivers/gpu/pvr/deviceid.h
new file mode 100644
index 0000000..9a7bdb3
--- /dev/null
+++ b/drivers/gpu/pvr/deviceid.h
@@ -0,0 +1,36 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __DEVICEID_H__
+#define __DEVICEID_H__
+
+#include "services.h"
+#include "syscommon.h"
+
+PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
+PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
+
+#endif
diff --git a/drivers/gpu/pvr/devicemem.c b/drivers/gpu/pvr/devicemem.c
new file mode 100644
index 0000000..8874d61
--- /dev/null
+++ b/drivers/gpu/pvr/devicemem.c
@@ -0,0 +1,1837 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "pdump_km.h"
+#include "pvr_bridge_km.h"
+#include "osfunc.h"
+
+static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemHeap,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
+
+typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_
+{
+
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+
+ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
+} RESMAN_MAP_DEVICE_MEM_DATA;
+
+typedef struct _PVRSRV_DC_MAPINFO_
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT32 ui32RangeIndex;
+ IMG_UINT32 ui32TilingStride;
+ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
+} PVRSRV_DC_MAPINFO;
+
+static IMG_UINT32 g_ui32SyncUID = 0;
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo)
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo)
+#endif
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT32 ui32HeapCount;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ IMG_UINT32 i;
+
+ if (hDevCookie == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+
+
+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
+
+
+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
+
+
+ for(i=0; i<ui32HeapCount; i++)
+ {
+
+ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
+ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
+ psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
+ psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
+ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
+
+ psHeapInfo[i].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride;
+ }
+
+ for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
+ {
+ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
+ psHeapInfo[i].ui32HeapID = (IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE *phDevMemContext,
+ IMG_UINT32 *pui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo,
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo,
+#endif
+ IMG_BOOL *pbCreated,
+ IMG_BOOL *pbShared)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ IMG_HANDLE hDevMemContext;
+ IMG_HANDLE hDevMemHeap;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ IMG_UINT32 i;
+
+#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE)
+ PVR_UNREFERENCED_PARAMETER(pbShared);
+#endif
+
+ if (hDevCookie == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+
+
+
+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
+
+
+
+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
+
+
+
+ hDevMemContext = BM_CreateContext(psDeviceNode,
+ &sPDDevPAddr,
+ psPerProc,
+ pbCreated);
+ if (hDevMemContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ for(i=0; i<ui32HeapCount; i++)
+ {
+ switch(psDeviceMemoryHeap[i].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+
+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
+ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
+ #if defined(SUPPORT_MEMORY_TILING)
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride;
+ #else
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = 0;
+ #endif
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ pbShared[ui32ClientHeapCount] = IMG_TRUE;
+#endif
+ ui32ClientHeapCount++;
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT:
+ {
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap(hDevMemContext,
+ &psDeviceMemoryHeap[i]);
+ if (hDevMemHeap == IMG_NULL)
+ {
+ BM_DestroyContext(hDevMemContext, IMG_NULL);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+ else
+ {
+ hDevMemHeap = IMG_NULL;
+ }
+
+
+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
+ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
+ #if defined(SUPPORT_MEMORY_TILING)
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride;
+ #else
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = 0;
+ #endif
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ pbShared[ui32ClientHeapCount] = IMG_FALSE;
+#endif
+
+ ui32ClientHeapCount++;
+ break;
+ }
+ }
+ }
+
+
+ *pui32ClientHeapCount = ui32ClientHeapCount;
+ *phDevMemContext = hDevMemContext;
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL *pbDestroyed)
+{
+ PVR_UNREFERENCED_PARAMETER(hDevCookie);
+
+ return BM_DestroyContext(hDevMemContext, pbDestroyed);
+}
+
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_UINT32 *pui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo,
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo,
+#endif
+ IMG_BOOL *pbShared)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ IMG_HANDLE hDevMemHeap;
+ IMG_UINT32 i;
+
+#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE)
+ PVR_UNREFERENCED_PARAMETER(pbShared);
+#endif
+
+ if (hDevCookie == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
+
+
+
+ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
+ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
+
+
+
+ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
+
+
+ for(i=0; i<ui32HeapCount; i++)
+ {
+ switch(psDeviceMemoryHeap[i].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+
+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
+ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride;
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ pbShared[ui32ClientHeapCount] = IMG_TRUE;
+#endif
+ ui32ClientHeapCount++;
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT:
+ {
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap(hDevMemContext,
+ &psDeviceMemoryHeap[i]);
+
+ if (hDevMemHeap == IMG_NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+ else
+ {
+ hDevMemHeap = IMG_NULL;
+ }
+
+
+ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
+ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
+ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
+ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
+ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
+ psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride;
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+ pbShared[ui32ClientHeapCount] = IMG_FALSE;
+#endif
+
+ ui32ClientHeapCount++;
+ break;
+ }
+ }
+ }
+
+
+ *pui32ClientHeapCount = ui32ClientHeapCount;
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemHeap,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ BM_HANDLE hBuffer;
+
+ PVRSRV_MEMBLK *psMemBlock;
+ IMG_BOOL bBMError;
+
+ PVR_UNREFERENCED_PARAMETER(hDevCookie);
+
+ *ppsMemInfo = IMG_NULL;
+
+ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ (IMG_VOID **)&psMemInfo, IMG_NULL,
+ "Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+
+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
+
+ psMemBlock = &(psMemInfo->sMemBlk);
+
+
+ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
+
+ bBMError = BM_Alloc (hDevMemHeap,
+ IMG_NULL,
+ ui32Size,
+ &psMemInfo->ui32Flags,
+ IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment),
+ pvPrivData,
+ ui32PrivDataLength,
+ &hBuffer);
+
+ if (!bBMError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
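+	/* Record the device virtual address and OS memory handle of the new allocation */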
+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
+
+
+ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
+
+
+
+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
+
+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
+
+ psMemInfo->uAllocSize = ui32Size;
+
+
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+
+
+ *ppsMemInfo = psMemInfo;
+
+
+ return (PVRSRV_OK);
+}
+
+static PVRSRV_ERROR FreeDeviceMem2(PVRSRV_KERNEL_MEM_INFO *psMemInfo, PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin)
+{
+ BM_HANDLE hBuffer;
+
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ hBuffer = psMemInfo->sMemBlk.hBuffer;
+
+
+ switch(eCallbackOrigin)
+ {
+ case PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR:
+ BM_Free(hBuffer, psMemInfo->ui32Flags);
+ break;
+ case PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER:
+ BM_FreeExport(hBuffer, psMemInfo->ui32Flags);
+ break;
+ default:
+ break;
+ }
+
+
+ if (psMemInfo->pvSysBackupBuffer &&
+ eCallbackOrigin == PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR)
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+ }
+
+	if (psMemInfo->ui32RefCount == 0)
+	{
+		OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+	}
+
+
+ return(PVRSRV_OK);
+}
+
+static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ BM_HANDLE hBuffer;
+
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ hBuffer = psMemInfo->sMemBlk.hBuffer;
+
+
+ BM_Free(hBuffer, psMemInfo->ui32Flags);
+
+ if(psMemInfo->pvSysBackupBuffer)
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+ }
+
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+
+
+ return(PVRSRV_OK);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo)
+{
+ IMG_HANDLE hSyncDevMemHeap;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ BM_CONTEXT *pBMContext;
+ PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ PVRSRV_SYNC_DATA *psSyncData;
+
+ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_KERNEL_SYNC_INFO),
+ (IMG_VOID **)&psKernelSyncInfo, IMG_NULL,
+ "Kernel Synchronization Info");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = OSAtomicAlloc(&psKernelSyncInfo->pvRefCount);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to allocate atomic"));
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+
+ pBMContext = (BM_CONTEXT*)hDevMemContext;
+ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
+
+
+ hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
+
+
+
+
+ eError = AllocDeviceMem(hDevCookie,
+ hSyncDevMemHeap,
+ PVRSRV_MEM_CACHE_CONSISTENT,
+ sizeof(PVRSRV_SYNC_DATA),
+ sizeof(IMG_UINT32),
+ IMG_NULL,
+ 0,
+ &psKernelSyncInfo->psSyncDataMemInfoKM);
+
+ if (eError != PVRSRV_OK)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
+ OSAtomicFree(psKernelSyncInfo->pvRefCount);
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
+ psSyncData = psKernelSyncInfo->psSyncData;
+
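+	/* Initialise all sync counters to zero */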
+ psSyncData->ui32WriteOpsPending = 0;
+ psSyncData->ui32WriteOpsComplete = 0;
+ psSyncData->ui32ReadOpsPending = 0;
+ psSyncData->ui32ReadOpsComplete = 0;
+ psSyncData->ui32ReadOps2Pending = 0;
+ psSyncData->ui32ReadOps2Complete = 0;
+ psSyncData->ui32LastOpDumpVal = 0;
+ psSyncData->ui32LastReadOpDumpVal = 0;
+ psSyncData->ui64LastWrite = 0;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Allocating kernel sync object");
+ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
+ psKernelSyncInfo->psSyncDataMemInfoKM,
+ 0,
+ (IMG_UINT32)psKernelSyncInfo->psSyncDataMemInfoKM->uAllocSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
+#endif
+
+ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
+ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
+ psKernelSyncInfo->sReadOps2CompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete);
+ psKernelSyncInfo->ui32UID = g_ui32SyncUID++;
+
+
+ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
+
+ OSAtomicInc(psKernelSyncInfo->pvRefCount);
+
+
+ *ppsKernelSyncInfo = psKernelSyncInfo;
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+IMG_VOID PVRSRVAcquireSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
+{
+ OSAtomicInc(psKernelSyncInfo->pvRefCount);
+}
+
+IMG_EXPORT
+IMG_VOID IMG_CALLCONV PVRSRVReleaseSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
+{
+ if (OSAtomicDecAndTest(psKernelSyncInfo->pvRefCount))
+ {
+ FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
+
+
+ psKernelSyncInfo->psSyncDataMemInfoKM = IMG_NULL;
+ psKernelSyncInfo->psSyncData = IMG_NULL;
+ OSAtomicFree(psKernelSyncInfo->pvRefCount);
+ (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
+
+ }
+}
+
+static IMG_VOID freeWrapped(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ IMG_HANDLE hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
+
+
+ if(psMemInfo->sMemBlk.psIntSysPAddr)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
+ psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
+ }
+
+ if(hOSWrapMem)
+ {
+ OSReleasePhysPageAddr(hOSWrapMem);
+ }
+}
+
+
+#if defined (PVRSRV_FLUSH_KERNEL_OPS_LAST_ONLY)
+static
+PVRSRV_ERROR _PollUntilAtLeast(volatile IMG_UINT32* pui32WatchedValue,
+ IMG_UINT32 ui32MinimumValue,
+ IMG_UINT32 ui32Waitus,
+ IMG_UINT32 ui32Tries)
+{
+ PVRSRV_ERROR eError;
+ IMG_INT32 iDiff;
+
+ for(;;)
+ {
+ SYS_DATA *psSysData = SysAcquireDataNoCheck();
+ iDiff = *pui32WatchedValue - ui32MinimumValue;
+
+ if (iDiff >= 0)
+ {
+ eError = PVRSRV_OK;
+ break;
+ }
+
+ if(!ui32Tries)
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ break;
+ }
+
+ ui32Tries--;
+
+
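+		/* Block on the global event object rather than busy-waiting */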
+ if (psSysData->psGlobalEventObject)
+ {
+ IMG_HANDLE hOSEventKM;
+ if(psSysData->psGlobalEventObject)
+ {
+ eError = OSEventObjectOpenKM(psSysData->psGlobalEventObject, &hOSEventKM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_PollUntilAtLeast: OSEventObjectOpen failed"));
+ goto Exit;
+ }
+ eError = OSEventObjectWaitKM(hOSEventKM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_PollUntilAtLeast: PVRSRVEventObjectWait failed"));
+ goto Exit;
+ }
+ eError = OSEventObjectCloseKM(psSysData->psGlobalEventObject, hOSEventKM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_PollUntilAtLeast: OSEventObjectClose failed"));
+ }
+ }
+ }
+ }
+Exit:
+ return eError;
+}
+
+static PVRSRV_ERROR FlushKernelOps(PVRSRV_SYNC_DATA *psSyncData)
+{
+ PVRSRV_ERROR eError;
+
+ if(!psSyncData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FlushKernelOps: invalid psSyncData"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
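+	/* Wait for all outstanding read and write operations on this sync object to complete */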
+ eError = _PollUntilAtLeast(&psSyncData->ui32ReadOpsComplete,
+ psSyncData->ui32ReadOpsPending,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ WAIT_TRY_COUNT);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "FlushKernelOps: Read ops pending timeout"));
+ PVR_DBG_BREAK;
+ return eError;
+ }
+
+ eError = _PollUntilAtLeast(&psSyncData->ui32WriteOpsComplete,
+ psSyncData->ui32WriteOpsPending,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ WAIT_TRY_COUNT);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "FlushKernelOps: Write ops pending timeout"));
+ PVR_DBG_BREAK;
+ }
+
+ return eError;
+}
+#endif
+
+IMG_EXPORT
+PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Param,
+ PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+
+ PVRSRVKernelMemInfoDecRef(psMemInfo);
+
+
+ if (psMemInfo->ui32RefCount == 0)
+ {
+ if((psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) != 0)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMemInfo = 0;
+#else
+ IMG_HANDLE hMemInfo = IMG_NULL;
+#endif
+
+
+ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
+ &hMemInfo,
+ psMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: can't find exported meminfo in the global handle list"));
+ return eError;
+ }
+
+
+ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+ hMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: PVRSRVReleaseHandle failed for exported meminfo"));
+ return eError;
+ }
+ }
+
+#if defined (PVRSRV_FLUSH_KERNEL_OPS_LAST_ONLY)
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 1)
+ {
+ FlushKernelOps(psMemInfo->psKernelSyncInfo->psSyncData);
+ }
+ }
+#endif
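+		/* Release type-specific resources now that the last reference has gone */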
+ switch(psMemInfo->memType)
+ {
+
+ case PVRSRV_MEMTYPE_WRAPPED:
+ freeWrapped(psMemInfo);
+ case PVRSRV_MEMTYPE_DEVICE:
+ case PVRSRV_MEMTYPE_DEVICECLASS:
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: Unknown memType"));
+ eError = PVRSRV_ERROR_INVALID_MEMINFO;
+ }
+ }
+
+
+ if (eError == PVRSRV_OK)
+ {
+ eError = FreeDeviceMem2(psMemInfo, eCallbackOrigin);
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ return FreeMemCallBackCommon(psMemInfo, ui32Param,
+ PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(hDevCookie);
+
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (psMemInfo->sMemBlk.hResItem != IMG_NULL)
+ {
+ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+ }
+ else
+ {
+
+ eError = FreeDeviceMemCallBack(psMemInfo, 0, CLEANUP_WITH_POLL);
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemHeap,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+ PVRSRV_ERROR eError;
+ BM_HEAP *psBMHeap;
+ IMG_HANDLE hDevMemContext;
+
+ if (!hDevMemHeap ||
+ (ui32Size == 0))
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+
+ if (((ui32Size % HOST_PAGESIZE()) != 0) ||
+ ((ui32Alignment % HOST_PAGESIZE()) != 0))
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ eError = AllocDeviceMem(hDevCookie,
+ hDevMemHeap,
+ ui32Flags,
+ ui32Size,
+ ui32Alignment,
+ pvPrivData,
+ ui32PrivDataLength,
+ &psMemInfo);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
+ {
+ psMemInfo->psKernelSyncInfo = IMG_NULL;
+ }
+ else
+ {
+
+
+
+ psBMHeap = (BM_HEAP*)hDevMemHeap;
+ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
+ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
+ hDevMemContext,
+ &psMemInfo->psKernelSyncInfo);
+ if(eError != PVRSRV_OK)
+ {
+ goto free_mainalloc;
+ }
+ }
+
+
+ *ppsMemInfo = psMemInfo;
+
+ if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
+ {
+ psMemInfo->sMemBlk.hResItem = IMG_NULL;
+ }
+ else
+ {
+
+ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
+ psMemInfo,
+ 0,
+ &FreeDeviceMemCallBack);
+ if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
+ {
+
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto free_mainalloc;
+ }
+ }
+
+
+ PVRSRVKernelMemInfoIncRef(psMemInfo);
+
+ psMemInfo->memType = PVRSRV_MEMTYPE_DEVICE;
+
+
+ return (PVRSRV_OK);
+
+free_mainalloc:
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+ FreeDeviceMem(psMemInfo);
+
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
+
+
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
+ IMG_SIZE_T *pui32Total,
+ IMG_SIZE_T *pui32Free,
+ IMG_SIZE_T *pui32LargestBlock)
+{
+
+
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(pui32Total);
+ PVR_UNREFERENCED_PARAMETER(pui32Free);
+ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
+
+ return PVRSRV_OK;
+}
+
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+}
+
+
+static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ return FreeMemCallBackCommon(psMemInfo, ui32Param,
+ PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemContext,
+ IMG_SIZE_T uByteSize,
+ IMG_SIZE_T uPageOffset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psExtSysPAddr,
+ IMG_VOID *pvLinAddr,
+ IMG_UINT32 ui32Flags,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
+{
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
+ IMG_HANDLE hDevMemHeap = IMG_NULL;
+ PVRSRV_DEVICE_NODE* psDeviceNode;
+ BM_HANDLE hBuffer;
+ PVRSRV_MEMBLK *psMemBlock;
+ IMG_BOOL bBMError;
+ BM_HEAP *psBMHeap;
+ PVRSRV_ERROR eError;
+ IMG_VOID *pvPageAlignedCPUVAddr;
+ IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL;
+ IMG_HANDLE hOSWrapMem = IMG_NULL;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ IMG_UINT32 i;
+ IMG_SIZE_T uPageCount = 0;
+
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
+ PVR_ASSERT(psDeviceNode != IMG_NULL);
+
+ if (psDeviceNode == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(pvLinAddr)
+ {
+
+ uPageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);
+
+
+ uPageCount = HOST_PAGEALIGN(uByteSize + uPageOffset) / ui32HostPageSize;
+ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - uPageOffset);
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ uPageCount * sizeof(IMG_SYS_PHYADDR),
+ (IMG_VOID **)&psIntSysPAddr, IMG_NULL,
+ "Array of Page Addresses") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
+ uPageCount * ui32HostPageSize,
+ psIntSysPAddr,
+ &hOSWrapMem);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorExitPhase1;
+ }
+
+
+ psExtSysPAddr = psIntSysPAddr;
+
+
+
+ bPhysContig = IMG_FALSE;
+ }
+ else
+ {
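+		/* No CPU linear address: the caller supplied the physical page addresses directly */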
+
+ }
+
+
+ psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
+ {
+ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
+ {
+ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
+ {
+
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
+ }
+ else
+ {
+ hDevMemHeap = IMG_NULL;
+ }
+ }
+ else
+ {
+ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
+ }
+ break;
+ }
+ }
+
+ if(hDevMemHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
+ eError = PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP;
+ goto ErrorExitPhase2;
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ (IMG_VOID **)&psMemInfo, IMG_NULL,
+ "Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorExitPhase2;
+ }
+
+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
+ psMemInfo->ui32Flags = ui32Flags;
+
+ psMemBlock = &(psMemInfo->sMemBlk);
+
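+	/* Wrap the supplied physical pages into the device memory management */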
+ bBMError = BM_Wrap(hDevMemHeap,
+ uByteSize,
+ uPageOffset,
+ bPhysContig,
+ psExtSysPAddr,
+ IMG_NULL,
+ &psMemInfo->ui32Flags,
+ &hBuffer);
+ if (!bBMError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto ErrorExitPhase3;
+ }
+
+
+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
+ psMemBlock->hOSWrapMem = hOSWrapMem;
+ psMemBlock->psIntSysPAddr = psIntSysPAddr;
+
+
+ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
+
+
+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
+ psMemInfo->uAllocSize = uByteSize;
+
+
+
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+
+
+
+
+ psBMHeap = (BM_HEAP*)hDevMemHeap;
+ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
+ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
+ hDevMemContext,
+ &psMemInfo->psKernelSyncInfo);
+ if(eError != PVRSRV_OK)
+ {
+ goto ErrorExitPhase4;
+ }
+
+
+ PVRSRVKernelMemInfoIncRef(psMemInfo);
+
+ psMemInfo->memType = PVRSRV_MEMTYPE_WRAPPED;
+
+
+ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DEVICEMEM_WRAP,
+ psMemInfo,
+ 0,
+ &UnwrapExtMemoryCallBack);
+
+
+ *ppsMemInfo = psMemInfo;
+
+ return PVRSRV_OK;
+
+
+
+ErrorExitPhase4:
+ if(psMemInfo)
+ {
+ FreeDeviceMem(psMemInfo);
+
+
+
+ psMemInfo = IMG_NULL;
+ }
+
+ErrorExitPhase3:
+ if(psMemInfo)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+
+ }
+
+ErrorExitPhase2:
+ if(psIntSysPAddr)
+ {
+ OSReleasePhysPageAddr(hOSWrapMem);
+ }
+
+ErrorExitPhase1:
+ if(psIntSysPAddr)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
+
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+}
+
+
+static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_ERROR eError;
+ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
+ psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
+ }
+
+ if( psMapData->psMemInfo->psKernelSyncInfo )
+ {
+ PVRSRVKernelSyncInfoDecRef(psMapData->psMemInfo->psKernelSyncInfo, psMapData->psMemInfo);
+ }
+
+ eError = FreeDeviceMem(psMapData->psMemInfo);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo"));
+ return eError;
+ }
+
+
+ eError = FreeMemCallBackCommon(psMapData->psSrcMemInfo, 0,
+ PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
+
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
+ IMG_HANDLE hDstDevMemHeap,
+ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ IMG_SIZE_T uPageCount, uPageOffset;
+ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
+ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
+ IMG_DEV_PHYADDR sDevPAddr;
+ BM_BUF *psBuf;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
+ BM_HANDLE hBuffer;
+ PVRSRV_MEMBLK *psMemBlock;
+ IMG_BOOL bBMError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_VOID *pvPageAlignedCPUVAddr;
+ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL;
+
+
+ if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ *ppsDstMemInfo = IMG_NULL;
+
+ uPageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
+ uPageCount = HOST_PAGEALIGN(psSrcMemInfo->uAllocSize + uPageOffset) / ui32HostPageSize;
+ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - uPageOffset);
+
+
+
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ uPageCount*sizeof(IMG_SYS_PHYADDR),
+ (IMG_VOID **)&psSysPAddr, IMG_NULL,
+ "Array of Page Addresses") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
+
+
+ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
+
+
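+	/* Build a table of system physical addresses, one per page of the source allocation */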
+ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(uPageOffset);
+ for(i=0; i<uPageCount; i++)
+ {
+ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
+
+
+ psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
+
+
+ sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
+ }
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
+ (IMG_VOID **)&psMapData, IMG_NULL,
+ "Resource Manager Map Data") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorExit;
+ }
+
+ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ (IMG_VOID **)&psMemInfo, IMG_NULL,
+ "Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorExit;
+ }
+
+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
+ psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;
+
+ psMemBlock = &(psMemInfo->sMemBlk);
+
+ bBMError = BM_Wrap(hDstDevMemHeap,
+ psSrcMemInfo->uAllocSize,
+ uPageOffset,
+ IMG_FALSE,
+ psSysPAddr,
+ pvPageAlignedCPUVAddr,
+ &psMemInfo->ui32Flags,
+ &hBuffer);
+
+ if (!bBMError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto ErrorExit;
+ }
+
+
+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
+
+
+ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
+
+
+ psMemBlock->psIntSysPAddr = psSysPAddr;
+
+
+ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
+
+
+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
+ psMemInfo->uAllocSize = psSrcMemInfo->uAllocSize;
+ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
+
+
+ if(psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+
+
+
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+
+
+ PVRSRVKernelMemInfoIncRef(psMemInfo);
+
+
+ PVRSRVKernelMemInfoIncRef(psSrcMemInfo);
+
+
+ BM_Export(psSrcMemInfo->sMemBlk.hBuffer);
+
+ psMemInfo->memType = PVRSRV_MEMTYPE_MAPPED;
+
+
+ psMapData->psMemInfo = psMemInfo;
+ psMapData->psSrcMemInfo = psSrcMemInfo;
+
+
+ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DEVICEMEM_MAPPING,
+ psMapData,
+ 0,
+ &UnmapDeviceMemoryCallBack);
+
+ *ppsDstMemInfo = psMemInfo;
+
+ return PVRSRV_OK;
+
+
+
+ErrorExit:
+
+ if(psSysPAddr)
+ {
+
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL);
+
+ }
+
+ if(psMemInfo)
+ {
+
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+
+ }
+
+ if(psMapData)
+ {
+
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
+
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
+{
+ if (!psMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+}
+
+
+static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_DC_MAPINFO *psDCMapInfo = pvParam;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psMemInfo = psDCMapInfo->psMemInfo;
+
+#if defined(SUPPORT_MEMORY_TILING)
+ if(psDCMapInfo->ui32TilingStride > 0)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDCMapInfo->psDeviceNode;
+
+ if (psDeviceNode->pfnFreeMemTilingRange(psDeviceNode,
+ psDCMapInfo->ui32RangeIndex) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceClassMemoryCallBack: FreeMemTilingRange failed"));
+ }
+ }
+#endif
+
+ (psDCMapInfo->psDeviceClassBuffer->ui32MemMapRefCount)--;
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL);
+
+ return FreeMemCallBackCommon(psMemInfo, ui32Param,
+ PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemContext,
+ IMG_HANDLE hDeviceClassBuffer,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
+ IMG_HANDLE *phOSMapInfo)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE* psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
+ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
+ IMG_SYS_PHYADDR *psSysPAddr;
+ IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
+ IMG_BOOL bPhysContig;
+ BM_CONTEXT *psBMContext;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ IMG_HANDLE hDevMemHeap = IMG_NULL;
+ IMG_SIZE_T uByteSize;
+ IMG_SIZE_T ui32Offset;
+ IMG_SIZE_T ui32PageSize = HOST_PAGESIZE();
+ BM_HANDLE hBuffer;
+ PVRSRV_MEMBLK *psMemBlock;
+ IMG_BOOL bBMError;
+ IMG_UINT32 i;
+ PVRSRV_DC_MAPINFO *psDCMapInfo = IMG_NULL;
+
+ if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DC_MAPINFO),
+ (IMG_VOID **)&psDCMapInfo, IMG_NULL,
+ "PVRSRV_DC_MAPINFO") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for psDCMapInfo"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet(psDCMapInfo, 0, sizeof(PVRSRV_DC_MAPINFO));
+
+ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
+
+
+ eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
+ psDeviceClassBuffer->hExtBuffer,
+ &psSysPAddr,
+ &uByteSize,
+ &pvCPUVAddr,
+ phOSMapInfo,
+ &bPhysContig,
+ &psDCMapInfo->ui32TilingStride);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
+ goto ErrorExitPhase1;
+ }
+
+
+ psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
+ psDeviceNode = psBMContext->psDeviceNode;
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
+ {
+ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
+ {
+ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
+ {
+
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
+ }
+ else
+ {
+ hDevMemHeap = IMG_NULL;
+ }
+ }
+ else
+ {
+ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
+ }
+ break;
+ }
+ }
+
+ if(hDevMemHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
+ eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE;
+ goto ErrorExitPhase1;
+ }
+
+
+ ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1);
+ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset);
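+	/*
+	 * Worked example (illustrative only, assuming a 4096 byte host page):
+	 * a CPU virtual address of 0x40001234 gives
+	 *   ui32Offset            = 0x40001234 & 0xFFF = 0x234
+	 *   pvPageAlignedCPUVAddr = 0x40001234 - 0x234 = 0x40001000
+	 * so BM_Wrap() below receives a page aligned address plus the
+	 * in-page offset of the original buffer.
+	 */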
+
+ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ (IMG_VOID **)&psMemInfo, IMG_NULL,
+ "Kernel Memory Info");
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
+ goto ErrorExitPhase1;
+ }
+
+ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
+
+ psMemBlock = &(psMemInfo->sMemBlk);
+
+ bBMError = BM_Wrap(hDevMemHeap,
+ uByteSize,
+ ui32Offset,
+ bPhysContig,
+ psSysPAddr,
+ pvPageAlignedCPUVAddr,
+ &psMemInfo->ui32Flags,
+ &hBuffer);
+
+ if (!bBMError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
+
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto ErrorExitPhase2;
+ }
+
+
+ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
+ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
+
+
+ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
+
+
+
+ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
+
+
+ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
+ psMemInfo->uAllocSize = uByteSize;
+ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
+
+ PVR_ASSERT(psMemInfo->psKernelSyncInfo != IMG_NULL);
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+
+
+
+ psMemInfo->pvSysBackupBuffer = IMG_NULL;
+
+
+ psDCMapInfo->psMemInfo = psMemInfo;
+ psDCMapInfo->psDeviceClassBuffer = psDeviceClassBuffer;
+
+#if defined(SUPPORT_MEMORY_TILING)
+ psDCMapInfo->psDeviceNode = psDeviceNode;
+
+ if(psDCMapInfo->ui32TilingStride > 0)
+ {
+
+ eError = psDeviceNode->pfnAllocMemTilingRange(psDeviceNode,
+ psMemInfo,
+ psDCMapInfo->ui32TilingStride,
+ &psDCMapInfo->ui32RangeIndex);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: AllocMemTilingRange failed"));
+ goto ErrorExitPhase3;
+ }
+ }
+#endif
+
+
+ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
+ psDCMapInfo,
+ 0,
+ &UnmapDeviceClassMemoryCallBack);
+
+ (psDeviceClassBuffer->ui32MemMapRefCount)++;
+ PVRSRVKernelMemInfoIncRef(psMemInfo);
+
+ psMemInfo->memType = PVRSRV_MEMTYPE_DEVICECLASS;
+
+
+ *ppsMemInfo = psMemInfo;
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ if(psMemInfo->pvLinAddrKM)
+ {
+
+ PDUMPCOMMENT("Dump display surface");
+ PDUMPMEM(IMG_NULL, psMemInfo, ui32Offset, psMemInfo->uAllocSize, PDUMP_FLAGS_CONTINUOUS, ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping);
+ }
+#endif
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_MEMORY_TILING)
+ErrorExitPhase3:
+ if(psMemInfo)
+ {
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+
+ FreeDeviceMem(psMemInfo);
+
+
+
+ psMemInfo = IMG_NULL;
+ }
+#endif
+
+ErrorExitPhase2:
+ if(psMemInfo)
+ {
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+ }
+
+ErrorExitPhase1:
+ if(psDCMapInfo)
+ {
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL);
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVChangeDeviceMemoryAttributesKM(IMG_HANDLE hKernelMemInfo, IMG_UINT32 ui32Attribs)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKMMemInfo;
+
+ if (hKernelMemInfo == IMG_NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psKMMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hKernelMemInfo;
+
+ if (ui32Attribs & PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT)
+ {
+ psKMMemInfo->ui32Flags |= PVRSRV_MEM_CACHE_CONSISTENT;
+ }
+ else
+ {
+ psKMMemInfo->ui32Flags &= ~PVRSRV_MEM_CACHE_CONSISTENT;
+ }
+
+ return PVRSRV_OK;
+}
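+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a caller that
+ * wants an existing mapping treated as cache coherent would toggle the
+ * attribute on the kernel meminfo handle it already owns, e.g.
+ *
+ *	eError = PVRSRVChangeDeviceMemoryAttributesKM(hKernelMemInfo,
+ *			PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT);
+ *
+ * Passing 0 for ui32Attribs clears PVRSRV_MEM_CACHE_CONSISTENT again.
+ */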
+
+
diff --git a/drivers/gpu/pvr/display/omap_display.c b/drivers/gpu/pvr/display/omap_display.c
new file mode 100644
index 0000000..d385908
--- /dev/null
+++ b/drivers/gpu/pvr/display/omap_display.c
@@ -0,0 +1,1115 @@
+/*
+ * drivers/gpu/pvr/display/omap_display.c
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include <plat/vrfb.h>
+#include <plat/display.h>
+
+/* Workaround for DEBUG macro clash in framebuffer */
+#ifdef RELEASE
+#include <../drivers/video/omap2/omapfb/omapfb.h>
+#undef DEBUG
+#else
+#undef DEBUG
+#include <../drivers/video/omap2/omapfb/omapfb.h>
+#endif
+
+#define OMAP_DISP_DRV_NAME "omap_display"
+#define OMAP_DISP_FRAMEBUFFER_COUNT num_registered_fb
+
+#define OMAP_DISP_PAGE_MASK (PAGE_SIZE - 1)
+#define OMAP_DISP_PAGE_TRUNCATE (~OMAP_DISP_PAGE_MASK)
+#define OMAP_DISP_PAGE_ROUND_UP(x) \
+ (((x)+OMAP_DISP_PAGE_MASK) & OMAP_DISP_PAGE_TRUNCATE)
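+
+/*
+ * Illustrative arithmetic (assuming a 4096 byte PAGE_SIZE): a 6000 byte
+ * request becomes (6000 + 4095) & ~4095 = 8192, i.e. buffer sizes are
+ * always rounded up to a whole number of pages before being carved out
+ * of the framebuffer memory below.
+ */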
+
+#define OMAP_DISP_IRQ_TIMEOUT 500
+
+#ifdef DEBUG
+#define DBG_PRINT(format, ...) printk(KERN_INFO OMAP_DISP_DRV_NAME \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+#define WRN_PRINT(format, ...) printk(KERN_WARNING OMAP_DISP_DRV_NAME \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+#define ERR_PRINT(format, ...) printk(KERN_ERR OMAP_DISP_DRV_NAME \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+#else
+#define DBG_PRINT(format, ...)
+#define WRN_PRINT(format, ...)
+#define ERR_PRINT(format, ...)
+#endif
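+
+/*
+ * Example output (hypothetical values): with DEBUG defined at this point,
+ * the DBG_PRINT("Opening display '%s'", display->name) call in
+ * open_display() would produce a line such as
+ *   omap_display (open_display 58): Opening display 'primary'
+ * where 58 is whatever __LINE__ evaluates to at the call site.
+ */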
+
+#include "omap_display.h"
+
+/* List for the available displays */
+static struct omap_display_device *omap_display_list;
+static unsigned int omap_display_number;
+
+/* Workqueues for virtual display (primary, secondary) */
+static struct workqueue_struct *vdisp_wq_primary;
+static struct workqueue_struct *vdisp_wq_secondary;
+static struct omap_display_sync_item vdisp_sync_primary;
+static struct omap_display_sync_item vdisp_sync_secondary;
+
+/* Forward declarations */
+static struct omap_display_buffer *create_main_buffer(
+ struct omap_display_device *display);
+static int display_destroy_buffer(struct omap_display_buffer *buffer);
+static void vdisp_sync_handler(struct work_struct *work);
+
+static int open_display(struct omap_display_device *display,
+ enum omap_display_feature features)
+{
+ int i;
+
+ DBG_PRINT("Opening display '%s'", display->name);
+
+ /* TODO: Support horizontal orientation */
+ if (features & ORIENTATION_HORIZONTAL) {
+		DBG_PRINT("Horizontal orientation is not supported yet, "
+ "falling back to vertical orientation");
+ features = ORIENTATION_VERTICAL;
+ }
+
+ display->features = features;
+ display->reference_count++;
+ for (i = 0; i < display->overlay_managers_count; i++)
+ omap_dss_get_device(display->overlay_managers[i]->device);
+
+ /* If the main buffer doesn't exist create it */
+ if (!display->main_buffer) {
+ DBG_PRINT("Main buffer doesn't exist for display '%s', create"
+ " one", display->name);
+ display->main_buffer = create_main_buffer(display);
+ if (!display->main_buffer) {
+ ERR_PRINT("Failed to create main buffer for '%s'",
+ display->name);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int close_display(struct omap_display_device *display)
+{
+ int err;
+ int i;
+
+ /* TODO: Is it the same thing to close a virtual and single display? */
+ DBG_PRINT("Closing display '%s'", display->name);
+
+ display->reference_count--;
+ for (i = 0; i < display->overlay_managers_count; i++)
+ omap_dss_put_device(display->overlay_managers[i]->device);
+
+ if (display->flip_chain) {
+ err = display->destroy_flip_chain(display);
+ display->flip_chain = 0;
+ if (err)
+ WRN_PRINT("An error happened when destroying flip "
+ "chain for '%s'", display->name);
+ }
+
+ return 0;
+}
+
+static int get_max_buffers(struct omap_display_device *display)
+{
+	/* TODO: If TILER is to be used, how do we calculate this? */
+ int fb_idx;
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unknown display id %i", display->id);
+ BUG();
+ }
+
+ /* Use the framebuffer memory */
+ if (fb_idx >= 0 && fb_idx < num_registered_fb) {
+ struct fb_info *framebuffer = registered_fb[fb_idx];
+ unsigned long buffer_size;
+
+ /* Single buffer size */
+ buffer_size = display->width * display->height *
+ display->bytes_per_pixel;
+ /* Page align the buffer size, round up to the page size */
+ buffer_size = OMAP_DISP_PAGE_ROUND_UP(buffer_size);
+
+ return (int) (framebuffer->fix.smem_len / buffer_size);
+ } else {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return 0;
+ }
+}
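+
+/*
+ * Illustrative example for get_max_buffers() (assumed numbers, not a
+ * statement about real hardware): an 800x480 display at 32 bpp needs
+ * 800 * 480 * 4 = 1536000 bytes per buffer (already page aligned), so a
+ * framebuffer with an smem_len of 8388608 bytes (8 MiB) yields
+ * 8388608 / 1536000 = 5 buffers.
+ */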
+
+static int create_flip_chain(struct omap_display_device *display,
+ unsigned int buffer_count)
+{
+ int fb_idx;
+
+ /* TODO: What about TILER buffers */
+ if (buffer_count <= 1) {
+ ERR_PRINT("Flip chains with %i buffers not supported",
+ buffer_count);
+ return 1;
+ } else if (buffer_count > display->buffers_available) {
+ ERR_PRINT("Requesting %i buffers when there is %i available"
+ " for '%s'", buffer_count, display->buffers_available,
+ display->name);
+ return 1;
+ } else if (display->flip_chain) {
+ ERR_PRINT("Flip chain already exists for '%s'", display->name);
+ return 1;
+ }
+
+ /* Create the flip chain with the framebuffer memory */
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unknown display id %i", display->id);
+ BUG();
+ }
+
+ /* Use the framebuffer memory */
+ if (fb_idx >= 0 && fb_idx < num_registered_fb) {
+ struct fb_info *framebuffer = registered_fb[fb_idx];
+ unsigned long buffer_size;
+ struct omap_display_flip_chain *flip_chain;
+ int i;
+
+ if (!framebuffer || !framebuffer->fix.smem_start ||
+ !framebuffer->screen_base) {
+ ERR_PRINT("Framebuffer %i doesn't seem to be "
+ "initialized", fb_idx);
+ return 1;
+ }
+
+ /*
+ * Check if there is enough memory in the fb for the requested
+ * buffers
+ */
+ buffer_size = display->width * display->height *
+ display->bytes_per_pixel;
+ /* Page align the buffer size, round up to the page size */
+ buffer_size = OMAP_DISP_PAGE_ROUND_UP(buffer_size);
+
+ if (buffer_size * buffer_count > framebuffer->fix.smem_len) {
+ ERR_PRINT("Not enough memory to allocate %i buffers "
+ "(%lu bytes each), memory available %lu for "
+ "display '%s'", buffer_count, buffer_size,
+ (unsigned long)framebuffer->fix.smem_len,
+ display->name);
+ return 1;
+ }
+
+ flip_chain = kzalloc(sizeof(*flip_chain), GFP_KERNEL);
+
+ if (!flip_chain) {
+ ERR_PRINT("Out of memory");
+ return 1;
+ }
+
+ for (i = 0; i < buffer_count; i++) {
+ struct omap_display_buffer *buffer;
+
+ /*
+ * Reuse the main buffer as the first buffer in the
+ * flip chain
+ */
+ if (i == 0) {
+ buffer = display->main_buffer;
+ flip_chain->buffers[i] = buffer;
+ DBG_PRINT("Flip chain buffer %i has address "
+ "%lx for display '%s'", i,
+ buffer->physical_addr, display->name);
+ continue;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+
+ if (!buffer) {
+ /*
+ * FIXME: If one buffer allocation fails,
+ * deallocate flip chain and buffers
+ */
+ ERR_PRINT("Out of memory");
+ return 1;
+ }
+
+ buffer->physical_addr = framebuffer->fix.smem_start +
+ (buffer_size * i);
+ buffer->virtual_addr =
+ (unsigned long) framebuffer->screen_base +
+ (buffer_size * i);
+ buffer->size = buffer_size;
+ buffer->display = display;
+ flip_chain->buffers[i] = buffer;
+
+ DBG_PRINT("Flip chain buffer %i has address %lx for"
+ " display '%s'", i, buffer->physical_addr,
+ display->name);
+ }
+
+		/* Record the chain size so destroy_flip_chain() can free all buffers */
+		flip_chain->buffer_count = buffer_count;
+		flip_chain->display = display;
+		display->flip_chain = flip_chain;
+ return 0;
+ } else {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return 1;
+ }
+}
+
+static int destroy_flip_chain(struct omap_display_device *display)
+{
+ int i;
+ int err;
+
+ if (!display->flip_chain) {
+ DBG_PRINT("No flip chain to destroy for '%s'", display->name);
+ return 0;
+ }
+
+ for (i = 0; i < display->flip_chain->buffer_count; i++) {
+ struct omap_display_buffer *buffer =
+ display->flip_chain->buffers[i];
+ /* If buffer is main buffer don't touch it */
+ if (display->main_buffer == buffer)
+ continue;
+
+ err = display_destroy_buffer(buffer);
+ if (err) {
+ ERR_PRINT("Error destroying buffer in flip chain for"
+ " '%s'", display->name);
+ return 1;
+ }
+ }
+
+ DBG_PRINT("Destroying flip chain for '%s'", display->name);
+ kfree(display->flip_chain);
+ display->flip_chain = 0;
+
+ return 0;
+}
+
+static int rotate_display(struct omap_display_device *display,
+	enum omap_dss_rotation_angle rotation)
+{
+ ERR_PRINT("Not supported yet");
+ return 1;
+}
+
+static int display_destroy_buffer(struct omap_display_buffer *buffer)
+{
+ kfree(buffer);
+ return 0;
+}
+
+static int present_buffer_virtual(struct omap_display_buffer *buffer)
+{
+ /*
+ * TODO: Support for ORIENTATION_VERTICAL is in place,
+ * ORIENTATION_HORIZONTAL is missing
+ */
+ struct omap_display_device *display_virtual = buffer->display;
+ struct omap_display_device *display_primary;
+ struct omap_display_device *display_secondary;
+ struct omap_display_buffer temp_buffer;
+ unsigned int buffer_offset;
+
+ if (display_virtual->id != OMAP_DISPID_VIRTUAL) {
+ ERR_PRINT("Not a virtual display");
+ BUG();
+ }
+
+ display_primary = omap_display_get(OMAP_DISPID_PRIMARY);
+ display_secondary = omap_display_get(OMAP_DISPID_SECONDARY);
+ /*
+	 * Calculate the offset without rounding up to page alignment,
+	 * otherwise the second display may see incorrect data
+ */
+ buffer_offset = display_primary->height * display_virtual->byte_stride;
+
+ /* The first buffer will be the base */
+ temp_buffer.physical_addr = buffer->physical_addr;
+ temp_buffer.virtual_addr = buffer->virtual_addr;
+ temp_buffer.size = buffer->size >> 1;
+
+ if (display_virtual->features & ORIENTATION_INVERT) {
+ /* Secondary display has the base */
+ temp_buffer.display = display_secondary;
+ display_secondary->present_buffer(&temp_buffer);
+ } else {
+ /* Primary display has the base */
+ temp_buffer.display = display_primary;
+ display_primary->present_buffer(&temp_buffer);
+ }
+
+ /* Remaining display will show the rest */
+ temp_buffer.physical_addr = buffer->physical_addr + buffer_offset;
+ temp_buffer.virtual_addr = buffer->virtual_addr + buffer_offset;
+
+ if (display_virtual->features & ORIENTATION_INVERT) {
+ temp_buffer.display = display_primary;
+ display_primary->present_buffer(&temp_buffer);
+ } else {
+ temp_buffer.display = display_secondary;
+ display_secondary->present_buffer(&temp_buffer);
+ }
+
+ return 0;
+}
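+
+/*
+ * Illustrative layout for the split done above (assumed 800x480 panels at
+ * 32 bpp, so byte_stride is 3200): buffer_offset is 480 * 3200 = 1536000
+ * bytes, meaning the first half of the buffer is presented on one panel
+ * and the half starting at physical_addr + 1536000 on the other, with
+ * ORIENTATION_INVERT selecting which panel receives the base half.
+ */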
+
+static int present_buffer(struct omap_display_buffer *buffer)
+{
+ struct omap_display_device *display = buffer->display;
+ struct fb_info *framebuffer;
+ struct omapfb_info *ofbi;
+ struct omapfb2_device *fbdev;
+ int i;
+ int fb_idx;
+
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unable to handle display %i", display->id);
+ BUG();
+ }
+
+ if (fb_idx < 0 || fb_idx >= num_registered_fb) {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return 1;
+ }
+
+ framebuffer = registered_fb[fb_idx];
+ ofbi = FB2OFB(framebuffer);
+ fbdev = ofbi->fbdev;
+
+ omapfb_lock(fbdev);
+
+ /* Get the overlays attached to the framebuffer */
+ for (i = 0; i < ofbi->num_overlays ; i++) {
+		struct omap_dss_device *dss_device = NULL;
+ struct omap_dss_driver *driver = NULL;
+ struct omap_overlay_manager *manager;
+ struct omap_overlay *overlay;
+ struct omap_overlay_info overlay_info;
+
+ overlay = ofbi->overlays[i];
+ manager = overlay->manager;
+ overlay->get_overlay_info(overlay, &overlay_info);
+
+ overlay_info.paddr = buffer->physical_addr;
+ overlay_info.vaddr = (void *) buffer->virtual_addr;
+ overlay->set_overlay_info(overlay, &overlay_info);
+
+ if (manager) {
+ manager->apply(manager);
+			dss_device = manager->device;
+			driver = dss_device ? dss_device->driver : NULL;
+ }
+
+		if (driver && dss_ovl_manually_updated(overlay)) {
+			if (driver->sched_update)
+				driver->sched_update(dss_device, 0, 0,
+						overlay_info.width,
+						overlay_info.height);
+			else if (driver->update)
+				driver->update(dss_device, 0, 0,
+						overlay_info.width,
+						overlay_info.height);
+ }
+ }
+
+ omapfb_unlock(fbdev);
+
+
+ return 0;
+}
+
+static int present_buffer_sync(struct omap_display_buffer *buffer)
+{
+ /* TODO: Cloning may tear with this implementation */
+ struct omap_display_device *display = buffer->display;
+ struct fb_info *framebuffer;
+ struct omap_dss_device *dss_device;
+ struct omap_dss_driver *driver;
+ struct omap_overlay_manager *manager;
+ int fb_idx;
+ int err = 1;
+
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unable to handle display %i", display->id);
+ BUG();
+ }
+
+ if (fb_idx < 0 || fb_idx >= num_registered_fb) {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return 1;
+ }
+
+ framebuffer = registered_fb[fb_idx];
+ dss_device = fb2display(framebuffer);
+
+ if (!dss_device) {
+ WRN_PRINT("No DSS device to sync with display '%s'!",
+ display->name);
+ return 1;
+ }
+
+ driver = dss_device->driver;
+ manager = dss_device->manager;
+
+ if (driver && driver->sync &&
+ driver->get_update_mode(dss_device) ==
+ OMAP_DSS_UPDATE_MANUAL) {
+ err = driver->sync(dss_device);
+ err |= display->present_buffer(buffer);
+ } else if (manager && manager->wait_for_vsync) {
+ err = manager->wait_for_vsync(manager);
+ err |= display->present_buffer(buffer);
+ }
+
+ if (err)
+ WRN_PRINT("Unable to sync with display '%s'!", display->name);
+
+ return err;
+}
+
+static void vdisp_sync_handler(struct work_struct *work)
+{
+ struct omap_display_sync_item *sync_item =
+ (struct omap_display_sync_item *) work;
+ struct omap_display_device *display = sync_item->buffer->display;
+ display->present_buffer_sync(sync_item->buffer);
+}
+
+static int present_buffer_sync_virtual(struct omap_display_buffer *buffer)
+{
+ /*
+ * TODO: Support for ORIENTATION_VERTICAL is in place,
+	 * ORIENTATION_HORIZONTAL is missing. Some code can be reduced here;
+	 * it will be simplified in the future.
+ */
+ struct omap_display_device *display_virtual = buffer->display;
+ struct omap_display_device *display_primary;
+ struct omap_display_device *display_secondary;
+ struct omap_display_buffer temp_buffer_top;
+ struct omap_display_buffer temp_buffer_bottom;
+ unsigned int buffer_offset;
+
+ if (display_virtual->id != OMAP_DISPID_VIRTUAL) {
+ ERR_PRINT("Not a virtual display");
+ BUG();
+ }
+
+ display_primary = omap_display_get(OMAP_DISPID_PRIMARY);
+ display_secondary = omap_display_get(OMAP_DISPID_SECONDARY);
+ /*
+	 * Calculate the offset without rounding up to page alignment,
+	 * otherwise the second display may see incorrect data
+ */
+ buffer_offset = display_primary->height * display_virtual->byte_stride;
+
+ /* The first buffer will be the top */
+ temp_buffer_top.physical_addr = buffer->physical_addr;
+ temp_buffer_top.virtual_addr = buffer->virtual_addr;
+ temp_buffer_top.size = buffer->size >> 1;
+ /* Then the bottom */
+ temp_buffer_bottom.physical_addr = buffer->physical_addr +
+ buffer_offset;
+ temp_buffer_bottom.virtual_addr = buffer->virtual_addr + buffer_offset;
+ temp_buffer_bottom.size = buffer->size >> 1;
+
+ if (display_virtual->features & ORIENTATION_INVERT) {
+ /* Secondary display has the base */
+ temp_buffer_top.display = display_secondary;
+ temp_buffer_bottom.display = display_primary;
+ vdisp_sync_primary.buffer = &temp_buffer_bottom;
+ vdisp_sync_secondary.buffer = &temp_buffer_top;
+ } else {
+ /* Primary display has the base */
+ temp_buffer_top.display = display_primary;
+ temp_buffer_bottom.display = display_secondary;
+ vdisp_sync_primary.buffer = &temp_buffer_top;
+ vdisp_sync_secondary.buffer = &temp_buffer_bottom;
+ }
+
+ /* Launch the workqueues for each display to present independently */
+ queue_work(vdisp_wq_primary,
+ (struct work_struct *)&vdisp_sync_primary);
+ queue_work(vdisp_wq_secondary,
+ (struct work_struct *)&vdisp_sync_secondary);
+
+ /* Wait until each display sync and present */
+ flush_workqueue(vdisp_wq_primary);
+ flush_workqueue(vdisp_wq_secondary);
+
+ return 0;
+}
+
+static int display_sync(struct omap_display_device *display)
+{
+ /* TODO: Synchronize properly with multiple managers */
+ struct fb_info *framebuffer;
+ struct omap_dss_device *dss_device;
+ struct omap_dss_driver *driver;
+ struct omap_overlay_manager *manager;
+ int fb_idx;
+ int err = 1;
+
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unable to handle display %i", display->id);
+ BUG();
+ }
+
+ if (fb_idx < 0 || fb_idx >= num_registered_fb) {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return 1;
+ }
+
+ framebuffer = registered_fb[fb_idx];
+ dss_device = fb2display(framebuffer);
+
+ if (!dss_device) {
+ WRN_PRINT("No DSS device to sync with display '%s'!",
+ display->name);
+ return 1;
+ }
+
+ driver = dss_device->driver;
+ manager = dss_device->manager;
+
+ if (driver && driver->sync &&
+ driver->get_update_mode(dss_device) == OMAP_DSS_UPDATE_MANUAL)
+ err = driver->sync(dss_device);
+ else if (manager && manager->wait_for_vsync)
+ err = manager->wait_for_vsync(manager);
+
+ if (err)
+ WRN_PRINT("Unable to sync with display '%s'!", display->name);
+
+ return err;
+}
+
+static int display_sync_virtual(struct omap_display_device *display_virtual)
+{
+ /*
+	 * XXX: This function only waits for the primary display; it should
+	 * be adapted to customer needs, since waiting for both the primary
+	 * AND the secondary display may take too long for a single sync.
+ */
+ struct omap_display_device *display_primary;
+
+ if (display_virtual->id != OMAP_DISPID_VIRTUAL) {
+ ERR_PRINT("Not a virtual display");
+ BUG();
+ }
+
+ display_primary = omap_display_get(OMAP_DISPID_PRIMARY);
+ return display_primary->sync(display_primary);
+}
+
+static struct omap_display_buffer *create_main_buffer(
+ struct omap_display_device *display)
+{
+ int fb_idx;
+ switch (display->id) {
+ case OMAP_DISPID_PRIMARY:
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_SECONDARY:
+ fb_idx = 1;
+ break;
+ case OMAP_DISPID_TERTIARY:
+ fb_idx = 2;
+ break;
+ case OMAP_DISPID_VIRTUAL:
+ /* Use fb0 for virtual display */
+ fb_idx = 0;
+ break;
+ case OMAP_DISPID_BADSTATE:
+ default:
+ ERR_PRINT("Unknown display id %i", display->id);
+ BUG();
+ }
+
+ /* Use the framebuffer memory */
+ if (fb_idx >= 0 && fb_idx < num_registered_fb) {
+ struct fb_info *framebuffer = registered_fb[fb_idx];
+ unsigned long buffer_size;
+ struct omap_display_buffer *buffer;
+
+ if (!framebuffer || !framebuffer->fix.smem_start ||
+ !framebuffer->screen_base) {
+ ERR_PRINT("Framebuffer %i doesn't seem to be "
+ "initialized", fb_idx);
+ return NULL;
+ }
+
+ /*
+ * Check if there is enough memory in the fb for the
+ * main buffer
+ */
+ buffer_size = display->width * display->height *
+ display->bytes_per_pixel;
+ /* Page align the buffer size */
+ buffer_size = OMAP_DISP_PAGE_ROUND_UP(buffer_size);
+
+ if (buffer_size > framebuffer->fix.smem_len) {
+ ERR_PRINT("Main buffer needs %lu bytes while the "
+ "framebuffer %i has only %lu bytes for display"
+ " '%s'", buffer_size, fb_idx,
+ (unsigned long)framebuffer->fix.smem_len,
+ display->name);
+ return NULL;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+
+ if (!buffer) {
+ ERR_PRINT("Out of memory");
+ return NULL;
+ }
+
+ /* Use base addresses reported by the framebuffer */
+ buffer->physical_addr = framebuffer->fix.smem_start;
+ buffer->virtual_addr =
+ (unsigned long) framebuffer->screen_base;
+ buffer->size = buffer_size;
+ buffer->display = display;
+
+ DBG_PRINT("Created main buffer %lx for display '%s'",
+ buffer->physical_addr, display->name);
+
+ return buffer;
+ } else {
+ ERR_PRINT("Framebuffer %i doesn't exist for display '%s'",
+ fb_idx, display->name);
+ return NULL;
+ }
+}
+
+static int populate_display_info(struct omap_display_device *display,
+ struct omap_overlay_manager *overlay_manager)
+{
+ struct omap_dss_device *dss_device = overlay_manager->device;
+ u16 xres;
+ u16 yres;
+ int i;
+
+ if (!strcmp(dss_device->name, "lcd")) {
+ display->id = OMAP_DISPID_PRIMARY;
+ display->name = "primary";
+ } else if (!strcmp(dss_device->name, "lcd2")) {
+ display->id = OMAP_DISPID_SECONDARY;
+ display->name = "secondary";
+ } else if (!strcmp(dss_device->name, "hdmi")) {
+ display->id = OMAP_DISPID_TERTIARY;
+ display->name = "tertiary";
+ } else {
+ ERR_PRINT("Display id '%s' not supported", dss_device->name);
+ return 1;
+ }
+
+ dss_device->driver->get_resolution(dss_device, &xres, &yres);
+ if (xres == 0 || yres == 0) {
+ ERR_PRINT("Unable to handle display '%s' with width %i "
+ "and height %i", dss_device->name, xres, yres);
+ return 1;
+ }
+
+ display->width = xres;
+ display->height = yres;
+
+ display->bits_per_pixel =
+ dss_device->driver->get_recommended_bpp(dss_device);
+ switch (display->bits_per_pixel) {
+ case 16:
+ /*
+		 * TODO: Assume RGB_565; may need to double-check in
+		 * the DSS if this is true
+ */
+ display->pixel_format = RGB_565;
+ display->bytes_per_pixel = 2;
+ break;
+	case 24: /* 24 bits are encapsulated in 32 bits */
+	case 32:
+		/*
+		 * TODO: Assume ARGB_8888; may need to double-check in
+		 * the DSS if this is true
+ */
+ display->pixel_format = ARGB_8888;
+ display->bytes_per_pixel = 4;
+ break;
+ default:
+ ERR_PRINT("Unable to handle %i bpp", display->bits_per_pixel);
+ return 1;
+ }
+
+ display->byte_stride = display->bytes_per_pixel * display->width;
+	display->rotation = OMAP_DSS_ROT_0; /* Assume rotation 0 degrees */
+ display->main_buffer = 0;
+ display->flip_chain = 0;
+
+ /* Add the manager to the list */
+ for (i = 0; i < OMAP_DISP_MAX_MANAGERS; i++)
+ display->overlay_managers[i] = 0;
+
+ display->overlay_managers[0] = overlay_manager;
+ display->overlay_managers_count = 1;
+
+ /* Assign function pointers for display operations */
+ display->open = open_display;
+ display->close = close_display;
+ display->create_flip_chain = create_flip_chain;
+ display->destroy_flip_chain = destroy_flip_chain;
+ display->rotate = rotate_display;
+ display->present_buffer = present_buffer;
+ display->sync = display_sync;
+ display->present_buffer_sync = present_buffer_sync;
+
+ display->main_buffer = create_main_buffer(display);
+ if (!display->main_buffer)
+ WRN_PRINT("Failed to create main buffer for '%s'",
+ display->name);
+
+ display->buffers_available = get_max_buffers(display);
+
+ /* Just print some display info */
+ DBG_PRINT("Found display '%s-%s' (%i,%i) %i bpp (%i bytes per pixel)"
+ " rotation %i", display->name, dss_device->name,
+ display->width, display->height, display->bits_per_pixel,
+ display->bytes_per_pixel, display->rotation);
+
+ return 0;
+}
+
+static int populate_virtual_display_info(struct omap_display_device *display)
+{
+	struct omap_display_device *display_primary;
+ struct omap_display_device *display_secondary;
+ int i;
+
+ display->id = OMAP_DISPID_VIRTUAL;
+ display->name = "virtual";
+
+ display_primary = omap_display_get(OMAP_DISPID_PRIMARY);
+ display_secondary = omap_display_get(OMAP_DISPID_SECONDARY);
+
+ if (!display_primary) {
+ ERR_PRINT("Primary display doesn't exist");
+ return 1;
+ } else if (!display_secondary) {
+ ERR_PRINT("Secondary display doesn't exist");
+ return 1;
+ }
+
+ /* Combine primary and secondary display resolutions */
+ if (display_primary->width != display_secondary->width ||
+ display_primary->height != display_secondary->height) {
+		ERR_PRINT("Primary and secondary display resolutions are not"
+			" the same");
+ return 1;
+ }
+
+ /*
+	 * TODO: The resolution is hardcoded here assuming a vertical
+	 * virtual config; what about horizontal?
+ */
+ display->width = display_primary->width;
+ display->height = display_primary->height * 2;
+
+ if (display_primary->bits_per_pixel !=
+ display_secondary->bits_per_pixel) {
+		ERR_PRINT("Primary and secondary display formats are"
+			" not the same");
+ return 1;
+ }
+
+ display->bits_per_pixel = display_primary->bits_per_pixel;
+ switch (display->bits_per_pixel) {
+ case 16:
+ /*
+		 * TODO: Assume RGB_565; may need to double-check in
+		 * the DSS if this is true
+ */
+ display->pixel_format = RGB_565;
+ display->bytes_per_pixel = 2;
+ break;
+	case 24: /* 24 bits are encapsulated in 32 bits */
+	case 32:
+		/*
+		 * TODO: Assume ARGB_8888; may need to double-check in
+		 * the DSS if this is true
+ */
+ display->pixel_format = ARGB_8888;
+ display->bytes_per_pixel = 4;
+ break;
+ default:
+ ERR_PRINT("Unable to handle %i bpp",
+ display->bits_per_pixel);
+ return 1;
+ }
+
+	/* TODO: Assuming a vertical virtual config for the stride too */
+ display->byte_stride = display->bytes_per_pixel * display->width;
+	display->rotation = OMAP_DSS_ROT_0; /* Assume rotation 0 degrees */
+ display->main_buffer = 0;
+ display->flip_chain = 0;
+
+ /* Add the primary and secondary overlay managers */
+ for (i = 0; i < OMAP_DISP_MAX_MANAGERS; i++)
+ display->overlay_managers[i] = 0;
+
+ display->overlay_managers[0] = display_primary->overlay_managers[0];
+ display->overlay_managers[1] = display_secondary->overlay_managers[0];
+ display->overlay_managers_count = 2;
+
+ /* Assign function pointers for display operations */
+ display->open = open_display;
+ display->close = close_display;
+ display->create_flip_chain = create_flip_chain;
+ display->destroy_flip_chain = destroy_flip_chain;
+ display->rotate = rotate_display;
+ display->present_buffer = present_buffer_virtual;
+ display->sync = display_sync_virtual;
+ display->present_buffer_sync = present_buffer_sync_virtual;
+
+ display->main_buffer = create_main_buffer(display);
+ if (!display->main_buffer)
+ WRN_PRINT("Failed to create main buffer for '%s'",
+ display->name);
+
+ display->buffers_available = get_max_buffers(display);
+
+ /* Just print some display info */
+ DBG_PRINT("Found display '%s' (%i,%i) %i bpp (%i bytes per pixel)"
+ " rotation %i", display->name, display->width, display->height,
+ display->bits_per_pixel, display->bytes_per_pixel,
+ display->rotation);
+
+ return 0;
+}
+
+static int create_display_list(void)
+{
+ int i;
+ struct omap_display_device *display;
+
+ /* Query number of possible displays available first */
+ omap_display_number = omap_dss_get_num_overlay_managers();
+ /* For virtual display */
+ omap_display_number++;
+
+ /* Allocate custom display list */
+ omap_display_list = kzalloc(
+ sizeof(*display) * omap_display_number, GFP_KERNEL);
+
+ if (!omap_display_list) {
+ ERR_PRINT("Out of memory");
+ return 1;
+ }
+
+ /* Populate each display info */
+ for (i = 0; i < omap_display_number - 1; i++) {
+ struct omap_overlay_manager *overlay_manager =
+ omap_dss_get_overlay_manager(i);
+ display = &omap_display_list[i];
+ if (!overlay_manager->device) {
+ WRN_PRINT("Display '%s' doesn't have a dss device "
+ "attached to it, ignoring",
+ overlay_manager->name);
+ display->id = OMAP_DISPID_BADSTATE;
+ continue;
+ }
+ if (populate_display_info(display, overlay_manager)) {
+ ERR_PRINT("Error populating display %i info with "
+ "manager '%s'", i,
+ overlay_manager->device->name);
+ display->id = OMAP_DISPID_BADSTATE;
+ continue;
+ }
+ }
+
+ /* Populate virtual display */
+ display = &omap_display_list[omap_display_number - 1];
+ if (populate_virtual_display_info(display)) {
+ ERR_PRINT("Error populating virtual display info");
+ display->id = OMAP_DISPID_BADSTATE;
+ }
+
+ return 0;
+}
+
+struct omap_display_device *omap_display_get(enum omap_display_id id)
+{
+ int i;
+ struct omap_display_device *display;
+
+ if (id == OMAP_DISPID_BADSTATE) {
+ ERR_PRINT("Oops.. user must never request a bad display");
+ BUG();
+ }
+
+ for (i = 0; i < omap_display_number; i++) {
+ display = &omap_display_list[i];
+ if (display->id == id)
+ return display;
+ }
+
+ ERR_PRINT("Unknown display %i requested", id);
+ return 0;
+}
+EXPORT_SYMBOL(omap_display_get);
+
+int omap_display_count(void)
+{
+ return omap_display_number;
+}
+EXPORT_SYMBOL(omap_display_count);
+
+int omap_display_initialize(void)
+{
+ /*
+ * TODO: Is there a better way to check if list is already created?
+ */
+ if (!omap_display_list) {
+ DBG_PRINT("Initializing driver");
+ if (create_display_list()) {
+ ERR_PRINT("Error loading driver");
+ return 1;
+ }
+ }
+
+ vdisp_wq_primary = __create_workqueue("vdisp_wq_primary", 1, 1, 1);
+ vdisp_wq_secondary = __create_workqueue("vdisp_wq_secondary", 1, 1, 1);
+ INIT_WORK((struct work_struct *)&vdisp_sync_primary,
+ vdisp_sync_handler);
+ INIT_WORK((struct work_struct *)&vdisp_sync_secondary,
+ vdisp_sync_handler);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_display_initialize);
+
+int omap_display_deinitialize(void)
+{
+ int i;
+ int err = 0;
+ DBG_PRINT("Driver exiting");
+
+ for (i = 0; i < omap_display_number; i++) {
+ struct omap_display_device *display = &omap_display_list[i];
+
+		/* Skip displays that failed to populate */
+		if (display->id == OMAP_DISPID_BADSTATE)
+			continue;
+
+ if (display->main_buffer) {
+ err = display_destroy_buffer(display->main_buffer);
+ display->main_buffer = 0;
+ if (err)
+ WRN_PRINT("An error happened when destroying "
+ "main buffer for '%s'", display->name);
+ }
+
+ err = display->close(display);
+
+ if (err)
+ ERR_PRINT("Unable to close display '%s'",
+ display->name);
+ }
+
+ kfree(omap_display_list);
+ omap_display_list = 0;
+
+ destroy_workqueue(vdisp_wq_primary);
+ destroy_workqueue(vdisp_wq_secondary);
+ vdisp_wq_primary = NULL;
+ vdisp_wq_secondary = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(omap_display_deinitialize);
+
diff --git a/drivers/gpu/pvr/display/omap_display.h b/drivers/gpu/pvr/display/omap_display.h
new file mode 100644
index 0000000..8076f88
--- /dev/null
+++ b/drivers/gpu/pvr/display/omap_display.h
@@ -0,0 +1,108 @@
+/*
+ * drivers/gpu/pvr/display/omap_display.h
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <plat/vrfb.h>
+#include <plat/display.h>
+#include <linux/completion.h>
+
+#ifndef __OMAP_DISPLAY_H_
+#define __OMAP_DISPLAY_H_
+
+/* Max overlay managers for virtual display */
+#define OMAP_DISP_MAX_MANAGERS 2
+/* 3 for triple buffering, 4 for virtual display */
+#define OMAP_DISP_MAX_FLIPCHAIN_BUFFERS 4
+#define OMAP_DISP_NUM_DISPLAYS 4 /* lcd, 2lcd, tv, virtual */
+
+struct omap_display_device;
+
+/* On OMAP 4 we can only manage 3 displays at the same time + virtual */
+enum omap_display_id {
+ OMAP_DISPID_PRIMARY = 1 << 0,
+ OMAP_DISPID_SECONDARY = 1 << 1,
+ OMAP_DISPID_TERTIARY = 1 << 2,
+ OMAP_DISPID_VIRTUAL = 1 << 15, /* Multiple displays */
+	OMAP_DISPID_BADSTATE = 1 << 30, /* Used to say a display is unusable */
+};
+
+enum omap_display_pixel_format {
+ RGB_565 = 0,
+ ARGB_8888 = 1,
+};
+
+/* Primary display location for virtual display */
+enum omap_display_feature {
+ ORIENTATION_VERTICAL = 1 << 0,
+ ORIENTATION_HORIZONTAL = 1 << 1,
+ ORIENTATION_INVERT = 1 << 2,
+};
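+
+/*
+ * The features are bit flags and may be OR'ed together by callers of
+ * display->open(); for example, the SGX display class driver opens the
+ * display with:
+ *
+ *	display->open(display, ORIENTATION_VERTICAL | ORIENTATION_INVERT);
+ */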
+
+struct omap_display_buffer {
+ unsigned long physical_addr;
+ unsigned long virtual_addr;
+ unsigned long size;
+ struct omap_display_device *display;
+};
+
+struct omap_display_flip_chain {
+ int buffer_count;
+ struct omap_display_buffer *buffers[OMAP_DISP_MAX_FLIPCHAIN_BUFFERS];
+ struct omap_display_device *display;
+};
+
+struct omap_display_sync_item {
+ struct work_struct work;
+ struct omap_display_buffer *buffer;
+};
+
+struct omap_display_device {
+ char *name;
+ enum omap_display_id id;
+ enum omap_display_pixel_format pixel_format;
+ enum omap_display_feature features;
+ unsigned int width;
+ unsigned int height;
+ unsigned int bits_per_pixel;
+ unsigned int bytes_per_pixel;
+ unsigned int byte_stride;
+ enum omap_dss_rotation_angle rotation;
+ unsigned int reference_count;
+ unsigned int buffers_available;
+ struct omap_display_buffer *main_buffer;
+ struct omap_display_flip_chain *flip_chain;
+ struct omap_overlay_manager *overlay_managers[OMAP_DISP_MAX_MANAGERS];
+ unsigned int overlay_managers_count;
+ int (*open)(struct omap_display_device *display,
+ enum omap_display_feature features);
+ int (*close) (struct omap_display_device *display);
+ int (*create_flip_chain) (struct omap_display_device *display,
+ unsigned int buffer_count);
+ int (*destroy_flip_chain) (struct omap_display_device *display);
+ int (*rotate) (struct omap_display_device *display,
+ enum omap_dss_rotation_angle rotation);
+ int (*present_buffer) (struct omap_display_buffer *buffer);
+ int (*sync) (struct omap_display_device *display);
+ int (*present_buffer_sync) (struct omap_display_buffer *buffer);
+};
+
+int omap_display_initialize(void);
+int omap_display_deinitialize(void);
+int omap_display_count(void);
+struct omap_display_device *omap_display_get(enum omap_display_id id);
+
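+/*
+ * Minimal usage sketch (illustrative only; error handling is simplified
+ * and the chosen display id is an assumption, not a requirement):
+ *
+ *	struct omap_display_device *display;
+ *
+ *	if (omap_display_initialize())
+ *		return -ENODEV;
+ *	display = omap_display_get(OMAP_DISPID_PRIMARY);
+ *	if (!display || display->open(display, ORIENTATION_VERTICAL))
+ *		return -ENODEV;
+ *	display->present_buffer(display->main_buffer);
+ *	...
+ *	display->close(display);
+ *	omap_display_deinitialize();
+ */
+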
+#endif
diff --git a/drivers/gpu/pvr/display/omap_sgx_displayclass.c b/drivers/gpu/pvr/display/omap_sgx_displayclass.c
new file mode 100644
index 0000000..7ae2420
--- /dev/null
+++ b/drivers/gpu/pvr/display/omap_sgx_displayclass.c
@@ -0,0 +1,1620 @@
+/*************************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ *************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+
+#if defined(LDM_PLATFORM)
+#include <linux/platform_device.h>
+#if defined(SGX_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+#endif
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omap_sgx_displayclass.h"
+#include "omap_display.h"
+
+/* XXX: Expect 2 framebuffers for virtual display */
+#if (CONFIG_FB_OMAP2_NUM_FBS < 2)
+#error "Virtual display is supported only with 2 or more framebuffers, \
+CONFIG_FB_OMAP2_NUM_FBS must be equal or greater than 2 \
+see CONFIG_FB_OMAP2_NUM_FBS for details in the kernel config"
+#endif
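+
+/*
+ * Example kernel configuration fragment satisfying the check above
+ * (illustrative values):
+ *
+ *	CONFIG_FB_OMAP2=y
+ *	CONFIG_FB_OMAP2_NUM_FBS=2
+ */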
+
+#define OMAP_DC_CMD_COUNT 1
+#define MAX_BUFFERS_FLIPPING 4
+
+/* Pointer Display->Services */
+static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = NULL;
+
+/* Pointer to the display devices */
+static struct OMAP_DISP_DEVINFO *pDisplayDevices = NULL;
+static int display_devices_count = 0;
+
+static void display_sync_handler(struct work_struct *work);
+static enum OMAP_ERROR get_pvr_dc_jtable (char *szFunctionName,
+ PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+
+
+/*
+ * Swap to display buffer. This buffer refers to one inside the
+ * framebuffer memory.
+ * in: hDevice, hBuffer, ui32SwapInterval, hPrivateTag, ui32ClipRectCount,
+ * psClipRect
+ */
+static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
+ IMG_HANDLE hBuffer,
+ IMG_UINT32 ui32SwapInterval,
+ IMG_HANDLE hPrivateTag,
+ IMG_UINT32 ui32ClipRectCount,
+ IMG_RECT *psClipRect)
+{
+ /* Nothing to do */
+ return PVRSRV_OK;
+}
+
+/*
+ * Set display destination rectangle.
+ * in: hDevice, hSwapChain, psRect
+ */
+static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect)
+{
+ /* Nothing to do */
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/*
+ * Set display source rectangle.
+ * in: hDevice, hSwapChain, psRect
+ */
+static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect)
+{
+ /* Nothing to do */
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/*
+ * Set display destination colour key.
+ * in: hDevice, hSwapChain, ui32CKColour
+ */
+static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour)
+{
+ /* Nothing to do */
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/*
+ * Set display source colour key.
+ * in: hDevice, hSwapChain, ui32CKColour
+ */
+static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour)
+{
+ /* Nothing to do */
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+/*
+ * Closes the display.
+ * in: hDevice
+ */
+static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo =
+ (struct OMAP_DISP_DEVINFO*) hDevice;
+ struct omap_display_device *display = psDevInfo->display;
+
+ if(display->close(display))
+		WARNING_PRINTK("Unable to properly close display '%s'",
+ display->name);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Flushes the sync queue present in the specified swap chain.
+ * in: psSwapChain
+ */
+static void FlushInternalSyncQueue(struct OMAP_DISP_SWAPCHAIN *psSwapChain)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo =
+ (struct OMAP_DISP_DEVINFO*) psSwapChain->pvDevInfo;
+ struct OMAP_DISP_FLIP_ITEM *psFlipItem;
+ struct omap_display_device *display = psDevInfo->display;
+ unsigned long ulMaxIndex;
+ unsigned long i;
+
+ psFlipItem = &psSwapChain->psFlipItems[psSwapChain->ulRemoveIndex];
+ ulMaxIndex = psSwapChain->ulBufferCount - 1;
+
+ DEBUG_PRINTK("Flushing sync queue on display %lu",
+ psDevInfo->ulDeviceID);
+ for(i = 0; i < psSwapChain->ulBufferCount; i++)
+ {
+ if (psFlipItem->bValid == OMAP_FALSE)
+ continue;
+
+ DEBUG_PRINTK("Flushing swap buffer index %lu",
+ psSwapChain->ulRemoveIndex);
+
+ /* Flip the buffer if it hasn't been flipped */
+ if(psFlipItem->bFlipped == OMAP_FALSE)
+ {
+ display->present_buffer(psFlipItem->display_buffer);
+ }
+
+		/* If command complete was never signalled, signal it now */
+ if(psFlipItem->bCmdCompleted == OMAP_FALSE)
+ {
+ DEBUG_PRINTK("Calling command complete for swap "
+ "buffer index %lu",
+ psSwapChain->ulRemoveIndex);
+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(
+ (IMG_HANDLE)psFlipItem->hCmdComplete,
+ IMG_TRUE);
+ }
+
+ psSwapChain->ulRemoveIndex++;
+ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
+ psSwapChain->ulRemoveIndex = 0;
+
+		/* Reset the flip item state so it can be reused later */
+ psFlipItem->bFlipped = OMAP_FALSE;
+ psFlipItem->bCmdCompleted = OMAP_FALSE;
+ psFlipItem->bValid = OMAP_FALSE;
+ psFlipItem =
+ &psSwapChain->psFlipItems[psSwapChain->ulRemoveIndex];
+ }
+
+ psSwapChain->ulInsertIndex = 0;
+ psSwapChain->ulRemoveIndex = 0;
+}
+
+/*
+ * Sets the flush state of the specified display device
+ * at the swap chain level without blocking the call.
+ * in: psDevInfo, bFlushState
+ */
+static void SetFlushStateInternalNoLock(struct OMAP_DISP_DEVINFO* psDevInfo,
+ enum OMAP_BOOL bFlushState)
+{
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
+
+ /* Nothing to do if there is no swap chain */
+ if (psSwapChain == NULL){
+ DEBUG_PRINTK("Swap chain is null, nothing to do for"
+ " display %lu", psDevInfo->ulDeviceID);
+ return;
+ }
+
+ if (bFlushState)
+ {
+ DEBUG_PRINTK("Desired flushState is true for display %lu",
+ psDevInfo->ulDeviceID);
+ if (psSwapChain->ulSetFlushStateRefCount == 0)
+ {
+ psSwapChain->bFlushCommands = OMAP_TRUE;
+ FlushInternalSyncQueue(psSwapChain);
+ }
+ psSwapChain->ulSetFlushStateRefCount++;
+ }
+ else
+ {
+ DEBUG_PRINTK("Desired flushState is false for display %lu",
+ psDevInfo->ulDeviceID);
+ if (psSwapChain->ulSetFlushStateRefCount != 0)
+ {
+ psSwapChain->ulSetFlushStateRefCount--;
+ if (psSwapChain->ulSetFlushStateRefCount == 0)
+ {
+ psSwapChain->bFlushCommands = OMAP_FALSE;
+ }
+ }
+ }
+}
+
+/*
+ * Sets the flush state of the specified display device
+ * at device level blocking the call if needed.
+ * in: psDevInfo, bFlushState
+ */
+static void SetFlushStateExternal(struct OMAP_DISP_DEVINFO* psDevInfo,
+ enum OMAP_BOOL bFlushState)
+{
+ DEBUG_PRINTK("Executing for display %lu",
+ psDevInfo->ulDeviceID);
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+ if (psDevInfo->bFlushCommands != bFlushState)
+ {
+ psDevInfo->bFlushCommands = bFlushState;
+ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
+ }
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+}
+
+/*
+ * Opens the display.
+ * in: ui32DeviceID, phDevice
+ * out: psSystemBufferSyncData
+ */
+static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE *phDevice,
+ PVRSRV_SYNC_DATA* psSystemBufferSyncData)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct omap_display_device *display;
+ int i;
+
+ psDevInfo = 0;
+ for(i = 0; i < display_devices_count; i++)
+ {
+ if(ui32DeviceID == (&pDisplayDevices[i])->ulDeviceID)
+ {
+ psDevInfo = &pDisplayDevices[i];
+ break;
+ }
+ }
+
+ if(!psDevInfo)
+ {
+ WARNING_PRINTK("Unable to identify display device with id %i",
+ (int)ui32DeviceID);
+		return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
+ display = psDevInfo->display;
+
+ DEBUG_PRINTK("Opening display %lu '%s'",psDevInfo->ulDeviceID,
+ display->name);
+
+	/* TODO: Explain here why ORIENTATION_VERTICAL is used */
+	if(display->open(display, ORIENTATION_VERTICAL | ORIENTATION_INVERT))
+		ERROR_PRINTK("Unable to properly open display '%s'",
+ psDevInfo->display->name);
+
+ display->present_buffer(display->main_buffer);
+
+ /* TODO: Turn on display here? */
+
+ *phDevice = (IMG_HANDLE)psDevInfo;
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Gets the available formats for the display.
+ * in: hDevice
+ * out: pui32NumFormats, psFormat
+ */
+static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
+ IMG_UINT32 *pui32NumFormats,
+ DISPLAY_FORMAT *psFormat)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ if(!hDevice || !pui32NumFormats)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ *pui32NumFormats = 1;
+
+ if(psFormat)
+ psFormat[0] = psDevInfo->sDisplayFormat;
+ else
+ WARNING_PRINTK("Display format is null for"
+ " display %lu", psDevInfo->ulDeviceID);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Gets the available dimensions for the display.
+ * in: hDevice, psFormat
+ * out: pui32NumDims, psDim
+ */
+static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
+ DISPLAY_FORMAT *psFormat,
+ IMG_UINT32 *pui32NumDims,
+ DISPLAY_DIMS *psDim)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ if(!hDevice || !psFormat || !pui32NumDims)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ *pui32NumDims = 1;
+
+ if(psDim)
+ psDim[0] = psDevInfo->sDisplayDim;
+ else
+ WARNING_PRINTK("Display dimensions are null for"
+ " display %lu", psDevInfo->ulDeviceID);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Gets the display framebuffer physical address.
+ * in: hDevice
+ * out: phBuffer
+ */
+static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+
+ if(!hDevice || !phBuffer)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Gets the display general information.
+ * in: hDevice
+ * out: psDCInfo
+ */
+static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+
+ if(!hDevice || !psDCInfo)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ *psDCInfo = psDevInfo->sDisplayInfo;
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Gets the display framebuffer virtual address.
+ * in: hDevice
+ * out: ppsSysAddr, pui32ByteSize, ppvCpuVAddr, phOSMapInfo, pbIsContiguous
+ */
+static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice,
+ IMG_HANDLE hBuffer,
+ IMG_SYS_PHYADDR **ppsSysAddr,
+ IMG_UINT32 *pui32ByteSize,
+ IMG_VOID **ppvCpuVAddr,
+ IMG_HANDLE *phOSMapInfo,
+ IMG_BOOL *pbIsContiguous,
+ IMG_UINT32 *pui32TilingStride)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_BUFFER *psSystemBuffer;
+
+	if(!hDevice || !hBuffer || !ppsSysAddr || !pui32ByteSize)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ psSystemBuffer = (struct OMAP_DISP_BUFFER *)hBuffer;
+ *ppsSysAddr = &psSystemBuffer->sSysAddr;
+ *pui32ByteSize = (IMG_UINT32)psDevInfo->sSystemBuffer.ulBufferSize;
+
+ if (ppvCpuVAddr)
+ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
+
+ if (phOSMapInfo)
+ *phOSMapInfo = (IMG_HANDLE)0;
+
+ if (pbIsContiguous)
+ *pbIsContiguous = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Creates a swap chain. Called when a 3D application begins.
+ * in: hDevice, ui32Flags, ui32BufferCount, psDstSurfAttrib, psSrcSurfAttrib
+ * ui32OEMFlags
+ * out: phSwapChain, ppsSyncData, pui32SwapChainID
+ */
+static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
+ IMG_UINT32 ui32Flags,
+ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+ IMG_UINT32 ui32BufferCount,
+ PVRSRV_SYNC_DATA **ppsSyncData,
+ IMG_UINT32 ui32OEMFlags,
+ IMG_HANDLE *phSwapChain,
+ IMG_UINT32 *pui32SwapChainID)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ struct OMAP_DISP_BUFFER *psBuffer;
+ struct OMAP_DISP_FLIP_ITEM *psFlipItems;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BuffersToSkip;
+ struct omap_display_device *display;
+ int err;
+
+ if(!hDevice || !psDstSurfAttrib || !psSrcSurfAttrib ||
+ !ppsSyncData || !phSwapChain)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+
+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
+ {
+ ERROR_PRINTK("Unable to operate with 0 MaxSwapChains for"
+ " display %lu", psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ if(psDevInfo->psSwapChain != NULL)
+ {
+ ERROR_PRINTK("Swap chain already exists for"
+ " display %lu", psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
+ }
+
+ if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
+ {
+ ERROR_PRINTK("Too many buffers. Trying to use %u buffers while"
+ " there is only %u available for display %lu",
+ (unsigned int)ui32BufferCount,
+ (unsigned int)psDevInfo->
+ sDisplayInfo.ui32MaxSwapChainBuffers,
+ psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_TOOMANYBUFFERS;
+ }
+
+ ui32BuffersToSkip = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers -
+ ui32BufferCount;
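+	/*
+	 * Illustrative example (assumed numbers): with ui32MaxSwapChainBuffers
+	 * equal to 4 and a request for 2 buffers, ui32BuffersToSkip is 2, so
+	 * the swap chain below uses display flip chain buffers 2 and 3 and
+	 * leaves buffers 0 and 1 untouched.
+	 */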
+
+ if((psDstSurfAttrib->pixelformat !=
+ psDevInfo->sDisplayFormat.pixelformat) ||
+ (psDstSurfAttrib->sDims.ui32ByteStride !=
+ psDevInfo->sDisplayDim.ui32ByteStride) ||
+ (psDstSurfAttrib->sDims.ui32Width !=
+ psDevInfo->sDisplayDim.ui32Width) ||
+ (psDstSurfAttrib->sDims.ui32Height !=
+ psDevInfo->sDisplayDim.ui32Height))
+ {
+ ERROR_PRINTK("Destination surface attributes differ from the"
+ " current framebuffer for display %lu",
+ psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if((psDstSurfAttrib->pixelformat !=
+ psSrcSurfAttrib->pixelformat) ||
+ (psDstSurfAttrib->sDims.ui32ByteStride !=
+ psSrcSurfAttrib->sDims.ui32ByteStride) ||
+ (psDstSurfAttrib->sDims.ui32Width !=
+ psSrcSurfAttrib->sDims.ui32Width) ||
+ (psDstSurfAttrib->sDims.ui32Height !=
+ psSrcSurfAttrib->sDims.ui32Height))
+ {
+ ERROR_PRINTK("Destination surface attributes differ from the"
+ " target destination surface for display %lu",
+ psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Create the flip chain in display side */
+ display = psDevInfo->display;
+ /* TODO: What about TILER buffers? */
+ /*
+ * Creating the flip chain with the maximum number of buffers
+ * we will decide which ones will be used later
+ */
+ err = display->create_flip_chain(
+ display, psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers);
+ if(err)
+ {
+ ERROR_PRINTK("Unable to create the flip chain for '%s' display"
+ " id %lu", display->name, psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Allocate memory needed for the swap chain */
+ psSwapChain = (struct OMAP_DISP_SWAPCHAIN*) kmalloc(
+ sizeof(struct OMAP_DISP_SWAPCHAIN), GFP_KERNEL);
+ if(!psSwapChain)
+ {
+ ERROR_PRINTK("Out of memory to allocate swap chain for"
+ " display %lu", psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ DEBUG_PRINTK("Creating swap chain for display %lu",
+ psDevInfo->ulDeviceID );
+
+ /* Allocate memory for the buffer abstraction structures */
+ psBuffer = (struct OMAP_DISP_BUFFER*) kmalloc(
+ sizeof(struct OMAP_DISP_BUFFER) * ui32BufferCount, GFP_KERNEL);
+ if(!psBuffer)
+ {
+ ERROR_PRINTK("Out of memory to allocate the buffer"
+ " abstraction structures for display %lu",
+ psDevInfo->ulDeviceID);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorFreeSwapChain;
+ }
+
+ /* Allocate memory for the flip item abstraction structures */
+ psFlipItems = (struct OMAP_DISP_FLIP_ITEM *) kmalloc (
+ sizeof(struct OMAP_DISP_FLIP_ITEM) * ui32BufferCount,
+ GFP_KERNEL);
+ if (!psFlipItems)
+ {
+ ERROR_PRINTK("Out of memory to allocate the flip item"
+ " abstraction structures for display %lu",
+ psDevInfo->ulDeviceID);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorFreeBuffers;
+ }
+
+	/* Assign the initial data to the swap chain structure */
+ psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
+ psSwapChain->psBuffer = psBuffer;
+ psSwapChain->psFlipItems = psFlipItems;
+ psSwapChain->ulInsertIndex = 0;
+ psSwapChain->ulRemoveIndex = 0;
+ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
+ psSwapChain->pvDevInfo = (void*)psDevInfo;
+
+ /*
+	 * Init the workqueue (single thread, freezable and real time)
+	 * and its work item for this display
+ */
+ INIT_WORK(&psDevInfo->sync_display_work, display_sync_handler);
+ psDevInfo->sync_display_wq =
+ __create_workqueue("pvr_display_sync_wq", 1, 1, 1);
+
+ DEBUG_PRINTK("Swap chain will have %u buffers for display %lu",
+ (unsigned int)ui32BufferCount, psDevInfo->ulDeviceID);
+	/* Link the available buffers into a circular list */
+ for(i=0; i<ui32BufferCount-1; i++)
+ {
+ psBuffer[i].psNext = &psBuffer[i+1];
+ }
+ psBuffer[i].psNext = &psBuffer[0];
+
+ /* Initialize each buffer abstraction structure */
+ for(i=0; i<ui32BufferCount; i++)
+ {
+ /* Get the needed buffers from the display flip chain */
+ IMG_UINT32 ui32SwapBuffer = i + ui32BuffersToSkip;
+ struct omap_display_buffer * flip_buffer =
+ display->flip_chain->buffers[ui32SwapBuffer];
+ psBuffer[i].display_buffer = flip_buffer;
+ psBuffer[i].psSyncData = ppsSyncData[i];
+ psBuffer[i].sSysAddr.uiAddr = flip_buffer->physical_addr;
+ psBuffer[i].sCPUVAddr =
+ (IMG_CPU_VIRTADDR) flip_buffer->virtual_addr;
+ DEBUG_PRINTK("Display %lu buffer index %u has physical "
+ "address 0x%x",
+ psDevInfo->ulDeviceID,
+ (unsigned int)i,
+ (unsigned int)psBuffer[i].sSysAddr.uiAddr);
+ }
+
+ /* Initialize each flip item abstraction structure */
+ for(i=0; i<ui32BufferCount; i++)
+ {
+ psFlipItems[i].bValid = OMAP_FALSE;
+ psFlipItems[i].bFlipped = OMAP_FALSE;
+ psFlipItems[i].bCmdCompleted = OMAP_FALSE;
+ psFlipItems[i].display_buffer = 0;
+ }
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ psDevInfo->psSwapChain = psSwapChain;
+ psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
+ if (psSwapChain->bFlushCommands)
+ psSwapChain->ulSetFlushStateRefCount = 1;
+ else
+ psSwapChain->ulSetFlushStateRefCount = 0;
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+
+ *phSwapChain = (IMG_HANDLE)psSwapChain;
+
+ return PVRSRV_OK;
+
+ErrorFreeBuffers:
+ kfree(psBuffer);
+ErrorFreeSwapChain:
+ kfree(psSwapChain);
+
+ return eError;
+}
+
+/*
+ * Destroy a swap chain. Called when a 3D application ends.
+ * in: hDevice, hSwapChain
+ */
+static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ struct omap_display_device *display;
+ int err;
+
+ if(!hDevice || !hSwapChain)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ psSwapChain = (struct OMAP_DISP_SWAPCHAIN*)hSwapChain;
+ display = psDevInfo->display;
+
+ if (psSwapChain != psDevInfo->psSwapChain)
+ {
+ ERROR_PRINTK("Swap chain handler differs from the one "
+ "present in the display device pointer");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DEBUG_PRINTK("Destroying swap chain for display %lu",
+ psDevInfo->ulDeviceID);
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ FlushInternalSyncQueue(psSwapChain);
+ psDevInfo->psSwapChain = NULL;
+
+ /*
+	 * Present the buffer at the base address of the framebuffer
+ */
+ display->present_buffer(display->main_buffer);
+
+ /* Destroy the flip chain in display side */
+ err = display->destroy_flip_chain(display);
+ if(err)
+ {
+ ERROR_PRINTK("Unable to destroy the flip chain for '%s' "
+ "display id %lu", display->name,
+ psDevInfo->ulDeviceID);
+ }
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+
+ /* Destroy the workqueue */
+ flush_workqueue(psDevInfo->sync_display_wq);
+ destroy_workqueue(psDevInfo->sync_display_wq);
+
+ kfree(psSwapChain->psFlipItems);
+ kfree(psSwapChain->psBuffer);
+ kfree(psSwapChain);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * Get the display buffers, i.e. the buffers allocated inside the
+ * framebuffer memory.
+ * in: hDevice, hSwapChain
+ * out: pui32BufferCount, phBuffer
+ */
+static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 *pui32BufferCount,
+ IMG_HANDLE *phBuffer)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ unsigned long i;
+
+ if(!hDevice || !hSwapChain || !pui32BufferCount || !phBuffer)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ psSwapChain = (struct OMAP_DISP_SWAPCHAIN*)hSwapChain;
+ if (psSwapChain != psDevInfo->psSwapChain)
+ {
+ ERROR_PRINTK("Swap chain handler differs from the one "
+ "present in the display device %lu pointer",
+ psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
+
+ for(i=0; i<psSwapChain->ulBufferCount; i++)
+ phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Sets the display state.
+ * in: ui32State, hDevice
+ */
+static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo =
+ (struct OMAP_DISP_DEVINFO*) hDevice;
+
+ switch (ui32State)
+ {
+ case DC_STATE_FLUSH_COMMANDS:
+ DEBUG_PRINTK("Setting state to flush commands for"
+ " display %lu", psDevInfo->ulDeviceID);
+ SetFlushStateExternal(psDevInfo, OMAP_TRUE);
+ break;
+ case DC_STATE_NO_FLUSH_COMMANDS:
+ DEBUG_PRINTK("Setting state to not flush commands for"
+ " display %lu", psDevInfo->ulDeviceID);
+ SetFlushStateExternal(psDevInfo, OMAP_FALSE);
+ break;
+ default:
+ WARNING_PRINTK("Unknown command state %u for display"
+ " %lu", (unsigned int)ui32State,
+ psDevInfo->ulDeviceID);
+ break;
+ }
+}
+
+/*
+ * Swap to the display system buffer, i.e. the buffer that resides in
+ * the framebuffer memory.
+ * in: hDevice, hSwapChain
+ */
+static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ struct omap_display_device *display;
+
+ if(!hDevice || !hSwapChain)
+ {
+ ERROR_PRINTK("Invalid parameters");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)hDevice;
+ psSwapChain = (struct OMAP_DISP_SWAPCHAIN*)hSwapChain;
+ display = psDevInfo->display;
+
+ DEBUG_PRINTK("Executing for display %lu",
+ psDevInfo->ulDeviceID);
+
+ if (psSwapChain != psDevInfo->psSwapChain)
+ {
+ ERROR_PRINTK("Swap chain handler differs from the one "
+ "present in the display device %lu pointer",
+ psDevInfo->ulDeviceID);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ FlushInternalSyncQueue(psSwapChain);
+ display->present_buffer(display->main_buffer);
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Handles the synchronization with the display
+ * in: work
+ */
+
+static void display_sync_handler(struct work_struct *work)
+{
+ /*
+	 * TODO: Since present_buffer_sync waits and then presents, this
+ * algorithm can be simplified further
+ */
+ struct OMAP_DISP_DEVINFO *psDevInfo = container_of(work,
+ struct OMAP_DISP_DEVINFO, sync_display_work);
+ struct omap_display_device *display = psDevInfo->display;
+ struct OMAP_DISP_FLIP_ITEM *psFlipItem;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ unsigned long ulMaxIndex;
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ psSwapChain = psDevInfo->psSwapChain;
+ if (!psSwapChain || psSwapChain->bFlushCommands)
+ goto ExitUnlock;
+
+ psFlipItem = &psSwapChain->psFlipItems[psSwapChain->ulRemoveIndex];
+ ulMaxIndex = psSwapChain->ulBufferCount - 1;
+
+ /* Iterate through the flip items and flip them if necessary */
+ while (psFlipItem->bValid) {
+ /* Update display */
+ display->present_buffer_sync(psFlipItem->display_buffer);
+
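+		/*
+		 * One display sync has elapsed for this flip item, so one
+		 * unit of its swap interval has been consumed
+		 */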
+ psFlipItem->ulSwapInterval--;
+ psFlipItem->bFlipped = OMAP_TRUE;
+
+ if (psFlipItem->ulSwapInterval == 0) {
+
+ /* Mark the flip item as completed to reuse it */
+ psSwapChain->ulRemoveIndex++;
+ if (psSwapChain->ulRemoveIndex > ulMaxIndex)
+ psSwapChain->ulRemoveIndex = 0;
+ psFlipItem->bCmdCompleted = OMAP_FALSE;
+ psFlipItem->bFlipped = OMAP_FALSE;
+ psFlipItem->bValid = OMAP_FALSE;
+
+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(
+ (IMG_HANDLE)psFlipItem->hCmdComplete,
+ IMG_TRUE);
+ psFlipItem->bCmdCompleted = OMAP_TRUE;
+ } else {
+ /*
+			 * The swap interval has not reached zero yet, so
+			 * queue the work again until it does
+ */
+ queue_work(psDevInfo->sync_display_wq,
+ &psDevInfo->sync_display_work);
+ break;
+ }
+
+ psFlipItem =
+ &psSwapChain->psFlipItems[psSwapChain->ulRemoveIndex];
+ }
+
+ExitUnlock:
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+}
+
+/*
+ * Performs a flip. This function takes the necessary steps to present
+ * the buffer to be flipped in the display.
+ * in: hCmdCookie, ui32DataSize, pvData
+ */
+static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie,
+ IMG_UINT32 ui32DataSize,
+ IMG_VOID *pvData)
+{
+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ struct OMAP_DISP_BUFFER *psBuffer;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ struct omap_display_device *display;
+#if defined(SYS_USING_INTERRUPTS)
+ struct OMAP_DISP_FLIP_ITEM* psFlipItem;
+#endif
+
+ if(!hCmdCookie || !pvData)
+ {
+ WARNING_PRINTK("Ignoring call with NULL parameters");
+ return IMG_FALSE;
+ }
+
+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
+
+ if (psFlipCmd == IMG_NULL ||
+ sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
+ {
+ WARNING_PRINTK("NULL command or command data size is wrong");
+ return IMG_FALSE;
+ }
+
+ psDevInfo = (struct OMAP_DISP_DEVINFO*)psFlipCmd->hExtDevice;
+ psBuffer = (struct OMAP_DISP_BUFFER*)psFlipCmd->hExtBuffer;
+ psSwapChain = (struct OMAP_DISP_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
+ display = psDevInfo->display;
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ if (psDevInfo->bDeviceSuspended)
+ {
+		/* If suspended, assume the commands are completed */
+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(
+ hCmdCookie, IMG_TRUE);
+ goto ExitTrueUnlock;
+ }
+
+#if defined(SYS_USING_INTERRUPTS)
+
+ if( psFlipCmd->ui32SwapInterval == 0 ||
+ psSwapChain->bFlushCommands == OMAP_TRUE)
+ {
+#endif
+ display->present_buffer(psBuffer->display_buffer);
+ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(
+ hCmdCookie, IMG_TRUE);
+
+#if defined(SYS_USING_INTERRUPTS)
+ goto ExitTrueUnlock;
+ }
+
+ psFlipItem = &psSwapChain->psFlipItems[psSwapChain->ulInsertIndex];
+
+ if(psFlipItem->bValid == OMAP_FALSE)
+ {
+ unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;
+
+ psFlipItem->bFlipped = OMAP_FALSE;
+
+ /*
+		 * The buffer is queued here and must be consumed by the workqueue
+ */
+ psFlipItem->hCmdComplete = (OMAP_HANDLE)hCmdCookie;
+ psFlipItem->ulSwapInterval =
+ (unsigned long)psFlipCmd->ui32SwapInterval;
+ psFlipItem->sSysAddr = &psBuffer->sSysAddr;
+ psFlipItem->bValid = OMAP_TRUE;
+ psFlipItem->display_buffer = psBuffer->display_buffer;
+
+ psSwapChain->ulInsertIndex++;
+ if(psSwapChain->ulInsertIndex > ulMaxIndex)
+ psSwapChain->ulInsertIndex = 0;
+
+ /* Give work to the workqueue to sync with the display */
+ queue_work(psDevInfo->sync_display_wq,
+ &psDevInfo->sync_display_work);
+
+ goto ExitTrueUnlock;
+ }
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ return IMG_FALSE;
+#endif
+
+ExitTrueUnlock:
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ return IMG_TRUE;
+}
+
+#if defined(LDM_PLATFORM)
+
+/*
+ * Function called when the driver must suspend
+ */
+static void DriverSuspend(void)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ int i;
+
+ if(!pDisplayDevices)
+ return;
+
+ for(i = 0; i < display_devices_count; i++)
+ {
+ psDevInfo = &pDisplayDevices[i];
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ if (psDevInfo->bDeviceSuspended)
+ {
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ continue;
+ }
+
+ psDevInfo->bDeviceSuspended = OMAP_TRUE;
+ SetFlushStateInternalNoLock(psDevInfo, OMAP_TRUE);
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ }
+}
+
+/*
+ * Function called when the driver must resume
+ */
+static void DriverResume(void)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ int i;
+
+ if(!pDisplayDevices)
+ return;
+
+ for(i = 0; i < display_devices_count; i++)
+ {
+ psDevInfo = &pDisplayDevices[i];
+
+ mutex_lock(&psDevInfo->sSwapChainLockMutex);
+
+ if (!psDevInfo->bDeviceSuspended)
+ {
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ continue;
+ }
+
+ SetFlushStateInternalNoLock(psDevInfo, OMAP_FALSE);
+ psDevInfo->bDeviceSuspended = OMAP_FALSE;
+
+ mutex_unlock(&psDevInfo->sSwapChainLockMutex);
+ }
+}
+#endif /* defined(LDM_PLATFORM) */
+
+/*
+ * Deinitializes a display device
+ * in: psDevInfo
+ */
+static void deinit_display_device(struct OMAP_DISP_DEVINFO *psDevInfo)
+{
+ /* TODO: Are we sure there is nothing to do here? */
+}
+
+/*
+ * Deinitialization routine for the 3rd party display driver
+ */
+static enum OMAP_ERROR destroy_display_devices(void)
+{
+ struct OMAP_DISP_DEVINFO *psDevInfo;
+ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable;
+ int i;
+
+ DEBUG_PRINTK("Deinitializing 3rd party display driver");
+
+ if(!pDisplayDevices)
+ return OMAP_OK;
+
+ for(i = 0; i < display_devices_count; i++)
+ {
+ psDevInfo = &pDisplayDevices[i];
+ if(!psDevInfo->display)
+ continue;
+
+ /* Remove the ProcessFlip command callback */
+ psJTable = &psDevInfo->sPVRJTable;
+
+ if(!psJTable)
+ continue;
+
+ if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList(
+ psDevInfo->ulDeviceID,
+ OMAP_DC_CMD_COUNT) != PVRSRV_OK)
+ {
+ ERROR_PRINTK("Unable to remove callback for "
+ "ProcessFlip command for display %lu",
+ psDevInfo->ulDeviceID);
+ return OMAP_ERROR_GENERIC;
+ }
+
+ /* Remove the display device from services */
+ if (psJTable->pfnPVRSRVRemoveDCDevice(
+ psDevInfo->ulDeviceID) != PVRSRV_OK)
+ {
+ ERROR_PRINTK("Unable to remove the display %lu "
+ "from services", psDevInfo->ulDeviceID);
+ return OMAP_ERROR_GENERIC;
+ }
+
+ deinit_display_device(psDevInfo);
+ }
+
+ kfree(pDisplayDevices);
+
+ return OMAP_OK;
+}
+
+/*
+ * Extracts the display and framebuffer data into the device info structure
+ * in: psDevInfo, display
+ */
+static enum OMAP_ERROR init_display_device(struct OMAP_DISP_DEVINFO *psDevInfo,
+ struct omap_display_device *display)
+{
+ int buffers_available = display->buffers_available;
+
+ /* Extract the needed data from the display struct */
+ DEBUG_PRINTK("Display '%s' id %i information:", display->name,
+ display->id);
+ DEBUG_PRINTK("*Width, height: %u,%u", display->width,
+ display->height);
+ DEBUG_PRINTK("*Rotation: %u", display->rotation);
+ DEBUG_PRINTK("*Stride: %u bytes", display->byte_stride);
+ DEBUG_PRINTK("*Buffers available: %u", buffers_available);
+ DEBUG_PRINTK("*Bytes per pixel: %u (%u bpp)",
+ display->bytes_per_pixel, display->bits_per_pixel);
+
+ if(display->bits_per_pixel == 16)
+ {
+ if(display->pixel_format == RGB_565)
+ {
+ DEBUG_PRINTK("*Format: RGB565");
+ psDevInfo->sDisplayFormat.pixelformat =
+ PVRSRV_PIXEL_FORMAT_RGB565;
+ }
+ else
+ WARNING_PRINTK("*Format: Unknown framebuffer"
+ "format");
+ }
+ else if(display->bits_per_pixel == 24 ||
+ display->bits_per_pixel == 32)
+ {
+ if(display->pixel_format == ARGB_8888)
+ {
+ DEBUG_PRINTK("*Format: ARGB8888");
+ psDevInfo->sDisplayFormat.pixelformat =
+ PVRSRV_PIXEL_FORMAT_ARGB8888;
+
+ }
+ else
+ WARNING_PRINTK("*Format: Unknown framebuffer"
+ "format");
+ }
+ else
+ WARNING_PRINTK("*Format: Unknown framebuffer format");
+
+ if(display->main_buffer)
+ {
+ DEBUG_PRINTK("*Bytes per buffer: %lu",
+ display->main_buffer->size);
+ DEBUG_PRINTK("*Main buffer physical address: 0x%lx",
+ display->main_buffer->physical_addr);
+ DEBUG_PRINTK("*Main buffer virtual address: 0x%lx",
+ display->main_buffer->virtual_addr);
+ DEBUG_PRINTK("*Main buffer size: %lu bytes",
+ display->main_buffer->size);
+ }
+ else
+ {
+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 0;
+ ERROR_PRINTK("*No main buffer found for display '%s'",
+ display->name);
+ return OMAP_ERROR_INIT_FAILURE;
+ }
+
+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = buffers_available;
+ mutex_init(&psDevInfo->sSwapChainLockMutex);
+ psDevInfo->psSwapChain = 0;
+ psDevInfo->bFlushCommands = OMAP_FALSE;
+ psDevInfo->bDeviceSuspended = OMAP_FALSE;
+
+ if(psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers > 1)
+ {
+ if(MAX_BUFFERS_FLIPPING == 1)
+ {
+ DEBUG_PRINTK("Flipping support is possible"
+ " but you decided not to use it");
+ }
+
+ DEBUG_PRINTK("*Flipping support");
+ if(psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers >
+ MAX_BUFFERS_FLIPPING)
+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers =
+ MAX_BUFFERS_FLIPPING;
+ }
+ else
+ {
+ DEBUG_PRINTK("*Flipping not supported");
+ }
+
+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers == 0)
+ {
+ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 0;
+ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 0;
+ }
+ else
+ {
+ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
+ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
+ }
+ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
+
+ /* Get the display and framebuffer needed info */
+ strncpy(psDevInfo->sDisplayInfo.szDisplayName,
+ DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
+
+ psDevInfo->sDisplayDim.ui32Width = display->width;
+ psDevInfo->sDisplayDim.ui32Height = display->height;
+ psDevInfo->sDisplayDim.ui32ByteStride = display->byte_stride;
+ psDevInfo->sSystemBuffer.sSysAddr.uiAddr =
+ display->main_buffer->physical_addr;
+ psDevInfo->sSystemBuffer.sCPUVAddr =
+ (IMG_CPU_VIRTADDR) display->main_buffer->virtual_addr;
+ psDevInfo->sSystemBuffer.ulBufferSize = display->main_buffer->size;
+ psDevInfo->sSystemBuffer.display_buffer = display->main_buffer;
+ psDevInfo->display = display;
+
+ return OMAP_OK;
+}
+
+/*
+ * Initialization routine for the 3rd party display driver
+ */
+static enum OMAP_ERROR create_display_devices(void)
+{
+ PFN_CMD_PROC pfnCmdProcList[OMAP_DC_CMD_COUNT];
+ IMG_UINT32 aui32SyncCountList[OMAP_DC_CMD_COUNT][2];
+ int i;
+ unsigned int bytes_to_alloc;
+
+ DEBUG_PRINTK("Initializing 3rd party display driver");
+
+ /* Init display abstraction layer */
+ omap_display_initialize();
+
+ /* Ask for the number of displays available */
+ /* TODO: allow more displays */
+	display_devices_count = 1; /* omap_display_count(); */
+
+ DEBUG_PRINTK("Found %i displays", display_devices_count);
+
+ /*
+ * Obtain the function pointer for the jump table from kernel
+ * services to fill it with the function pointers that we want
+ */
+ if(get_pvr_dc_jtable ("PVRGetDisplayClassJTable",
+ &pfnGetPVRJTable) != OMAP_OK)
+ {
+ ERROR_PRINTK("Unable to get the function to get the"
+ " jump table display->services");
+ return OMAP_ERROR_INIT_FAILURE;
+ }
+
+ /*
+ * Allocate the display device structures, one per display available
+ */
+ bytes_to_alloc =
+ sizeof(struct OMAP_DISP_DEVINFO) * display_devices_count;
+ pDisplayDevices = (struct OMAP_DISP_DEVINFO *) kmalloc(
+ bytes_to_alloc, GFP_KERNEL);
+ if(!pDisplayDevices)
+ {
+ pDisplayDevices = NULL;
+ ERROR_PRINTK("Out of memory");
+ return OMAP_ERROR_OUT_OF_MEMORY;
+ }
+ memset(pDisplayDevices, 0, bytes_to_alloc);
+
+ /*
+ * Initialize each display device
+ */
+ for(i = 0; i < display_devices_count; i++)
+ {
+ struct omap_display_device *display;
+ struct OMAP_DISP_DEVINFO * psDevInfo;
+ enum omap_display_id id;
+
+ psDevInfo = &pDisplayDevices[i];
+ psDevInfo->display = 0;
+
+ id = OMAP_DISPID_VIRTUAL;
+
+ /*
+		 * TODO: Modify this to allow primary and secondary
+		 * displays, not only the virtual one
+ */
+#if 0
+ switch(i)
+ {
+ case 0:
+ id = OMAP_DISPID_PRIMARY;
+ break;
+ case 1:
+ id = OMAP_DISPID_SECONDARY;
+ break;
+ case 2:
+ id = OMAP_DISPID_TERTIARY;
+ break;
+ case 3:
+ id = OMAP_DISPID_VIRTUAL;
+ break;
+ default:
+ ERROR_PRINTK("Invalid display type %i", i);
+ BUG();
+ }
+
+#endif
+
+ display = omap_display_get(id);
+ if(!display)
+ continue;
+
+ if(init_display_device(psDevInfo, display) != OMAP_OK)
+ {
+ ERROR_PRINTK("Unable to initialize display '%s' type"
+ " %u", display->name, display->id);
+ continue;
+#if 0
+ kfree(pDisplayDevices);
+ pDisplayDevices = NULL;
+ return OMAP_ERROR_INIT_FAILURE;
+#endif
+ }
+
+ /*
+ * Populate each display device structure
+ */
+ if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
+ {
+ ERROR_PRINTK("Unable to get the jump table"
+ " display->services for display '%s'",
+ display->name);
+ return OMAP_ERROR_INIT_FAILURE;
+ }
+
+ /* Populate the function table that services will use */
+ psDevInfo->sDCJTable.ui32TableSize =
+ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
+ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
+ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
+ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
+ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
+ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
+ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
+ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
+ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
+ psDevInfo->sDCJTable.pfnDestroyDCSwapChain =
+ DestroyDCSwapChain;
+ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
+ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
+ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
+ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
+ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
+ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
+ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
+ psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
+
+ /* Register the display device */
+ if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice(
+ &psDevInfo->sDCJTable,
+ (IMG_UINT32*) &psDevInfo->ulDeviceID) != PVRSRV_OK)
+ {
+ ERROR_PRINTK("Unable to register the jump table"
+ " services->display");
+ return OMAP_ERROR_DEVICE_REGISTER_FAILED;
+ }
+
+ DEBUG_PRINTK("Display '%s' registered with the GPU with"
+ " id %lu", display->name, psDevInfo->ulDeviceID);
+
+ /*
+ * Register the ProcessFlip function to notify when a frame is
+ * ready to be flipped
+ */
+ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
+ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
+ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
+ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList(
+ psDevInfo->ulDeviceID, &pfnCmdProcList[0],
+ aui32SyncCountList, OMAP_DC_CMD_COUNT) != PVRSRV_OK)
+ {
+ ERROR_PRINTK("Unable to register callback for "
+ "ProcessFlip command");
+ return OMAP_ERROR_CANT_REGISTER_CALLBACK;
+ }
+
+ }
+ return OMAP_OK;
+}
+
+/*
+ * Gets the function pointer used to retrieve the jump table from
+ * services.
+ * in: szFunctionName
+ * out: ppfnFuncTable
+ */
+static enum OMAP_ERROR get_pvr_dc_jtable (char *szFunctionName,
+ PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
+{
+ if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
+ {
+ ERROR_PRINTK("Unable to get function pointer for %s"
+ " from services", szFunctionName);
+ return OMAP_ERROR_INVALID_PARAMS;
+ }
+ *ppfnFuncTable = PVRGetDisplayClassJTable;
+
+ return OMAP_OK;
+}
+
+#if defined(LDM_PLATFORM)
+
+static volatile enum OMAP_BOOL bDeviceSuspended;
+
+/*
+ * Common driver suspend function
+ */
+static void CommonSuspend(void)
+{
+ if (bDeviceSuspended)
+ {
+ DEBUG_PRINTK("Driver is already suspended");
+ return;
+ }
+
+ DriverSuspend();
+ bDeviceSuspended = OMAP_TRUE;
+}
+
+#if defined(SGX_EARLYSUSPEND)
+
+static struct early_suspend driver_early_suspend;
+
+/*
+ * Android specific, driver is requested to be suspended
+ * in: ea_event
+ */
+static void DriverSuspend_Entry(struct early_suspend *ea_event)
+{
+ DEBUG_PRINTK("Requested driver suspend");
+ CommonSuspend();
+}
+
+/*
+ * Android specific, driver is requested to be resumed
+ * in: ea_event
+ */
+static void DriverResume_Entry(struct early_suspend *ea_event)
+{
+ DEBUG_PRINTK("Requested driver resume");
+ DriverResume();
+ bDeviceSuspended = OMAP_FALSE;
+}
+
+static struct platform_driver omap_sgx_dc_driver = {
+ .driver = {
+ .name = DRVNAME,
+ }
+};
+
+#else /* defined(SGX_EARLYSUSPEND) */
+
+/*
+ * Function called when the driver is requested to be suspended
+ * in: pDevice, state
+ */
+static int DriverSuspend_Entry(struct platform_device unref__ *pDevice,
+ pm_message_t unref__ state)
+{
+ DEBUG_PRINTK("Requested driver suspend");
+ CommonSuspend();
+ return 0;
+}
+
+/*
+ * Function called when the driver is requested to resume
+ * in: pDevice
+ */
+static int DriverResume_Entry(struct platform_device unref__ *pDevice)
+{
+ DEBUG_PRINTK("Requested driver resume");
+ DriverResume();
+ bDeviceSuspended = OMAP_FALSE;
+ return 0;
+}
+
+/*
+ * Function called when the driver is requested to shutdown
+ * in: pDevice
+ */
+static IMG_VOID DriverShutdown_Entry(
+ struct platform_device unref__ *pDevice)
+{
+ DEBUG_PRINTK("Requested driver shutdown");
+ CommonSuspend();
+}
+
+static struct platform_driver omap_sgx_dc_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .suspend = DriverSuspend_Entry,
+ .resume = DriverResume_Entry,
+ .shutdown = DriverShutdown_Entry,
+};
+
+#endif /* defined(SGX_EARLYSUSPEND) */
+
+#endif /* defined(LDM_PLATFORM) */
+
+/*
+ * Driver init function
+ */
+static int __init omap_sgx_dc_init(void)
+{
+ if(create_display_devices() != OMAP_OK)
+ {
+ WARNING_PRINTK("Driver init failed");
+ return -ENODEV;
+ }
+
+#if defined(LDM_PLATFORM)
+ DEBUG_PRINTK("Registering platform driver");
+ if (platform_driver_register(&omap_sgx_dc_driver))
+ {
+ WARNING_PRINTK("Unable to register platform driver");
+ if(destroy_display_devices() != OMAP_OK)
+ WARNING_PRINTK("Driver cleanup failed\n");
+ return -ENODEV;
+ }
+
+#if defined(SGX_EARLYSUSPEND)
+ driver_early_suspend.suspend = DriverSuspend_Entry;
+ driver_early_suspend.resume = DriverResume_Entry;
+ driver_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ register_early_suspend(&driver_early_suspend);
+ DEBUG_PRINTK("Registered early suspend support");
+#endif
+
+#endif
+ return 0;
+}
+
+/*
+ * Driver exit function
+ */
+static IMG_VOID __exit omap_sgx_dc_deinit(IMG_VOID)
+{
+#if defined(LDM_PLATFORM)
+ DEBUG_PRINTK("Removing platform driver");
+ platform_driver_unregister(&omap_sgx_dc_driver);
+#if defined(SGX_EARLYSUSPEND)
+ unregister_early_suspend(&driver_early_suspend);
+#endif
+#endif
+ if(destroy_display_devices() != OMAP_OK)
+ WARNING_PRINTK("Driver cleanup failed");
+}
+
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+late_initcall(omap_sgx_dc_init);
+module_exit(omap_sgx_dc_deinit);
diff --git a/drivers/gpu/pvr/display/omap_sgx_displayclass.h b/drivers/gpu/pvr/display/omap_sgx_displayclass.h
new file mode 100644
index 0000000..e97c4ad
--- /dev/null
+++ b/drivers/gpu/pvr/display/omap_sgx_displayclass.h
@@ -0,0 +1,123 @@
+/*************************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ *************************************************************************/
+
+#ifndef __OMAP_SGX_DISPLAYCLASS_H__
+#define __OMAP_SGX_DISPLAYCLASS_H__
+
+extern IMG_BOOL PVRGetDisplayClassJTable(
+ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
+
+typedef void * OMAP_HANDLE;
+
+enum OMAP_BOOL
+{
+ OMAP_FALSE = 0,
+ OMAP_TRUE = 1,
+};
+
+struct OMAP_DISP_BUFFER
+{
+ unsigned long ulBufferSize;
+ IMG_SYS_PHYADDR sSysAddr;
+ IMG_CPU_VIRTADDR sCPUVAddr;
+ PVRSRV_SYNC_DATA *psSyncData;
+ struct OMAP_DISP_BUFFER *psNext;
+ struct omap_display_buffer *display_buffer;
+};
+
+struct OMAP_DISP_FLIP_ITEM
+{
+ OMAP_HANDLE hCmdComplete;
+ unsigned long ulSwapInterval;
+ enum OMAP_BOOL bValid;
+ enum OMAP_BOOL bFlipped;
+ enum OMAP_BOOL bCmdCompleted;
+ IMG_SYS_PHYADDR *sSysAddr;
+ struct omap_display_buffer *display_buffer;
+};
+
+struct OMAP_DISP_SWAPCHAIN
+{
+ unsigned long ulBufferCount;
+ struct OMAP_DISP_BUFFER *psBuffer;
+ struct OMAP_DISP_FLIP_ITEM *psFlipItems;
+ unsigned long ulInsertIndex;
+ unsigned long ulRemoveIndex;
+ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable;
+ enum OMAP_BOOL bFlushCommands;
+ unsigned long ulSetFlushStateRefCount;
+ enum OMAP_BOOL bBlanked;
+ spinlock_t *psSwapChainLock;
+ void *pvDevInfo;
+};
+
+struct OMAP_DISP_DEVINFO
+{
+ unsigned long ulDeviceID;
+ struct OMAP_DISP_BUFFER sSystemBuffer;
+ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
+ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
+ struct OMAP_DISP_SWAPCHAIN *psSwapChain;
+ enum OMAP_BOOL bFlushCommands;
+ enum OMAP_BOOL bDeviceSuspended;
+ struct mutex sSwapChainLockMutex;
+ IMG_DEV_VIRTADDR sDisplayDevVAddr;
+ DISPLAY_INFO sDisplayInfo;
+ DISPLAY_FORMAT sDisplayFormat;
+ DISPLAY_DIMS sDisplayDim;
+ struct workqueue_struct *sync_display_wq;
+ struct work_struct sync_display_work;
+ PVRSRV_PIXEL_FORMAT ePixelFormat;
+ struct omap_display_device *display;
+};
+
+enum OMAP_ERROR
+{
+ OMAP_OK = 0,
+ OMAP_ERROR_GENERIC = 1,
+ OMAP_ERROR_OUT_OF_MEMORY = 2,
+ OMAP_ERROR_TOO_FEW_BUFFERS = 3,
+ OMAP_ERROR_INVALID_PARAMS = 4,
+ OMAP_ERROR_INIT_FAILURE = 5,
+ OMAP_ERROR_CANT_REGISTER_CALLBACK = 6,
+ OMAP_ERROR_INVALID_DEVICE = 7,
+	OMAP_ERROR_DEVICE_REGISTER_FAILED = 8
+};
+
+#define DISPLAY_DEVICE_NAME "PowerVR OMAP Display Driver"
+#define DRVNAME "omap_sgx_displayclass"
+#define DEVNAME DRVNAME
+#define DRIVER_PREFIX DRVNAME
+
+#define DEBUG_PRINTK(format, ...) printk("DEBUG " DRIVER_PREFIX \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+#define WARNING_PRINTK(format, ...) printk("WARNING " DRIVER_PREFIX \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+#define ERROR_PRINTK(format, ...) printk("ERROR " DRIVER_PREFIX \
+ " (%s %i): " format "\n", __func__, __LINE__, ## __VA_ARGS__)
+
+#endif
diff --git a/drivers/gpu/pvr/env_data.h b/drivers/gpu/pvr/env_data.h
new file mode 100644
index 0000000..7716529
--- /dev/null
+++ b/drivers/gpu/pvr/env_data.h
@@ -0,0 +1,66 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _ENV_DATA_
+#define _ENV_DATA_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+#include <linux/workqueue.h>
+#endif
+
+#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
+
+typedef struct _PVR_PCI_DEV_TAG
+{
+ struct pci_dev *psPCIDev;
+ HOST_PCI_INIT_FLAGS ePCIFlags;
+ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+} PVR_PCI_DEV;
+
+typedef struct _ENV_DATA_TAG
+{
+ IMG_VOID *pvBridgeData;
+ struct pm_dev *psPowerDevice;
+ IMG_BOOL bLISRInstalled;
+ IMG_BOOL bMISRInstalled;
+ IMG_UINT32 ui32IRQ;
+ IMG_VOID *pvISRCookie;
+#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+ struct workqueue_struct *psWorkQueue;
+#endif
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+ struct work_struct sMISRWork;
+ IMG_VOID *pvMISRData;
+#else
+ struct tasklet_struct sMISRTasklet;
+#endif
+} ENV_DATA;
+
+#endif
diff --git a/drivers/gpu/pvr/env_perproc.h b/drivers/gpu/pvr/env_perproc.h
new file mode 100644
index 0000000..dabf1e3
--- /dev/null
+++ b/drivers/gpu/pvr/env_perproc.h
@@ -0,0 +1,56 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __ENV_PERPROC_H__
+#define __ENV_PERPROC_H__
+
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+
+#include "services.h"
+#include "handle.h"
+
+typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
+{
+ IMG_HANDLE hBlockAlloc;
+ struct proc_dir_entry *psProcDir;
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ struct list_head sDRMAuthListHead;
+#endif
+} PVRSRV_ENV_PER_PROCESS_DATA;
+
+IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
+
+PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
+
+IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
+
+PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);
+
+#endif
+
diff --git a/drivers/gpu/pvr/event.c b/drivers/gpu/pvr/event.c
new file mode 100644
index 0000000..7e160c3
--- /dev/null
+++ b/drivers/gpu/pvr/event.c
@@ -0,0 +1,293 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/io.h>
+#include <asm/page.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+#include <asm/system.h>
+#endif
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+
+#include "img_types.h"
+#include "services_headers.h"
+#include "mm.h"
+#include "pvrmmap.h"
+#include "mmap.h"
+#include "env_data.h"
+#include "proc.h"
+#include "mutex.h"
+#include "lock.h"
+#include "event.h"
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+ rwlock_t sLock;
+ struct list_head sList;
+
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+ atomic_t sTimeStamp;
+ IMG_UINT32 ui32TimeStampPrevious;
+#if defined(DEBUG)
+ IMG_UINT ui32Stats;
+#endif
+ wait_queue_head_t sWait;
+ struct list_head sList;
+ IMG_HANDLE hResItem;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList;
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
+ (IMG_VOID **)&psEventObjectList, IMG_NULL,
+ "Linux Event Object List") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psEventObjectList->sList);
+
+ rwlock_init(&psEventObjectList->sLock);
+
+ *phEventObjectList = (IMG_HANDLE *) psEventObjectList;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
+
+ if(psEventObjectList)
+ {
+ IMG_BOOL bListEmpty;
+
+ read_lock(&psEventObjectList->sLock);
+ bListEmpty = list_empty(&psEventObjectList->sList);
+ read_unlock(&psEventObjectList->sLock);
+
+ if (!bListEmpty)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEventObjectList, IMG_NULL);
+
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject)
+{
+ if(hOSEventObjectList)
+ {
+ if(hOSEventObject)
+ {
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+ if(ResManFreeResByPtr(psLinuxEventObject->hResItem, CLEANUP_WITH_POLL) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+
+ return PVRSRV_OK;
+ }
+ }
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+
+}
+
+static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+ unsigned long ulLockFlags;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bForceCleanup);
+
+ write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags);
+ list_del(&psLinuxEventObject->sList);
+ write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags);
+
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+ {
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+ unsigned long ulLockFlags;
+
+ psPerProc = PVRSRVPerProcessData(ui32PID);
+ if (psPerProc == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
+ (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
+ "Linux Event Object") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
+ psLinuxEventObject->ui32TimeStampPrevious = 0;
+
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats = 0;
+#endif
+ init_waitqueue_head(&psLinuxEventObject->sWait);
+
+ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+ psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_EVENT_OBJECT,
+ psLinuxEventObject,
+ 0,
+ &LinuxEventObjectDeleteCallback);
+
+ write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags);
+ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+ write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags);
+
+ *phOSEventObject = psLinuxEventObject;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+ struct list_head *psListEntry, *psList;
+
+ psList = &psLinuxEventObjectList->sList;
+
+
+ read_lock(&psLinuxEventObjectList->sLock);
+ list_for_each(psListEntry, psList)
+ {
+
+ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+
+ atomic_inc(&psLinuxEventObject->sTimeStamp);
+ wake_up_interruptible(&psLinuxEventObject->sWait);
+ }
+ read_unlock(&psLinuxEventObjectList->sLock);
+
+ return PVRSRV_OK;
+
+}
+
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
+{
+ IMG_UINT32 ui32TimeStamp;
+ DEFINE_WAIT(sWait);
+
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
+
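+	/*
+	 * Sleep until the event object is signalled (its timestamp changes)
+	 * or the timeout expires.  The global services lock is released
+	 * while waiting and re-acquired afterwards.
+	 */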
+ do
+ {
+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+ ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);
+
+ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
+ {
+ break;
+ }
+
+ LinuxUnLockMutex(&gPVRSRVLock);
+
+ ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
+
+ LinuxLockMutex(&gPVRSRVLock);
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats++;
+#endif
+
+
+ } while (ui32TimeOutJiffies);
+
+ finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
+
+ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+
+}
+
diff --git a/drivers/gpu/pvr/event.h b/drivers/gpu/pvr/event.h
new file mode 100644
index 0000000..3035283
--- /dev/null
+++ b/drivers/gpu/pvr/event.h
@@ -0,0 +1,32 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
diff --git a/drivers/gpu/pvr/handle.c b/drivers/gpu/pvr/handle.c
new file mode 100644
index 0000000..d911b38
--- /dev/null
+++ b/drivers/gpu/pvr/handle.c
@@ -0,0 +1,1873 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+#include <stddef.h>
+
+#include "services_headers.h"
+#include "handle.h"
+
+#ifdef DEBUG
+#define HANDLE_BLOCK_SHIFT 2
+#else
+#define HANDLE_BLOCK_SHIFT 8
+#endif
+
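+/*
+ * Handle structures are stored in fixed-size blocks of
+ * 2^HANDLE_BLOCK_SHIFT entries.  The macros below convert between a
+ * flat handle index and its (block index, sub-block index) pair.
+ */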
+#define DIVIDE_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) >> HANDLE_BLOCK_SHIFT)
+#define MULTIPLY_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) << HANDLE_BLOCK_SHIFT)
+
+#define HANDLE_BLOCK_SIZE MULTIPLY_BY_BLOCK_SIZE(1)
+#define HANDLE_SUB_BLOCK_MASK (HANDLE_BLOCK_SIZE - 1)
+#define HANDLE_BLOCK_MASK (~(HANDLE_SUB_BLOCK_MASK))
+
+#define HANDLE_HASH_TAB_INIT_SIZE 32
+
+#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
+
+#if defined (SUPPORT_SID_INTERFACE)
+#define INDEX_TO_HANDLE(i) ((IMG_SID)((i) + 1))
+#define HANDLE_TO_INDEX(h) ((IMG_UINT32)(h) - 1)
+#else
+#define INDEX_TO_HANDLE(i) ((IMG_HANDLE)((IMG_UINTPTR_T)(i) + 1))
+#define HANDLE_TO_INDEX(h) ((IMG_UINT32)(IMG_UINTPTR_T)(h) - 1)
+
+#endif
+
+#define INDEX_TO_BLOCK_INDEX(i) DIVIDE_BY_BLOCK_SIZE(i)
+#define BLOCK_INDEX_TO_INDEX(i) MULTIPLY_BY_BLOCK_SIZE(i)
+#define INDEX_TO_SUB_BLOCK_INDEX(i) ((i) & HANDLE_SUB_BLOCK_MASK)
+
+#define INDEX_TO_INDEX_STRUCT_PTR(psArray, i) (&((psArray)[INDEX_TO_BLOCK_INDEX(i)]))
+#define BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i) INDEX_TO_INDEX_STRUCT_PTR((psBase)->psHandleArray, i)
+
+#define INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->ui32FreeHandBlockCount)
+
+#define INDEX_TO_HANDLE_STRUCT_PTR(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->psHandle + INDEX_TO_SUB_BLOCK_INDEX(i))
+
+#define HANDLE_TO_HANDLE_STRUCT_PTR(psBase, h) (INDEX_TO_HANDLE_STRUCT_PTR(psBase, HANDLE_TO_INDEX(h)))
+
+#define HANDLE_PTR_TO_INDEX(psHandle) ((psHandle)->ui32Index)
+#define HANDLE_PTR_TO_HANDLE(psHandle) INDEX_TO_HANDLE(HANDLE_PTR_TO_INDEX(psHandle))
+
+#define ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(a) (HANDLE_BLOCK_MASK & (a))
+#define ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(a) ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE((a) + HANDLE_BLOCK_SIZE - 1)
+
+#define DEFAULT_MAX_HANDLE 0x7fffffffu
+#define DEFAULT_MAX_INDEX_PLUS_ONE ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(DEFAULT_MAX_HANDLE)
+
+#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
+
+#define HANDLE_ARRAY_SIZE(handleCount) DIVIDE_BY_BLOCK_SIZE(ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(handleCount))
+
+#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f)))
+#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f)))
+#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
+
+#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f)
+
+#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f)
+#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f)
+#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f)
+
+#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
+
+#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
+
+#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
+
+#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
+
+#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
+
+#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
+
+#ifdef MIN
+#undef MIN
+#endif
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+struct sHandleList
+{
+ IMG_UINT32 ui32Prev;
+ IMG_UINT32 ui32Next;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hParent;
+#else
+ IMG_HANDLE hParent;
+#endif
+};
+
+enum ePVRSRVInternalHandleFlag
+{
+ INTERNAL_HANDLE_FLAG_NONE = 0x00,
+ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
+ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
+};
+
+struct sHandle
+{
+
+ PVRSRV_HANDLE_TYPE eType;
+
+
+ IMG_VOID *pvData;
+
+
+ IMG_UINT32 ui32NextIndexPlusOne;
+
+
+ enum ePVRSRVInternalHandleFlag eInternalFlag;
+
+
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+
+ IMG_UINT32 ui32Index;
+
+
+ struct sHandleList sChildren;
+
+
+ struct sHandleList sSiblings;
+};
+
+struct sHandleIndex
+{
+
+ struct sHandle *psHandle;
+
+
+ IMG_HANDLE hBlockAlloc;
+
+
+ IMG_UINT32 ui32FreeHandBlockCount;
+};
+
+struct _PVRSRV_HANDLE_BASE_
+{
+
+ IMG_HANDLE hBaseBlockAlloc;
+
+
+ IMG_HANDLE hArrayBlockAlloc;
+
+
+ struct sHandleIndex *psHandleArray;
+
+
+ HASH_TABLE *psHashTab;
+
+
+ IMG_UINT32 ui32FreeHandCount;
+
+
+ IMG_UINT32 ui32FirstFreeIndex;
+
+
+ IMG_UINT32 ui32MaxIndexPlusOne;
+
+
+ IMG_UINT32 ui32TotalHandCount;
+
+
+ IMG_UINT32 ui32LastFreeIndexPlusOne;
+
+
+ IMG_UINT32 ui32HandBatchSize;
+
+
+ IMG_UINT32 ui32TotalHandCountPreBatch;
+
+
+ IMG_UINT32 ui32FirstBatchIndexPlusOne;
+
+
+ IMG_UINT32 ui32BatchHandAllocFailures;
+
+
+ IMG_BOOL bPurgingEnabled;
+};
+
+enum eHandKey {
+ HAND_KEY_DATA = 0,
+ HAND_KEY_TYPE,
+ HAND_KEY_PARENT,
+ HAND_KEY_LEN
+};
+
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;
+
+typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_SID hParent)
+#else
+IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
+#endif
+{
+ psList->ui32Next = ui32Index;
+ psList->ui32Prev = ui32Index;
+ psList->hParent = hParent;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+IMG_VOID InitParentList(struct sHandle *psHandle)
+{
+ IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psHandle);
+
+ HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(ui32Parent));
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+IMG_VOID InitChildEntry(struct sHandle *psHandle)
+{
+ HandleListInit(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, IMG_NULL);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
+{
+ IMG_BOOL bIsEmpty;
+
+ bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
+
+#ifdef DEBUG
+ {
+ IMG_BOOL bIsEmpty2;
+
+ bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
+ PVR_ASSERT(bIsEmpty == bIsEmpty2)
+ }
+#endif
+
+ return bIsEmpty;
+}
+
+#ifdef DEBUG
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(struct sHandle *psHandle)
+{
+ PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psHandle))
+
+ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sChildren);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(struct sHandle *psHandle)
+{
+ if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings))
+ {
+ PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL)
+
+ return IMG_TRUE;
+ }
+ else
+ {
+ PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL)
+ }
+ return IMG_FALSE;
+}
+#endif
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_SID ParentHandle(struct sHandle *psHandle)
+#else
+IMG_HANDLE ParentHandle(struct sHandle *psHandle)
+#endif
+{
+ return psHandle->sSiblings.hParent;
+}
+
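+/*
+ * Resolve a list entry index to its sHandleList pointer: when the index
+ * is the parent (list head) the parent offset is used, otherwise the
+ * entry offset is used.
+ */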
+#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
+ ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_STRUCT_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
+{
+
+ struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
+
+ PVR_ASSERT(psEntry->hParent == IMG_NULL)
+ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next)
+ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(ui32ParentIndex))
+
+ psEntry->ui32Prev = psIns->ui32Prev;
+ psIns->ui32Prev = ui32EntryIndex;
+ psEntry->ui32Next = ui32InsIndex;
+ psPrevIns->ui32Next = ui32EntryIndex;
+
+ psEntry->hParent = INDEX_TO_HANDLE(ui32ParentIndex);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
+{
+ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psParent->sChildren.hParent);
+
+ PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psParent))
+
+ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
+
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
+{
+ if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
+ {
+
+ struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset);
+ struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset);
+
+
+ PVR_ASSERT(psEntry->hParent != IMG_NULL)
+
+ psPrev->ui32Next = psEntry->ui32Next;
+ psNext->ui32Prev = psEntry->ui32Prev;
+
+ HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
+ }
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
+{
+ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
+{
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psHead->hParent);
+
+ PVR_ASSERT(psHead->hParent != IMG_NULL)
+
+
+ for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
+ {
+ struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index);
+
+ struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psEntry->hParent == psHead->hParent)
+
+ ui32Index = psEntry->ui32Next;
+
+ eError = (*pfnIterFunc)(psBase, psHandle);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
+{
+ return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleStructure)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+ IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle);
+ struct sHandle *psHandle;
+
+
+ if (!INDEX_IS_VALID(psBase, ui32Index))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index);
+ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+ }
+
+
+ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+ }
+
+
+ *ppsHandle = psHandle;
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_SID ParentIfPrivate(struct sHandle *psHandle)
+#else
+IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
+#endif
+{
+ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+ ParentHandle(psHandle) : IMG_NULL;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_SID hParent)
+#else
+IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
+#endif
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
+ aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
+ aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
+}
+
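+/* Grow or shrink the handle index array to ui32NewCount entries (always a
+ * multiple of HANDLE_BLOCK_SIZE), allocating or freeing whole blocks of handle
+ * structures and updating the free handle accounting accordingly.
+ */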
+static
+PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount)
+{
+ struct sHandleIndex *psOldArray = psBase->psHandleArray;
+ IMG_HANDLE hOldArrayBlockAlloc = psBase->hArrayBlockAlloc;
+ IMG_UINT32 ui32OldCount = psBase->ui32TotalHandCount;
+ struct sHandleIndex *psNewArray = IMG_NULL;
+ IMG_HANDLE hNewArrayBlockAlloc = IMG_NULL;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eReturn = PVRSRV_OK;
+ IMG_UINT32 ui32Index;
+
+ if (ui32NewCount == ui32OldCount)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (ui32NewCount != 0 && !psBase->bPurgingEnabled &&
+ ui32NewCount < ui32OldCount)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (((ui32OldCount % HANDLE_BLOCK_SIZE) != 0) ||
+ ((ui32NewCount % HANDLE_BLOCK_SIZE) != 0))
+ {
+ PVR_ASSERT((ui32OldCount % HANDLE_BLOCK_SIZE) == 0)
+ PVR_ASSERT((ui32NewCount % HANDLE_BLOCK_SIZE) == 0)
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32NewCount != 0)
+ {
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
+ (IMG_VOID **)&psNewArray,
+ &hNewArrayBlockAlloc,
+ "Memory Area");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate new handle array (%d)", eError));
+ eReturn = eError;
+ goto error;
+ }
+
+ if (ui32OldCount != 0)
+ {
+ OSMemCopy(psNewArray, psOldArray, HANDLE_ARRAY_SIZE(MIN(ui32NewCount, ui32OldCount)) * sizeof(struct sHandleIndex));
+ }
+ }
+
+
+ for(ui32Index = ui32NewCount; ui32Index < ui32OldCount; ui32Index += HANDLE_BLOCK_SIZE)
+ {
+ struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psOldArray, ui32Index);
+
+ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
+ psIndex->psHandle,
+ psIndex->hBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
+ }
+ }
+
+
+ for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
+ {
+
+ struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
+ (IMG_VOID **)&psIndex->psHandle,
+ &psIndex->hBlockAlloc,
+ "Memory Area");
+ if (eError != PVRSRV_OK)
+ {
+ psIndex->psHandle = IMG_NULL;
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate handle structures (%d)", eError));
+ eReturn = eError;
+ }
+ else
+ {
+ IMG_UINT32 ui32SubIndex;
+
+ psIndex->ui32FreeHandBlockCount = HANDLE_BLOCK_SIZE;
+
+ for(ui32SubIndex = 0; ui32SubIndex < HANDLE_BLOCK_SIZE; ui32SubIndex++)
+ {
+ struct sHandle *psHandle = psIndex->psHandle + ui32SubIndex;
+
+
+ psHandle->ui32Index = ui32SubIndex + ui32Index;
+ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
+ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
+ psHandle->ui32NextIndexPlusOne = 0;
+ }
+ }
+ }
+ if (eReturn != PVRSRV_OK)
+ {
+ goto error;
+ }
+
+#ifdef DEBUG_MAX_HANDLE_COUNT
+
+ if (ui32NewCount > DEBUG_MAX_HANDLE_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Max handle count (%u) reached", DEBUG_MAX_HANDLE_COUNT));
+ eReturn = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+#endif
+
+ if (psOldArray != IMG_NULL)
+ {
+
+ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ HANDLE_ARRAY_SIZE(ui32OldCount) * sizeof(struct sHandleIndex),
+ psOldArray,
+ hOldArrayBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free old handle array (%d)", eError));
+ }
+ }
+
+ psBase->psHandleArray = psNewArray;
+ psBase->hArrayBlockAlloc = hNewArrayBlockAlloc;
+ psBase->ui32TotalHandCount = ui32NewCount;
+
+ if (ui32NewCount > ui32OldCount)
+ {
+
+ PVR_ASSERT(psBase->ui32FreeHandCount + (ui32NewCount - ui32OldCount) > psBase->ui32FreeHandCount)
+
+
+ psBase->ui32FreeHandCount += (ui32NewCount - ui32OldCount);
+
+
+ if (psBase->ui32FirstFreeIndex == 0)
+ {
+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0)
+
+ psBase->ui32FirstFreeIndex = ui32OldCount;
+ }
+ else
+ {
+ if (!psBase->bPurgingEnabled)
+ {
+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
+ PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0)
+
+ INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32OldCount + 1;
+ }
+ }
+
+ if (!psBase->bPurgingEnabled)
+ {
+ psBase->ui32LastFreeIndexPlusOne = ui32NewCount;
+ }
+ }
+ else
+ {
+ PVR_ASSERT(ui32NewCount == 0 || psBase->bPurgingEnabled)
+ PVR_ASSERT(ui32NewCount == 0 || psBase->ui32FirstFreeIndex <= ui32NewCount)
+ PVR_ASSERT(psBase->ui32FreeHandCount - (ui32OldCount - ui32NewCount) < psBase->ui32FreeHandCount)
+
+
+ psBase->ui32FreeHandCount -= (ui32OldCount - ui32NewCount);
+
+ if (ui32NewCount == 0)
+ {
+ psBase->ui32FirstFreeIndex = 0;
+ psBase->ui32LastFreeIndexPlusOne = 0;
+ }
+ }
+
+ PVR_ASSERT(psBase->ui32FirstFreeIndex <= psBase->ui32TotalHandCount)
+
+ return PVRSRV_OK;
+
+error:
+ PVR_ASSERT(eReturn != PVRSRV_OK)
+
+ if (psNewArray != IMG_NULL)
+ {
+
+ for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
+ {
+ struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);
+ if (psIndex->psHandle != IMG_NULL)
+ {
+ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
+ psIndex->psHandle,
+ psIndex->hBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
+ }
+ }
+ }
+
+
+ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
+ psNewArray,
+ hNewArrayBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free new handle array (%d)", eError));
+ }
+ }
+
+ return eReturn;
+}
+
+static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
+{
+ return ReallocHandleArray(psBase, 0);
+}
+
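+/* Free a handle: remove its hash table entry (unless it was allocated with the
+ * MULTI flag), unlink it from its parent and recursively free any subhandles.
+ * Batched handles are only marked partially free here; otherwise the index is
+ * returned to the free list (or just counted as free when purging is enabled).
+ */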
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
+{
+ HAND_KEY aKey;
+ IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psHandle);
+ PVRSRV_ERROR eError;
+
+
+ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
+
+ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle;
+ hHandle = (IMG_SID) HASH_Remove_Extended(psBase->psHashTab, aKey);
+#else
+ IMG_HANDLE hHandle;
+ hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+#endif
+
+ PVR_ASSERT(hHandle != IMG_NULL)
+ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(ui32Index))
+ PVR_UNREFERENCED_PARAMETER(hHandle);
+ }
+
+
+ UnlinkFromParent(psBase, psHandle);
+
+
+ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
+ return eError;
+ }
+
+
+ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
+
+ if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+ {
+
+ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
+
+ return PVRSRV_OK;
+ }
+
+
+ if (!psBase->bPurgingEnabled)
+ {
+ if (psBase->ui32FreeHandCount == 0)
+ {
+ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0)
+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0)
+
+ psBase->ui32FirstFreeIndex = ui32Index;
+ }
+ else
+ {
+
+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
+ PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0)
+ INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
+ }
+
+ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0)
+
+
+ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
+ }
+
+ psBase->ui32FreeHandCount++;
+ INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index)++;
+
+	PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index) <= HANDLE_BLOCK_SIZE)
+
+#ifdef DEBUG
+ {
+ IMG_UINT32 ui32BlockedIndex;
+ IMG_UINT32 ui32FreeHandCount = 0;
+
+ for (ui32BlockedIndex = 0; ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE)
+ {
+ ui32FreeHandCount += INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32BlockedIndex);
+ }
+
+ PVR_ASSERT(ui32FreeHandCount == psBase->ui32FreeHandCount)
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
+ {
+ return eError;
+ }
+
+ for (i = 0; i < psBase->ui32TotalHandCount; i++)
+ {
+ struct sHandle *psHandle;
+
+ psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, i);
+
+ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
+ {
+ eError = FreeHandle(psBase, psHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
+ break;
+ }
+
+
+ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
+ {
+ break;
+ }
+ }
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVRSRV_ERROR eError;
+
+ if (HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
+ PVRSRVReleaseHandleBatch(psBase);
+ }
+
+
+ eError = FreeAllHandles(psBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
+ return eError;
+ }
+
+
+ eError = FreeHandleArray(psBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
+ return eError;
+ }
+
+ if (psBase->psHashTab != IMG_NULL)
+ {
+
+ HASH_Delete(psBase->psHashTab);
+ }
+
+ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*psBase),
+ psBase,
+ psBase->hBaseBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_SID FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_SID hParent)
+#else
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
+#endif
+{
+ HAND_KEY aKey;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ return (IMG_SID) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+#else
+ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+#endif
+}
+
+static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(ui32Delta);
+ IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted;
+
+ PVR_ASSERT(ui32Delta != 0)
+
+
+ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount)
+ {
+ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
+
+ ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount;
+
+ if (ui32DeltaAdjusted < ui32Delta)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta)
+
+
+ eError = ReallocHandleArray(psBase, ui32NewTotalHandCount);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free)
+{
+ PVRSRV_ERROR eError;
+
+ if (ui32Free > psBase->ui32FreeHandCount)
+ {
+ IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
+ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError));
+
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
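+/* Allocate a handle for pvData of the given type and parent. Unless the MULTI
+ * flag is set, the (data, type, parent) key is inserted into the hash table so
+ * that later lookups can find the handle again.
+ */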
+#if defined (SUPPORT_SID_INTERFACE)
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent)
+#else
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
+#endif
+{
+ IMG_UINT32 ui32NewIndex = DEFAULT_MAX_INDEX_PLUS_ONE;
+ struct sHandle *psNewHandle = IMG_NULL;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle;
+#else
+ IMG_HANDLE hHandle;
+#endif
+ HAND_KEY aKey;
+ PVRSRV_ERROR eError;
+
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+ PVR_ASSERT(psBase != IMG_NULL)
+ PVR_ASSERT(psBase->psHashTab != IMG_NULL)
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+
+ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL)
+ }
+
+ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize));
+ }
+
+
+ eError = EnsureFreeHandles(psBase, 1);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError));
+ return eError;
+ }
+ PVR_ASSERT(psBase->ui32FreeHandCount != 0)
+
+ if (!psBase->bPurgingEnabled)
+ {
+
+ ui32NewIndex = psBase->ui32FirstFreeIndex;
+
+
+ psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex);
+ }
+ else
+ {
+ IMG_UINT32 ui32BlockedIndex;
+
+
+
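+		/* With purging enabled no free list is maintained, so scan the blocks
+		 * from the first potentially free index looking for an unused entry.
+		 */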
+ PVR_ASSERT((psBase->ui32FirstFreeIndex % HANDLE_BLOCK_SIZE) == 0)
+
+ for (ui32BlockedIndex = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(psBase->ui32FirstFreeIndex); ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE)
+ {
+ struct sHandleIndex *psIndex = BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, ui32BlockedIndex);
+
+ if (psIndex->ui32FreeHandBlockCount == 0)
+ {
+ continue;
+ }
+
+ for (ui32NewIndex = ui32BlockedIndex; ui32NewIndex < ui32BlockedIndex + HANDLE_BLOCK_SIZE; ui32NewIndex++)
+ {
+ psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex);
+ if (HANDLE_STRUCT_IS_FREE(psNewHandle))
+ {
+ break;
+ }
+ }
+ }
+ psBase->ui32FirstFreeIndex = 0;
+ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount)
+ }
+ PVR_ASSERT(psNewHandle != IMG_NULL)
+
+
+ hHandle = INDEX_TO_HANDLE(ui32NewIndex);
+
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+
+ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
+
+ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+ }
+ }
+
+ psBase->ui32FreeHandCount--;
+
+ PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) <= HANDLE_BLOCK_SIZE)
+ PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) > 0)
+
+ INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex)--;
+
+
+ if (!psBase->bPurgingEnabled)
+ {
+
+ if (psBase->ui32FreeHandCount == 0)
+ {
+ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex)
+ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1))
+
+ psBase->ui32LastFreeIndexPlusOne = 0;
+ psBase->ui32FirstFreeIndex = 0;
+ }
+ else
+ {
+
+ psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
+ ui32NewIndex + 1 :
+ psNewHandle->ui32NextIndexPlusOne - 1;
+ }
+ }
+
+
+ PVR_ASSERT(psNewHandle->ui32Index == ui32NewIndex)
+
+
+ psNewHandle->eType = eType;
+ psNewHandle->pvData = pvData;
+ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
+ psNewHandle->eFlag = eFlag;
+
+ InitParentList(psNewHandle);
+#if defined(DEBUG)
+ PVR_ASSERT(NoChildren(psNewHandle))
+#endif
+
+ InitChildEntry(psNewHandle);
+#if defined(DEBUG)
+ PVR_ASSERT(NoParent(psNewHandle))
+#endif
+
+ if (HANDLES_BATCHED(psBase))
+ {
+
+ psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
+
+ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
+
+
+ SET_BATCHED_HANDLE(psNewHandle);
+ }
+ else
+ {
+ psNewHandle->ui32NextIndexPlusOne = 0;
+ }
+
+
+ *phHandle = hHandle;
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
+#else
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
+#endif
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle;
+#else
+ IMG_HANDLE hHandle;
+#endif
+ PVRSRV_ERROR eError;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ *phHandle = 0;
+#else
+ *phHandle = IMG_NULL;
+#endif
+
+ if (HANDLES_BATCHED(psBase))
+ {
+
+ psBase->ui32BatchHandAllocFailures++;
+ }
+
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+
+ hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
+#if defined (SUPPORT_SID_INTERFACE)
+ if (hHandle != 0)
+#else
+ if (hHandle != IMG_NULL)
+#endif
+ {
+ struct sHandle *psHandle;
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
+ return eError;
+ }
+
+
+ if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
+ {
+ *phHandle = hHandle;
+ eError = PVRSRV_OK;
+ goto exit_ok;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
+ }
+ }
+
+ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
+
+exit_ok:
+ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
+ {
+ psBase->ui32BatchHandAllocFailures--;
+ }
+
+ return eError;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent)
+#else
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
+#endif
+{
+ struct sHandle *psPHand;
+ struct sHandle *psCHand;
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hParentKey;
+ IMG_SID hHandle;
+
+ *phHandle = 0;
+#else
+ IMG_HANDLE hParentKey;
+ IMG_HANDLE hHandle;
+
+ *phHandle = IMG_NULL;
+#endif
+
+ if (HANDLES_BATCHED(psBase))
+ {
+
+ psBase->ui32BatchHandAllocFailures++;
+ }
+
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+ hParent : IMG_NULL;
+
+
+ eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+
+ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
+#if defined (SUPPORT_SID_INTERFACE)
+ if (hHandle != 0)
+#else
+ if (hHandle != IMG_NULL)
+#endif
+ {
+ struct sHandle *psCHandle;
+ PVRSRV_ERROR eErr;
+
+ eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType);
+ if (eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
+ return eErr;
+ }
+
+ PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent)
+
+
+ if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent)
+ {
+ *phHandle = hHandle;
+ goto exit_ok;
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
+ }
+ }
+
+ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+
+ psPHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hParent);
+
+ psCHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle);
+
+ AdoptChild(psBase, psPHand, psCHand);
+
+ *phHandle = hHandle;
+
+exit_ok:
+ if (HANDLES_BATCHED(psBase))
+ {
+ psBase->ui32BatchHandAllocFailures--;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle;
+#else
+ IMG_HANDLE hHandle;
+#endif
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ hHandle = (IMG_SID) FindHandle(psBase, pvData, eType, IMG_NULL);
+#else
+ hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
+#endif
+ if (hHandle == IMG_NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ }
+
+ *phHandle = hHandle;
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_SID hHandle)
+#else
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
+#endif
+{
+ struct sHandle *psHandle;
+ PVRSRV_ERROR eError;
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return eError;
+ }
+
+ *ppvData = psHandle->pvData;
+ *peType = psHandle->eType;
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+ struct sHandle *psHandle;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_ASSERT(hHandle != 0)
+#endif
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return eError;
+ }
+
+ *ppvData = psHandle->pvData;
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType, IMG_SID hAncestor)
+#else
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
+#endif
+{
+ struct sHandle *psPHand;
+ struct sHandle *psCHand;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_ASSERT(hHandle != 0)
+#endif
+
+ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
+ return eError;
+ }
+
+
+ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
+ {
+ eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
+ return PVRSRV_ERROR_INVALID_SUBHANDLE;
+ }
+ }
+
+ *ppvData = psCHand->pvData;
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phParent, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+ struct sHandle *psHandle;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
+ return eError;
+ }
+
+ *phParent = ParentHandle(psHandle);
+
+ return PVRSRV_OK;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+ struct sHandle *psHandle;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#endif
+ return eError;
+ }
+
+ *ppvData = psHandle->pvData;
+
+ eError = FreeHandle(psBase, psHandle);
+
+ return eError;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType)
+#else
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+#endif
+{
+ struct sHandle *psHandle;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE)
+
+ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
+ return eError;
+ }
+
+ eError = FreeHandle(psBase, psHandle);
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
+{
+ PVRSRV_ERROR eError;
+
+ if (HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize));
+ return PVRSRV_ERROR_HANDLE_BATCH_IN_USE;
+ }
+
+ if (ui32BatchSize == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = EnsureFreeHandles(psBase, ui32BatchSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError));
+ return eError;
+ }
+
+ psBase->ui32HandBatchSize = ui32BatchSize;
+
+
+ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
+
+ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0)
+
+ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0)
+
+ PVR_ASSERT(HANDLES_BATCHED(psBase))
+
+ return PVRSRV_OK;
+}
+
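+/* Walk the list of handles allocated in the current batch. On release, or on
+ * commit after an allocation failure, the batched handles are freed; on a
+ * successful commit they are unmarked as batched (completing the free of any
+ * that were partially freed during the batch).
+ */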
+static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit)
+{
+
+ IMG_UINT32 ui32IndexPlusOne;
+ IMG_BOOL bCommitBatch = bCommit;
+
+ if (!HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ }
+
+ if (psBase->ui32BatchHandAllocFailures != 0)
+ {
+ if (bCommit)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures."));
+ }
+ bCommitBatch = IMG_FALSE;
+ }
+
+ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit)
+
+ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
+ while(ui32IndexPlusOne != 0)
+ {
+ struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32IndexPlusOne - 1);
+ IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne;
+ PVR_ASSERT(BATCHED_HANDLE(psHandle))
+
+ psHandle->ui32NextIndexPlusOne = 0;
+
+ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+ {
+ PVRSRV_ERROR eError;
+
+
+ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+ {
+
+ SET_UNBATCHED_HANDLE(psHandle);
+ }
+
+ eError = FreeHandle(psBase, psHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK)
+ }
+ else
+ {
+
+ SET_UNBATCHED_HANDLE(psHandle);
+ }
+
+ ui32IndexPlusOne = ui32NextIndexPlusOne;
+ }
+
+#ifdef DEBUG
+ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount)
+ {
+ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch;
+
+ PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch)
+
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta));
+
+ }
+#endif
+
+	psBase->ui32HandBatchSize = 0;
+	psBase->ui32FirstBatchIndexPlusOne = 0;
+	psBase->ui32TotalHandCountPreBatch = 0;
+
+	/* Report a commit failure if any handle allocations failed during the batch */
+	if (psBase->ui32BatchHandAllocFailures != 0 && bCommit)
+	{
+		PVR_ASSERT(!bCommitBatch)
+
+		psBase->ui32BatchHandAllocFailures = 0;
+
+		return PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE;
+	}
+
+	psBase->ui32BatchHandAllocFailures = 0;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
+{
+ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE);
+}
+
+IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
+{
+ (IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
+}
+
+PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
+{
+ IMG_UINT32 ui32MaxHandleRounded;
+
+ if (HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if (ui32MaxHandle == 0 || ui32MaxHandle > DEFAULT_MAX_HANDLE)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 1, DEFAULT_MAX_HANDLE));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ if (psBase->ui32TotalHandCount != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set because handles have already been allocated"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ ui32MaxHandleRounded = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(ui32MaxHandle);
+
+
+ if (ui32MaxHandleRounded != 0 && ui32MaxHandleRounded < psBase->ui32MaxIndexPlusOne)
+ {
+ psBase->ui32MaxIndexPlusOne = ui32MaxHandleRounded;
+ }
+
+ PVR_ASSERT(psBase->ui32MaxIndexPlusOne != 0)
+ PVR_ASSERT(psBase->ui32MaxIndexPlusOne <= DEFAULT_MAX_INDEX_PLUS_ONE)
+ PVR_ASSERT((psBase->ui32MaxIndexPlusOne % HANDLE_BLOCK_SIZE) == 0)
+
+ return PVRSRV_OK;
+}
+
+IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+ return psBase->ui32MaxIndexPlusOne;
+}
+
+PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
+{
+ if (psBase->bPurgingEnabled)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled"));
+ return PVRSRV_OK;
+ }
+
+
+ if (psBase->ui32TotalHandCount != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psBase->bPurgingEnabled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
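+/* Discard trailing handle blocks that are completely free. The array is only
+ * reallocated if it can be shrunk to at most half of its current size.
+ */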
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ IMG_UINT32 ui32BlockIndex;
+ IMG_UINT32 ui32NewHandCount;
+
+ if (!psBase->bPurgingEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base"));
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ if (HANDLES_BATCHED(psBase))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_ASSERT((psBase->ui32TotalHandCount % HANDLE_BLOCK_SIZE) == 0)
+
+ for (ui32BlockIndex = INDEX_TO_BLOCK_INDEX(psBase->ui32TotalHandCount); ui32BlockIndex != 0; ui32BlockIndex--)
+ {
+ if (psBase->psHandleArray[ui32BlockIndex - 1].ui32FreeHandBlockCount != HANDLE_BLOCK_SIZE)
+ {
+ break;
+ }
+ }
+ ui32NewHandCount = BLOCK_INDEX_TO_INDEX(ui32BlockIndex);
+
+
+ if (ui32NewHandCount <= (psBase->ui32TotalHandCount/2))
+ {
+ PVRSRV_ERROR eError;
+
+
+
+ eError = ReallocHandleArray(psBase, ui32NewHandCount);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ IMG_HANDLE hBlockAlloc;
+ PVRSRV_ERROR eError;
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(*psBase),
+ (IMG_PVOID *)&psBase,
+ &hBlockAlloc,
+ "Handle Base");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
+ return eError;
+ }
+ OSMemSet(psBase, 0, sizeof(*psBase));
+
+
+ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
+ if (psBase->psHashTab == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
+ (IMG_VOID)PVRSRVFreeHandleBase(psBase);
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ }
+
+ psBase->hBaseBlockAlloc = hBlockAlloc;
+
+ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
+
+ *ppsBase = psBase;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psBase != gpsKernelHandleBase)
+
+ eError = FreeHandleBase(psBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError));
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsKernelHandleBase == IMG_NULL)
+
+ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError));
+ goto error;
+ }
+
+ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError));
+ goto error;
+ }
+
+ return PVRSRV_OK;
+error:
+ (IMG_VOID) PVRSRVHandleDeInit();
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (gpsKernelHandleBase != IMG_NULL)
+ {
+ eError = FreeHandleBase(gpsKernelHandleBase);
+ if (eError == PVRSRV_OK)
+ {
+ gpsKernelHandleBase = IMG_NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError));
+ }
+ }
+
+ return eError;
+}
+#else
+#endif
diff --git a/drivers/gpu/pvr/handle.h b/drivers/gpu/pvr/handle.h
new file mode 100644
index 0000000..536fa56
--- /dev/null
+++ b/drivers/gpu/pvr/handle.h
@@ -0,0 +1,404 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __HANDLE_H__
+#define __HANDLE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "hash.h"
+#include "resman.h"
+
+typedef enum
+{
+ PVRSRV_HANDLE_TYPE_NONE = 0,
+ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
+ PVRSRV_HANDLE_TYPE_DEV_NODE,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO,
+ PVRSRV_HANDLE_TYPE_DISP_INFO,
+ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+ PVRSRV_HANDLE_TYPE_BUF_INFO,
+ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
+ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_TYPE_MMAP_INFO,
+ PVRSRV_HANDLE_TYPE_SOC_TIMER,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ,
+ PVRSRV_HANDLE_TYPE_RESITEM_INFO
+} PVRSRV_HANDLE_TYPE;
+
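+/* Handle allocation flags: SHARED allows an existing handle for the same data
+ * and type to be returned to later callers, MULTI allows several handles to
+ * reference the same data (no hash table entry is kept), and PRIVATE keys a
+ * subhandle against its parent handle.
+ */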
+typedef enum
+{
+
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
+
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
+
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
+
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+struct _PVRSRV_HANDLE_BASE_;
+typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+#if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+
+#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_SID hHandle);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType, IMG_SID hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phParent, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType);
+#else
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+#endif
+
+PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize);
+
+PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase);
+
+IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle);
+
+IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
+
+#else
+
+#define KERNEL_HANDLE_BASE IMG_NULL
+
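+/* Handle management is compiled out: a handle is simply the wrapped data
+ * pointer and the inline functions below are no-op pass-throughs.
+ */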
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVAllocHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
+{
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(eFlag);
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ *phHandle = pvData;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVAllocSubHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
+{
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(eFlag);
+ PVR_UNREFERENCED_PARAMETER(hParent);
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ *phHandle = pvData;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVFindHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
+{
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ *phHandle = pvData;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVLookupHandleAnyType)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ *peType = PVRSRV_HANDLE_TYPE_NONE;
+
+ *ppvData = hHandle;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVLookupHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_UNREFERENCED_PARAMETER(eType);
+
+ *ppvData = hHandle;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVLookupSubHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(hAncestor);
+
+ *ppvData = hHandle;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetParentHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(hHandle);
+
+ *phParent = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVLookupAndReleaseHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ *ppvData = hHandle;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVReleaseHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+ PVR_UNREFERENCED_PARAMETER(hHandle);
+ PVR_UNREFERENCED_PARAMETER(eType);
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVNewHandleBatch)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_UNREFERENCED_PARAMETER(ui32BatchSize);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVCommitHandleBatch)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVReleaseHandleBatch)
+#endif
+static INLINE
+IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSetMaxHandle)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_UNREFERENCED_PARAMETER(ui32MaxHandle);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetMaxHandle)
+#endif
+static INLINE
+IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return 0;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVEnableHandlePurging)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPurgeHandles)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVAllocHandleBase)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
+{
+ *ppsBase = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVFreeHandleBase)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVHandleInit)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVHandleDeInit)
+#endif
+static INLINE
+PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
+#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
+ (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
+
+#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \
+ (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/hash.c b/drivers/gpu/pvr/hash.c
new file mode 100644
index 0000000..78eab44
--- /dev/null
+++ b/drivers/gpu/pvr/hash.c
@@ -0,0 +1,505 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "pvr_debug.h"
+#include "img_defs.h"
+#include "services.h"
+#include "servicesint.h"
+#include "hash.h"
+#include "osfunc.h"
+
+#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define KEY_TO_INDEX(pHash, key, uSize) \
+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define KEY_COMPARE(pHash, pKey1, pKey2) \
+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
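+/* A hash table entry: singly linked chain pointer, the stored value and a
+ * variable length key (uKeySize bytes) appended to the structure.
+ */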
+struct _BUCKET_
+{
+
+ struct _BUCKET_ *pNext;
+
+
+ IMG_UINTPTR_T v;
+
+
+ IMG_UINTPTR_T k[];
+};
+typedef struct _BUCKET_ BUCKET;
+
+struct _HASH_TABLE_
+{
+
+ BUCKET **ppBucketTable;
+
+
+ IMG_UINT32 uSize;
+
+
+ IMG_UINT32 uCount;
+
+
+ IMG_UINT32 uMinimumSize;
+
+
+ IMG_UINT32 uKeySize;
+
+
+ HASH_FUNC *pfnHashFunc;
+
+
+ HASH_KEY_COMP *pfnKeyComp;
+};
+
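+/* Default hash function: mixes each IMG_UINTPTR_T word of the key with a
+ * series of shifts, adds and xors, and sums the per-word results.
+ */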
+IMG_UINT32
+HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
+{
+ IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
+ IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T));
+ IMG_UINT32 ui;
+ IMG_UINT32 uHashKey = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+ uHashPart += (uHashPart << 12);
+ uHashPart ^= (uHashPart >> 22);
+ uHashPart += (uHashPart << 4);
+ uHashPart ^= (uHashPart >> 9);
+ uHashPart += (uHashPart << 10);
+ uHashPart ^= (uHashPart >> 2);
+ uHashPart += (uHashPart << 7);
+ uHashPart ^= (uHashPart >> 12);
+
+ uHashKey += uHashPart;
+ }
+
+ return uHashKey;
+}
+
+IMG_BOOL
+HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
+{
+ IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
+ IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
+ IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T));
+ IMG_UINT32 ui;
+
+ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ if (*p1++ != *p2++)
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+static PVRSRV_ERROR
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pBucket != IMG_NULL);
+ PVR_ASSERT (ppBucketTable != IMG_NULL);
+ PVR_ASSERT (uSize != 0);
+
+ if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
+ pBucket->pNext = ppBucketTable[uIndex];
+ ppBucketTable[uIndex] = pBucket;
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_Rehash (HASH_TABLE *pHash,
+ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex=0; uIndex< uOldSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = ppOldTable[uIndex];
+ while (pBucket != IMG_NULL)
+ {
+ PVRSRV_ERROR eError;
+ BUCKET *pNextBucket = pBucket->pNext;
+ eError = _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed"));
+ return eError;
+ }
+ pBucket = pNextBucket;
+ }
+ }
+ return PVRSRV_OK;
+}
+
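+/* Swap the bucket table for one of uNewSize entries and rehash the existing
+ * buckets into it.  Returns IMG_FALSE, leaving the table as it was, if the
+ * new bucket array cannot be allocated.
+ */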
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+ if (uNewSize != pHash->uSize)
+ {
+ BUCKET **ppNewTable;
+ IMG_UINT32 uIndex;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
+ pHash->uSize, uNewSize, pHash->uCount));
+
+ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof (BUCKET *) * uNewSize,
+ (IMG_PVOID*)&ppNewTable, IMG_NULL,
+ "Hash Table Buckets");
+ if (ppNewTable == IMG_NULL)
+ return IMG_FALSE;
+
+ for (uIndex=0; uIndex<uNewSize; uIndex++)
+ ppNewTable[uIndex] = IMG_NULL;
+
+ if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK)
+ {
+ return IMG_FALSE;
+ }
+
+ OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
+
+ pHash->ppBucketTable = ppNewTable;
+ pHash->uSize = uNewSize;
+ }
+ return IMG_TRUE;
+}
+
+
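+/* Create a hash table with uInitialLen buckets, a fixed key size in bytes
+ * and caller supplied hash and key-compare callbacks.  Returns IMG_NULL on
+ * allocation failure.
+ */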
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+ HASH_TABLE *pHash;
+ IMG_UINT32 uIndex;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(HASH_TABLE),
+ (IMG_VOID **)&pHash, IMG_NULL,
+ "Hash Table") != PVRSRV_OK)
+ {
+ return IMG_NULL;
+ }
+
+ pHash->uCount = 0;
+ pHash->uSize = uInitialLen;
+ pHash->uMinimumSize = uInitialLen;
+ pHash->uKeySize = (IMG_UINT32)uKeySize;
+ pHash->pfnHashFunc = pfnHashFunc;
+ pHash->pfnKeyComp = pfnKeyComp;
+
+ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof (BUCKET *) * pHash->uSize,
+ (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL,
+ "Hash Table Buckets");
+
+ if (pHash->ppBucketTable == IMG_NULL)
+ {
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
+
+ return IMG_NULL;
+ }
+
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ pHash->ppBucketTable[uIndex] = IMG_NULL;
+ return pHash;
+}
+
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+ return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
+ &HASH_Func_Default, &HASH_Key_Comp_Default);
+}
+
+IMG_VOID
+HASH_Delete (HASH_TABLE *pHash)
+{
+ if (pHash != IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+ PVR_ASSERT (pHash->uCount==0);
+ if(pHash->uCount != 0)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!"));
+			PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing allocations before destroying devmemcontext"));
+ }
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
+ pHash->ppBucketTable = IMG_NULL;
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
+
+ }
+}
+
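+/* Insert a key/value pair, copying the key into a freshly allocated bucket.
+ * The bucket table is doubled once the entry count exceeds half the number
+ * of buckets.  Returns IMG_FALSE on allocation failure.
+ */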
+IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
+{
+ BUCKET *pBucket;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Insert_Extended: Hash=0x%08x, pKey=0x%08x, v=0x%x",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v));
+
+ PVR_ASSERT (pHash != IMG_NULL);
+
+ if (pHash == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+ return IMG_FALSE;
+ }
+
+ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(BUCKET) + pHash->uKeySize,
+ (IMG_VOID **)&pBucket, IMG_NULL,
+ "Hash Table entry") != PVRSRV_OK)
+ {
+ return IMG_FALSE;
+ }
+
+ pBucket->v = v;
+
+ OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
+ if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT,
+ sizeof(BUCKET) + pHash->uKeySize,
+ pBucket, IMG_NULL);
+ return IMG_FALSE;
+ }
+
+ pHash->uCount++;
+
+
+ if (pHash->uCount << 1 > pHash->uSize)
+ {
+
+
+ _Resize (pHash, pHash->uSize << 1);
+ }
+
+
+ return IMG_TRUE;
+}
+
+IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
+{
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Insert: Hash=0x%x, k=0x%x, v=0x%x",
+ (IMG_UINTPTR_T)pHash, k, v));
+
+ return HASH_Insert_Extended(pHash, &k, v);
+}
+
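+/* Remove the entry matching pKey and return its value, or 0 if there is no
+ * match.  The bucket table is halved, but never below uMinimumSize, once it
+ * holds more than four buckets per entry.
+ */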
+IMG_UINTPTR_T
+HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey));
+
+ PVR_ASSERT (pHash != IMG_NULL);
+
+ if (pHash == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ IMG_UINTPTR_T v = pBucket->v;
+ (*ppBucket) = pBucket->pNext;
+
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
+
+
+ pHash->uCount--;
+
+
+ if (pHash->uSize > (pHash->uCount << 2) &&
+ pHash->uSize > pHash->uMinimumSize)
+ {
+
+
+ _Resize (pHash,
+ PRIVATE_MAX (pHash->uSize >> 1,
+ pHash->uMinimumSize));
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x = 0x%x",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v));
+ return v;
+ }
+ }
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x = 0x0 !!!!",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey));
+ return 0;
+}
+
+IMG_UINTPTR_T
+HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
+{
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=0x%x, k=0x%x",
+ (IMG_UINTPTR_T)pHash, k));
+
+ return HASH_Remove_Extended(pHash, &k);
+}
+
+IMG_UINTPTR_T
+HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=0x%x, pKey=0x%x",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey));
+
+ PVR_ASSERT (pHash != IMG_NULL);
+
+ if (pHash == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ IMG_UINTPTR_T v = pBucket->v;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Retrieve: Hash=0x%x, pKey=0x%x = 0x%x",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v));
+ return v;
+ }
+ }
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "HASH_Retrieve: Hash=0x%x, pKey=0x%x = 0x0 !!!!",
+ (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey));
+ return 0;
+}
+
+IMG_UINTPTR_T
+HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
+{
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=0x%x, k=0x%x",
+ (IMG_UINTPTR_T)pHash, k));
+ return HASH_Retrieve_Extended(pHash, &k);
+}
+
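+/* Call pfnCallback for every entry in the table, stopping early if the
+ * callback returns anything other than PVRSRV_OK.  Note that only the first
+ * word of each key is passed to the callback.
+ */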
+PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = pHash->ppBucketTable[uIndex];
+ while (pBucket != IMG_NULL)
+ {
+ PVRSRV_ERROR eError;
+ BUCKET *pNextBucket = pBucket->pNext;
+
+ eError = pfnCallback((IMG_UINTPTR_T) ((IMG_VOID *) *(pBucket->k)), (IMG_UINTPTR_T) pBucket->v);
+
+
+ if (eError != PVRSRV_OK)
+ return eError;
+
+ pBucket = pNextBucket;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+IMG_VOID
+HASH_Dump (HASH_TABLE *pHash)
+{
+ IMG_UINT32 uIndex;
+ IMG_UINT32 uMaxLength=0;
+ IMG_UINT32 uEmptyCount=0;
+
+ PVR_ASSERT (pHash != IMG_NULL);
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ IMG_UINT32 uLength = 0;
+ if (pHash->ppBucketTable[uIndex] == IMG_NULL)
+ {
+ uEmptyCount++;
+ }
+ for (pBucket=pHash->ppBucketTable[uIndex];
+ pBucket != IMG_NULL;
+ pBucket = pBucket->pNext)
+ {
+ uLength++;
+ }
+ uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
+ }
+
+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
+ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/gpu/pvr/hash.h b/drivers/gpu/pvr/hash.h
new file mode 100644
index 0000000..3662089
--- /dev/null
+++ b/drivers/gpu/pvr/hash.h
@@ -0,0 +1,80 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+#include "img_types.h"
+#include "osfunc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+ IMG_UINTPTR_T k,
+ IMG_UINTPTR_T v
+);
+
+IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
+
+IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
+
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+IMG_VOID HASH_Delete (HASH_TABLE *pHash);
+
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
+
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
+
+IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
+
+IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
+
+IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
+
+IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
+
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
+
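+/* Typical usage with the default, single word key interface (illustrative
+ * only; error checking omitted, hKey/psData are hypothetical caller names):
+ *
+ *	HASH_TABLE *psTable = HASH_Create(64);
+ *	HASH_Insert(psTable, (IMG_UINTPTR_T)hKey, (IMG_UINTPTR_T)psData);
+ *	psData = (MY_DATA *)HASH_Retrieve(psTable, (IMG_UINTPTR_T)hKey);
+ *	(IMG_VOID)HASH_Remove(psTable, (IMG_UINTPTR_T)hKey);
+ *	HASH_Delete(psTable);
+ */
+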
+#ifdef HASH_TRACE
+IMG_VOID HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/img_defs.h b/drivers/gpu/pvr/img_defs.h
new file mode 100644
index 0000000..d5408cf
--- /dev/null
+++ b/drivers/gpu/pvr/img_defs.h
@@ -0,0 +1,136 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*******************************************************************************/
+#if !defined (__IMG_DEFS_H__)
+#define __IMG_DEFS_H__
+
+#include "img_types.h"
+
+typedef enum img_tag_TriStateSwitch
+{
+ IMG_ON = 0x00,
+ IMG_OFF,
+ IMG_IGNORE
+
+} img_TriStateSwitch, * img_pTriStateSwitch;
+
+#define IMG_SUCCESS 0
+
+#define IMG_NO_REG 1
+
+#if defined (NO_INLINE_FUNCS)
+ #define INLINE
+ #define FORCE_INLINE
+#else
+#if defined (__cplusplus)
+ #define INLINE inline
+ #define FORCE_INLINE inline
+#else
+#if !defined(INLINE)
+ #define INLINE __inline
+#endif
+ #define FORCE_INLINE static __inline
+#endif
+#endif
+
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute. Place the unref__ between the type and name of an
+ * unused parameter in a function parameter list, eg `int unref__ var'. This
+ * should only be used in GCC build environments, for example, in files that
+ * compile only on Linux. Other files should use PVR_UNREFERENCED_PARAMETER */
+#ifdef __GNUC__
+#define unref__ __attribute__ ((unused))
+#else
+#define unref__
+#endif
+
+/*
+ Wide character definitions
+*/
+#ifndef _TCHAR_DEFINED
+#if defined(UNICODE)
+typedef unsigned short TCHAR, *PTCHAR, *PTSTR;
+#else /* #if defined(UNICODE) */
+typedef char TCHAR, *PTCHAR, *PTSTR;
+#endif /* #if defined(UNICODE) */
+#define _TCHAR_DEFINED
+#endif /* #ifndef _TCHAR_DEFINED */
+
+
+ #if defined(__linux__) || defined(__METAG)
+
+ #define IMG_CALLCONV
+ #define IMG_INTERNAL __attribute__((visibility("hidden")))
+ #define IMG_EXPORT __attribute__((visibility("default")))
+ #define IMG_IMPORT
+ #define IMG_RESTRICT __restrict__
+
+ #else
+ #error("define an OS")
+ #endif
+
+// Use default definition if not overridden
+#ifndef IMG_ABORT
+ #define IMG_ABORT() abort()
+#endif
+
+#ifndef IMG_MALLOC
+ #define IMG_MALLOC(A) malloc (A)
+#endif
+
+#ifndef IMG_FREE
+ #define IMG_FREE(A) free (A)
+#endif
+
+#define IMG_CONST const
+
+#if defined(__GNUC__)
+#define IMG_FORMAT_PRINTF(x,y) __attribute__((format(printf,x,y)))
+#else
+#define IMG_FORMAT_PRINTF(x,y)
+#endif
+
+/*
+ * Cleanup request defines
+ */
+#define CLEANUP_WITH_POLL IMG_FALSE
+#define FORCE_CLEANUP IMG_TRUE
+
+#if defined (_WIN64)
+#define IMG_UNDEF (~0ULL)
+#else
+#define IMG_UNDEF (~0UL)
+#endif
+
+#endif /* #if !defined (__IMG_DEFS_H__) */
+/*****************************************************************************
+ End of file (IMG_DEFS.H)
+*****************************************************************************/
diff --git a/drivers/gpu/pvr/img_types.h b/drivers/gpu/pvr/img_types.h
new file mode 100644
index 0000000..71dcebb
--- /dev/null
+++ b/drivers/gpu/pvr/img_types.h
@@ -0,0 +1,206 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+******************************************************************************/
+
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+
+/* define all address space bit depths: */
+/* CPU virtual address space defaults to 32bits */
+#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
+#define IMG_ADDRSPACE_CPUVADDR_BITS 32
+#endif
+
+/* Physical address space defaults to 32bits */
+#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
+#define IMG_ADDRSPACE_PHYSADDR_BITS 32
+#endif
+
+typedef unsigned int IMG_UINT, *IMG_PUINT;
+typedef signed int IMG_INT, *IMG_PINT;
+
+typedef unsigned char IMG_UINT8, *IMG_PUINT8;
+typedef unsigned char IMG_BYTE, *IMG_PBYTE;
+typedef signed char IMG_INT8, *IMG_PINT8;
+typedef char IMG_CHAR, *IMG_PCHAR;
+
+typedef unsigned short IMG_UINT16, *IMG_PUINT16;
+typedef signed short IMG_INT16, *IMG_PINT16;
+#if !defined(IMG_UINT32_IS_ULONG)
+typedef unsigned int IMG_UINT32, *IMG_PUINT32;
+typedef signed int IMG_INT32, *IMG_PINT32;
+#else
+typedef unsigned long IMG_UINT32, *IMG_PUINT32;
+typedef signed long IMG_INT32, *IMG_PINT32;
+#endif
+#if !defined(IMG_UINT32_MAX)
+ #define IMG_UINT32_MAX 0xFFFFFFFFUL
+#endif
+
+#if defined(USE_CODE)
+
+typedef unsigned __int64 IMG_UINT64, *IMG_PUINT64;
+typedef __int64 IMG_INT64, *IMG_PINT64;
+
+#else
+ #if (defined(LINUX) || defined(__METAG))
+ typedef unsigned long long IMG_UINT64, *IMG_PUINT64;
+ typedef long long IMG_INT64, *IMG_PINT64;
+ #else
+ #error("define an OS")
+ #endif
+#endif
+
+#if !(defined(LINUX) && defined (__KERNEL__))
+/* Linux kernel mode does not use floating point */
+typedef float IMG_FLOAT, *IMG_PFLOAT;
+typedef double IMG_DOUBLE, *IMG_PDOUBLE;
+#endif
+
+typedef enum tag_img_bool
+{
+ IMG_FALSE = 0,
+ IMG_TRUE = 1,
+ IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+typedef void IMG_VOID, *IMG_PVOID;
+
+typedef IMG_INT32 IMG_RESULT;
+
+#if defined(_WIN64)
+ typedef unsigned __int64 IMG_UINTPTR_T;
+ typedef signed __int64 IMG_PTRDIFF_T;
+ typedef IMG_UINT64 IMG_SIZE_T;
+#else
+ typedef unsigned int IMG_UINTPTR_T;
+ typedef IMG_UINT32 IMG_SIZE_T;
+#endif
+
+typedef IMG_PVOID IMG_HANDLE;
+
+typedef void** IMG_HVOID, * IMG_PHVOID;
+
+#define IMG_NULL 0
+
+/* services/stream ID */
+typedef IMG_UINT32 IMG_SID;
+
+typedef IMG_UINT32 IMG_EVENTSID;
+
+/* Which of IMG_HANDLE/IMG_SID depends on SUPPORT_SID_INTERFACE */
+#if defined(SUPPORT_SID_INTERFACE)
+ typedef IMG_SID IMG_S_HANDLE;
+#else
+ typedef IMG_HANDLE IMG_S_HANDLE;
+#endif
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. an IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-CPU MMU,
+ * and a memory block is only mapped by the MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ *
+ */
+
+
+/*
+ *
+ * +------------+ +------------+ +------------+ +------------+
+ * | CPU | | DEV | | DEV | | DEV |
+ * +------------+ +------------+ +------------+ +------------+
+ * | | | |
+ * | PVOID |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR |
+ * | \-------------------/ |
+ * | | |
+ * +------------+ +------------+ |
+ * | MMU | | MMU | |
+ * +------------+ +------------+ |
+ * | | |
+ * | | |
+ * | | |
+ * +--------+ +---------+ +--------+
+ * | Offset | | (Offset)| | Offset |
+ * +--------+ +---------+ +--------+
+ * | | IMG_DEV_PHYADDR |
+ * | | |
+ * | | IMG_DEV_PHYADDR |
+ * +---------------------------------------------------------------------+
+ * | System Address bus |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+typedef IMG_PVOID IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct _IMG_DEV_VIRTADDR
+{
+ /* device virtual addresses are 32bit for now */
+ IMG_UINT32 uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var)
+
+} IMG_DEV_VIRTADDR;
+
+typedef IMG_UINT32 IMG_DEVMEM_SIZE_T;
+
+/* cpu physical address */
+typedef struct _IMG_CPU_PHYADDR
+{
+ /* variable sized type (32,64) */
+ IMG_UINTPTR_T uiAddr;
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct _IMG_DEV_PHYADDR
+{
+#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
+ /* variable sized type (32,64) */
+ IMG_UINTPTR_T uiAddr;
+#else
+ IMG_UINT32 uiAddr;
+ IMG_UINT32 uiHighAddr;
+#endif
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct _IMG_SYS_PHYADDR
+{
+ /* variable sized type (32,64) */
+ IMG_UINTPTR_T uiAddr;
+} IMG_SYS_PHYADDR;
+
+#include "img_defs.h"
+
+#endif /* __IMG_TYPES_H__ */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
diff --git a/drivers/gpu/pvr/ioctldef.h b/drivers/gpu/pvr/ioctldef.h
new file mode 100644
index 0000000..4b23ad4
--- /dev/null
+++ b/drivers/gpu/pvr/ioctldef.h
@@ -0,0 +1,98 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __IOCTLDEF_H__
+#define __IOCTLDEF_H__
+
+#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
+
+#ifndef CTL_CODE
+
+#define DEVICE_TYPE ULONG
+
+#define FILE_DEVICE_BEEP 0x00000001
+#define FILE_DEVICE_CD_ROM 0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
+#define FILE_DEVICE_CONTROLLER 0x00000004
+#define FILE_DEVICE_DATALINK 0x00000005
+#define FILE_DEVICE_DFS 0x00000006
+#define FILE_DEVICE_DISK 0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
+#define FILE_DEVICE_FILE_SYSTEM 0x00000009
+#define FILE_DEVICE_INPORT_PORT 0x0000000a
+#define FILE_DEVICE_KEYBOARD 0x0000000b
+#define FILE_DEVICE_MAILSLOT 0x0000000c
+#define FILE_DEVICE_MIDI_IN 0x0000000d
+#define FILE_DEVICE_MIDI_OUT 0x0000000e
+#define FILE_DEVICE_MOUSE 0x0000000f
+#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
+#define FILE_DEVICE_NAMED_PIPE 0x00000011
+#define FILE_DEVICE_NETWORK 0x00000012
+#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL 0x00000015
+#define FILE_DEVICE_PARALLEL_PORT 0x00000016
+#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
+#define FILE_DEVICE_PRINTER 0x00000018
+#define FILE_DEVICE_SCANNER 0x00000019
+#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
+#define FILE_DEVICE_SERIAL_PORT 0x0000001b
+#define FILE_DEVICE_SCREEN 0x0000001c
+#define FILE_DEVICE_SOUND 0x0000001d
+#define FILE_DEVICE_STREAMS 0x0000001e
+#define FILE_DEVICE_TAPE 0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
+#define FILE_DEVICE_TRANSPORT 0x00000021
+#define FILE_DEVICE_UNKNOWN 0x00000022
+#define FILE_DEVICE_VIDEO 0x00000023
+#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
+#define FILE_DEVICE_WAVE_IN 0x00000025
+#define FILE_DEVICE_WAVE_OUT 0x00000026
+#define FILE_DEVICE_8042_PORT 0x00000027
+#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
+#define FILE_DEVICE_BATTERY 0x00000029
+#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
+#define FILE_DEVICE_MODEM 0x0000002b
+#define FILE_DEVICE_VDM 0x0000002c
+#define FILE_DEVICE_MASS_STORAGE 0x0000002d
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) ( \
+ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
+)
+
+#define METHOD_BUFFERED 0
+#define METHOD_IN_DIRECT 1
+#define METHOD_OUT_DIRECT 2
+#define METHOD_NEITHER 3
+
+#define FILE_ANY_ACCESS 0
+#define FILE_READ_ACCESS ( 0x0001 )
+#define FILE_WRITE_ACCESS ( 0x0002 )
+
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/ion.c b/drivers/gpu/pvr/ion.c
new file mode 100644
index 0000000..b00fdb1
--- /dev/null
+++ b/drivers/gpu/pvr/ion.c
@@ -0,0 +1,112 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ *****************************************************************************/
+
+#include "ion.h"
+
+#include "services.h"
+#include "servicesint.h"
+#include "mutex.h"
+#include "lock.h"
+#include "mm.h"
+#include "handle.h"
+#include "perproc.h"
+#include "env_perproc.h"
+#include "private_data.h"
+#include "pvr_debug.h"
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+extern struct ion_client *gpsIONClient;
+
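+/*
+ * Resolve a services export fd to the ION handles backing the allocation,
+ * returning them via "handles" together with the driver's global ION client.
+ * On failure the caller's handle array is left untouched.
+ */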
+void PVRSRVExportFDToIONHandles(int fd, struct ion_client **client,
+ struct ion_handle *handles[2])
+{
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ LinuxMemArea *psLinuxMemArea;
+ PVRSRV_ERROR eError;
+ struct file *psFile;
+
+ /* Take the bridge mutex so the handle won't be freed underneath us */
+ LinuxLockMutex(&gPVRSRVLock);
+
+ psFile = fget(fd);
+ if(!psFile)
+ goto err_unlock;
+
+ psPrivateData = psFile->private_data;
+ if(!psPrivateData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: struct file* has no private_data; "
+ "invalid export handle", __func__));
+ goto err_fput;
+ }
+
+ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_PVOID *)&psKernelMemInfo,
+ psPrivateData->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up MEM_INFO handle",
+ __func__));
+ goto err_fput;
+ }
+
+ psLinuxMemArea = (LinuxMemArea *)psKernelMemInfo->sMemBlk.hOSMemHandle;
+ BUG_ON(psLinuxMemArea == IMG_NULL);
+
+ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ION)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Valid handle, but not an ION buffer",
+ __func__));
+ goto err_fput;
+ }
+
+ handles[0] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[0];
+ handles[1] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[1];
+ if(client)
+ *client = gpsIONClient;
+
+err_fput:
+ fput(psFile);
+err_unlock:
+ /* Allow PVRSRV clients to communicate with srvkm again */
+ LinuxUnLockMutex(&gPVRSRVLock);
+}
+
+struct ion_handle *
+PVRSRVExportFDToIONHandle(int fd, struct ion_client **client)
+{
+ struct ion_handle *psHandles[2] = { IMG_NULL, IMG_NULL };
+ PVRSRVExportFDToIONHandles(fd, client, psHandles);
+ return psHandles[0];
+}
+
+EXPORT_SYMBOL(PVRSRVExportFDToIONHandles);
+EXPORT_SYMBOL(PVRSRVExportFDToIONHandle);
diff --git a/drivers/gpu/pvr/ion.h b/drivers/gpu/pvr/ion.h
new file mode 100644
index 0000000..9b0868c
--- /dev/null
+++ b/drivers/gpu/pvr/ion.h
@@ -0,0 +1,39 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ *****************************************************************************/
+
+#ifndef __IMG_LINUX_ION_H__
+#define __IMG_LINUX_ION_H__
+
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+
+void PVRSRVExportFDToIONHandles(int fd, struct ion_client **client,
+ struct ion_handle *handles[2]);
+
+struct ion_handle *PVRSRVExportFDToIONHandle(int fd,
+ struct ion_client **client);
+
+#endif /* __IMG_LINUX_ION_H__ */
diff --git a/drivers/gpu/pvr/kernelbuffer.h b/drivers/gpu/pvr/kernelbuffer.h
new file mode 100644
index 0000000..4cd36d2
--- /dev/null
+++ b/drivers/gpu/pvr/kernelbuffer.h
@@ -0,0 +1,72 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__KERNELBUFFER_H__)
+#define __KERNELBUFFER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_UINT32, IMG_HANDLE*);
+typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_UINT32, IMG_HANDLE);
+typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
+typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
+
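+/* Jump table of entry points a third party buffer class (BC) device exports
+ * to services.
+ */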
+typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
+{
+ IMG_UINT32 ui32TableSize;
+ PFN_OPEN_BC_DEVICE pfnOpenBCDevice;
+ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice;
+ PFN_GET_BC_INFO pfnGetBCInfo;
+ PFN_GET_BC_BUFFER pfnGetBCBuffer;
+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
+
+} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
+
+
+typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
+typedef IMG_VOID (*PFN_BC_SCHEDULE_DEVICES)(IMG_VOID);
+typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32);
+
+typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
+{
+ IMG_UINT32 ui32TableSize;
+ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice;
+ PFN_BC_SCHEDULE_DEVICES pfnPVRSRVScheduleDevices;
+ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice;
+
+} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
+
+typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE);
+
+IMG_IMPORT IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/pvr/kerneldisplay.h b/drivers/gpu/pvr/kerneldisplay.h
new file mode 100644
index 0000000..23384d7
--- /dev/null
+++ b/drivers/gpu/pvr/kerneldisplay.h
@@ -0,0 +1,206 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__KERNELDISPLAY_H__)
+#define __KERNELDISPLAY_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
+typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
+typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
+typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
+ DISPLAY_FORMAT*,
+ IMG_UINT32*,
+ DISPLAY_DIMS*);
+typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
+typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
+typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
+ IMG_UINT32,
+ DISPLAY_SURF_ATTRIBUTES*,
+ DISPLAY_SURF_ATTRIBUTES*,
+ IMG_UINT32,
+ PVRSRV_SYNC_DATA**,
+ IMG_UINT32,
+ IMG_HANDLE*,
+ IMG_UINT32*);
+typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE,
+ IMG_HANDLE);
+typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
+typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
+typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
+ IMG_HANDLE,
+ IMG_UINT32*,
+ IMG_HANDLE*);
+typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
+ IMG_HANDLE,
+ IMG_UINT32,
+ IMG_HANDLE,
+ IMG_UINT32,
+ IMG_RECT*);
+typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
+typedef IMG_VOID (*PFN_QUERY_SWAP_COMMAND_ID)(IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_UINT16*, IMG_BOOL*);
+typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
+
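+/* Jump table of entry points a third party display class (DC) driver exports
+ * to services.
+ */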
+typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
+{
+ IMG_UINT32 ui32TableSize;
+ PFN_OPEN_DC_DEVICE pfnOpenDCDevice;
+ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice;
+ PFN_ENUM_DC_FORMATS pfnEnumDCFormats;
+ PFN_ENUM_DC_DIMS pfnEnumDCDims;
+ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer;
+ PFN_GET_DC_INFO pfnGetDCInfo;
+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
+ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain;
+ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain;
+ PFN_SET_DC_DSTRECT pfnSetDCDstRect;
+ PFN_SET_DC_SRCRECT pfnSetDCSrcRect;
+ PFN_SET_DC_DSTCK pfnSetDCDstColourKey;
+ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey;
+ PFN_GET_DC_BUFFERS pfnGetDCBuffers;
+ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer;
+ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem;
+ PFN_SET_DC_STATE pfnSetDCState;
+ PFN_QUERY_SWAP_COMMAND_ID pfnQuerySwapCommandID;
+
+} PVRSRV_DC_SRV2DISP_KMJTABLE;
+
+typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
+
+typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
+typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
+typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
+typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
+ PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE,
+ IMG_HANDLE, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE);
+
+typedef struct _PVRSRV_KERNEL_MEM_INFO_* PDC_MEM_INFO;
+
+typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_CPU_VADDR)(PDC_MEM_INFO, IMG_CPU_VIRTADDR *pVAddr);
+typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_CPU_PADDR)(PDC_MEM_INFO, IMG_SIZE_T uByteOffset, IMG_CPU_PHYADDR *pPAddr);
+typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_BYTE_SIZE)(PDC_MEM_INFO, IMG_SIZE_T *uByteSize);
+typedef IMG_BOOL (*PFN_DC_MEMINFO_IS_PHYS_CONTIG)(PDC_MEM_INFO);
+
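+/* Jump table of services entry points made available to a display class
+ * driver, obtained via PVRGetDisplayClassJTable().
+ */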
+typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
+{
+ IMG_UINT32 ui32TableSize;
+ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice;
+ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice;
+ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction;
+ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
+ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList;
+ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete;
+ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler;
+ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice;
+ PFN_DC_CMD_COMPLETE pfnPVRSRVFreeCmdCompletePacket;
+ PFN_DC_MEMINFO_GET_CPU_VADDR pfnPVRSRVDCMemInfoGetCpuVAddr;
+ PFN_DC_MEMINFO_GET_CPU_PADDR pfnPVRSRVDCMemInfoGetCpuPAddr;
+ PFN_DC_MEMINFO_GET_BYTE_SIZE pfnPVRSRVDCMemInfoGetByteSize;
+ PFN_DC_MEMINFO_IS_PHYS_CONTIG pfnPVRSRVDCMemInfoIsPhysContig;
+
+} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
+
+
+typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
+{
+	IMG_HANDLE hExtDevice;
+	IMG_HANDLE hExtSwapChain;
+	IMG_HANDLE hExtBuffer;
+	IMG_HANDLE hPrivateTag;
+	IMG_UINT32 ui32ClipRectCount;
+	IMG_RECT *psClipRect;
+	IMG_UINT32 ui32SwapInterval;
+} DISPLAYCLASS_FLIP_COMMAND;
+
+
+typedef struct DISPLAYCLASS_FLIP_COMMAND2_TAG
+{
+	IMG_HANDLE hExtDevice;
+	IMG_HANDLE hExtSwapChain;
+	IMG_HANDLE hUnused;
+	IMG_UINT32 ui32SwapInterval;
+	IMG_PVOID pvPrivData;
+	IMG_UINT32 ui32PrivDataLength;
+	PDC_MEM_INFO *ppsMemInfos;
+	IMG_UINT32 ui32NumMemInfos;
+} DISPLAYCLASS_FLIP_COMMAND2;
+
+#define DC_FLIP_COMMAND 0
+
+#define DC_STATE_NO_FLUSH_COMMANDS 0
+#define DC_STATE_FLUSH_COMMANDS 1
+#define DC_STATE_FORCE_SWAP_TO_SYSTEM 2
+
+typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
+
+IMG_IMPORT IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/linkage.h b/drivers/gpu/pvr/linkage.h
new file mode 100644
index 0000000..e64012c
--- /dev/null
+++ b/drivers/gpu/pvr/linkage.h
@@ -0,0 +1,52 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __LINKAGE_H__
+#define __LINKAGE_H__
+
+#if !defined(SUPPORT_DRI_DRM)
+long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+IMG_VOID PVRDPFInit(IMG_VOID);
+PVRSRV_ERROR PVROSFuncInit(IMG_VOID);
+IMG_VOID PVROSFuncDeInit(IMG_VOID);
+
+#ifdef DEBUG
+
+IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
+void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
+
+#ifdef PVR_MANUAL_POWER_CONTROL
+IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
+
+void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
+
+#endif
+
+#endif
+
+#endif
diff --git a/drivers/gpu/pvr/lists.c b/drivers/gpu/pvr/lists.c
new file mode 100644
index 0000000..1081781
--- /dev/null
+++ b/drivers/gpu/pvr/lists.c
@@ -0,0 +1,99 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "lists.h"
+#include "services_headers.h"
+
+IMPLEMENT_LIST_ANY_VA(BM_HEAP)
+IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP)
+IMPLEMENT_LIST_REMOVE(BM_HEAP)
+IMPLEMENT_LIST_INSERT(BM_HEAP)
+
+IMPLEMENT_LIST_ANY_VA(BM_CONTEXT)
+IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL)
+IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT)
+IMPLEMENT_LIST_REMOVE(BM_CONTEXT)
+IMPLEMENT_LIST_INSERT(BM_CONTEXT)
+
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
+
+IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
+IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
+
+
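+/* Callback for List_PVRSRV_DEVICE_NODE_Any_va: matches a device node by
+ * device index and, unless bIgnoreClass is set, by device class.  The
+ * va_list carries ui32DevIndex, bIgnoreClass and (when used) eDevClass.
+ */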
+IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
+{
+ IMG_UINT32 ui32DevIndex;
+ IMG_BOOL bIgnoreClass;
+ PVRSRV_DEVICE_CLASS eDevClass;
+
+ ui32DevIndex = va_arg(va, IMG_UINT32);
+ bIgnoreClass = va_arg(va, IMG_BOOL);
+ if (!bIgnoreClass)
+ {
+ eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
+ }
+ else
+ {
+
+
+ eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
+ }
+
+ if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
+ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
+ {
+ return psDeviceNode;
+ }
+ return IMG_NULL;
+}
+
+IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
+{
+ IMG_UINT32 ui32DeviceIndex;
+
+ ui32DeviceIndex = va_arg(va, IMG_UINT32);
+
+ if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
+ {
+ return psPowerDev;
+ }
+ else
+ {
+ return IMG_NULL;
+ }
+}
diff --git a/drivers/gpu/pvr/lists.h b/drivers/gpu/pvr/lists.h
new file mode 100644
index 0000000..a02307a
--- /dev/null
+++ b/drivers/gpu/pvr/lists.h
@@ -0,0 +1,244 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+#include <stdarg.h>
+#include "img_types.h"
+
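+/*
+ * Macros implementing intrusive singly linked lists.  Each TYPE used with
+ * these macros is expected to embed a "TYPE *psNext" forward pointer and a
+ * "TYPE **ppsThis" pointer back to whichever pointer currently references
+ * the node (the list head or the previous node's psNext); that back pointer
+ * is what allows removal without walking the list.
+ */
+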
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
+{\
+ while(psHead)\
+ {\
+ pfnCallBack(psHead);\
+ psHead = psHead->psNext;\
+ }\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+ va_list ap;\
+ while(psHead)\
+ {\
+ va_start(ap, pfnCallBack);\
+ pfnCallBack(psHead, ap);\
+ psHead = psHead->psNext;\
+ va_end(ap);\
+ }\
+}
+
+
+#define DECLARE_LIST_ANY(TYPE) \
+IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\
+{ \
+ IMG_VOID *pResult;\
+ TYPE *psNextNode;\
+ pResult = IMG_NULL;\
+ psNextNode = psHead;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ pResult = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ IMG_VOID* pResult = IMG_NULL;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ pResult = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+ RTYPE result;\
+ TYPE *psNextNode;\
+ result = CONTINUE;\
+ psNextNode = psHead;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ result = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ RTYPE result = CONTINUE;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ result = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+#define DECLARE_LIST_REMOVE(TYPE) \
+IMG_VOID List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\
+{\
+ (*psNode->ppsThis)=psNode->psNext;\
+ if(psNode->psNext)\
+ {\
+ psNode->psNext->ppsThis = psNode->ppsThis;\
+ }\
+}
+
+#define DECLARE_LIST_INSERT(TYPE) \
+IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+ psNewNode->ppsThis = ppsHead;\
+ psNewNode->psNext = *ppsHead;\
+ *ppsHead = psNewNode;\
+ if(psNewNode->psNext)\
+ {\
+ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+ }\
+}
+
+#define DECLARE_LIST_REVERSE(TYPE) \
+IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+ TYPE *psTmpNode1; \
+ TYPE *psTmpNode2; \
+ TYPE *psCurNode; \
+ psTmpNode1 = IMG_NULL; \
+ psCurNode = *ppsHead; \
+ while(psCurNode) { \
+ psTmpNode2 = psCurNode->psNext; \
+ psCurNode->psNext = psTmpNode1; \
+ psTmpNode1 = psCurNode; \
+ psCurNode = psTmpNode2; \
+ if(psCurNode) \
+ { \
+ psTmpNode1->ppsThis = &(psCurNode->psNext); \
+ } \
+ else \
+ { \
+ psTmpNode1->ppsThis = ppsHead; \
+ } \
+ } \
+ *ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL)
+
+#include "services_headers.h"
+
+DECLARE_LIST_ANY_VA(BM_HEAP);
+DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
+DECLARE_LIST_REMOVE(BM_HEAP);
+DECLARE_LIST_INSERT(BM_HEAP);
+
+DECLARE_LIST_ANY_VA(BM_CONTEXT);
+DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL);
+DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(BM_CONTEXT);
+DECLARE_LIST_REMOVE(BM_CONTEXT);
+DECLARE_LIST_INSERT(BM_CONTEXT);
+
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
+DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
+DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
+IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
+
+#endif
+
diff --git a/drivers/gpu/pvr/lock.h b/drivers/gpu/pvr/lock.h
new file mode 100644
index 0000000..a0854c3
--- /dev/null
+++ b/drivers/gpu/pvr/lock.h
@@ -0,0 +1,32 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __LOCK_H__
+#define __LOCK_H__
+
+extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
+
+#endif
diff --git a/drivers/gpu/pvr/mem.c b/drivers/gpu/pvr/mem.c
new file mode 100644
index 0000000..746494a
--- /dev/null
+++ b/drivers/gpu/pvr/mem.c
@@ -0,0 +1,155 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "pvr_bridge_km.h"
+
+
+static PVRSRV_ERROR
+FreeSharedSysMemCallBack(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bDummy)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ OSFreePages(psKernelMemInfo->ui32Flags,
+ psKernelMemInfo->uAllocSize,
+ psKernelMemInfo->pvLinAddrKM,
+ psKernelMemInfo->sMemBlk.hOSMemHandle);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ psKernelMemInfo,
+ IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
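+/*
+ * Allocate page backed system memory that can be mapped into more than one
+ * process: the mapping flags are forced to PVRSRV_HAP_MULTI_PROCESS and the
+ * allocation is registered with the resource manager so it is freed
+ * automatically when the owning process is cleaned up.
+ */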
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T uSize,
+ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
+ "Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
+
+ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
+ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
+ psKernelMemInfo->ui32Flags = ui32Flags;
+ psKernelMemInfo->uAllocSize = uSize;
+
+ if(OSAllocPages(psKernelMemInfo->ui32Flags,
+ psKernelMemInfo->uAllocSize,
+ (IMG_UINT32)HOST_PAGESIZE(),
+ IMG_NULL,
+ 0,
+ &psKernelMemInfo->pvLinAddrKM,
+ &psKernelMemInfo->sMemBlk.hOSMemHandle)
+ != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO),
+ psKernelMemInfo,
+ 0);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ psKernelMemInfo->sMemBlk.hResItem =
+ ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_MEM_INFO,
+ psKernelMemInfo,
+ 0,
+ &FreeSharedSysMemCallBack);
+
+ *ppsKernelMemInfo = psKernelMemInfo;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ PVRSRV_ERROR eError;
+
+ if(psKernelMemInfo->sMemBlk.hResItem)
+ {
+ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+ }
+ else
+ {
+ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0, CLEANUP_WITH_POLL);
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(!psKernelMemInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psKernelMemInfo->sMemBlk.hResItem)
+ {
+ eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
+ PVR_DBG_BREAK;
+ return eError;
+ }
+
+ psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
+ }
+
+ return eError;
+}
+
diff --git a/drivers/gpu/pvr/mem_debug.c b/drivers/gpu/pvr/mem_debug.c
new file mode 100644
index 0000000..b9cc780
--- /dev/null
+++ b/drivers/gpu/pvr/mem_debug.c
@@ -0,0 +1,247 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef MEM_DEBUG_C
+#define MEM_DEBUG_C
+
+#if defined(PVRSRV_DEBUG_OS_MEMORY)
+
+#include "img_types.h"
+#include "services_headers.h"
+
+#if defined (__cplusplus)
+extern "C"
+{
+#endif
+
+#define STOP_ON_ERROR 0
+
+
+
+
+
+
+
+ IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize)
+ {
+ IMG_UINT8 *pui8Addr;
+ for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++)
+ {
+ if (*pui8Addr != ui8Pattern)
+ {
+ return IMG_FALSE;
+ }
+ }
+ return IMG_TRUE;
+ }
+
+
+
+ IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine)
+ {
+ OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
+
+
+	if (pvCpuVAddr == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer"
+				 " - referenced %s:%d",
+				 pvCpuVAddr,
+				 pszFileName, uLine));
+		while (STOP_ON_ERROR);
+		return;
+	}
+
+
+ if (((IMG_UINT32)pvCpuVAddr&3) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : invalid alignment"
+ " - referenced %s:%d - allocated %s:%d",
+ pvCpuVAddr,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ while (STOP_ON_ERROR);
+ }
+
+
+ if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region before overwritten"
+ " - referenced %s:%d - allocated %s:%d",
+ pvCpuVAddr,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ while (STOP_ON_ERROR);
+ }
+
+
+ if (uSize != psInfo->uSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)"
+ " - referenced %s:%d - allocated %s:%d",
+ pvCpuVAddr, uSize, psInfo->uSize,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ while (STOP_ON_ERROR);
+ }
+
+
+ if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : stored size parity error (0x%X != 0x%X)"
+ " - referenced %s:%d - allocated %s:%d",
+ pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ while (STOP_ON_ERROR);
+ }
+ else
+ {
+
+ uSize = psInfo->uSize;
+ }
+
+
+ if (uSize)
+ {
+ if (!MemCheck((IMG_VOID*)((IMG_UINT32)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region after overwritten"
+ " - referenced from %s:%d - allocated from %s:%d",
+ pvCpuVAddr,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ }
+ }
+
+
+ if (psInfo->eValid != isAllocated)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : not allocated (freed? %d)"
+ " - referenced %s:%d - freed %s:%d",
+ pvCpuVAddr, psInfo->eValid == isFree,
+ pszFileName, uLine,
+ psInfo->sFileName, psInfo->uLineNo));
+ while (STOP_ON_ERROR);
+ }
+ }
+
+ IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc)
+ {
+ IMG_SIZE_T i = 0;
+
+ for (; i < 128; i++)
+ {
+ *pDest = *pSrc;
+ if (*pSrc == '\0') break;
+ pDest++;
+ pSrc++;
+ }
+ }
+
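+	/* Wrapper around the real allocator: each block is prefixed with an
+	 * OSMEM_DEBUG_INFO header and surrounded by guard bytes (0xB1 before,
+	 * 0xB2 after) so OSCheckMemDebug() can detect overruns and stale frees. */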
+ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_PVOID *ppvCpuVAddr,
+ IMG_HANDLE *phBlockAlloc,
+ IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32Line)
+ {
+ OSMEM_DEBUG_INFO *psInfo;
+
+ PVRSRV_ERROR eError;
+
+ eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
+ ui32Size + TEST_BUFFER_PADDING,
+ ppvCpuVAddr,
+ phBlockAlloc,
+ pszFilename,
+ ui32Line);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size);
+ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER);
+
+
+ psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr);
+
+ OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore));
+ debug_strcpy(psInfo->sFileName, pszFilename);
+ psInfo->uLineNo = ui32Line;
+ psInfo->eValid = isAllocated;
+ psInfo->uSize = ui32Size;
+ psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
+
+
+ *ppvCpuVAddr = (IMG_PVOID) ((IMG_UINT32)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS;
+
+#ifdef PVRSRV_LOG_MEMORY_ALLOCS
+
+ PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
+#endif
+
+ return PVRSRV_OK;
+ }
+
+ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_PVOID pvCpuVAddr,
+ IMG_HANDLE hBlockAlloc,
+ IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32Line)
+ {
+ OSMEM_DEBUG_INFO *psInfo;
+
+
+ OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
+
+
+ OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
+
+
+ psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
+
+
+ psInfo->uSize = 0;
+ psInfo->uSizeParityCheck = 0;
+ psInfo->eValid = isFree;
+ psInfo->uLineNo = ui32Line;
+ debug_strcpy(psInfo->sFileName, pszFilename);
+
+ return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line);
+ }
+
+#if defined (__cplusplus)
+
+}
+#endif
+
+#endif
+
+#endif
diff --git a/drivers/gpu/pvr/metrics.c b/drivers/gpu/pvr/metrics.c
new file mode 100644
index 0000000..640eb04
--- /dev/null
+++ b/drivers/gpu/pvr/metrics.c
@@ -0,0 +1,160 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "metrics.h"
+
+#if defined(SUPPORT_VGX)
+#include "vgxapi_km.h"
+#endif
+
+#if defined(SUPPORT_SGX)
+#include "sgxapi_km.h"
+#endif
+
+#if defined(DEBUG) || defined(TIMING)
+
+static volatile IMG_UINT32 *pui32TimerRegister = 0;
+
+#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
+#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
+#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
+
+
+Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
+
+
+IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
+{
+ if (!pui32TimerRegister)
+ {
+ static IMG_BOOL bFirstTime = IMG_TRUE;
+
+ if (bFirstTime)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
+
+ bFirstTime = IMG_FALSE;
+ }
+
+ return 0;
+ }
+
+#if defined(__sh__)
+
+ return (0xffffffff-*pui32TimerRegister);
+
+#else
+
+ return 0;
+
+#endif
+}
+
+
+static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
+{
+ IMG_UINT32 ui32Time1, ui32Time2;
+
+ ui32Time1 = PVRSRVTimeNow();
+
+ OSWaitus(1000000);
+
+ ui32Time2 = PVRSRVTimeNow();
+
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
+
+ return (ui32Time2 - ui32Time1);
+}
+
+
+IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
+{
+ IMG_UINT32 ui32Loop;
+
+ PVR_UNREFERENCED_PARAMETER(pvDevInfo);
+
+ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
+ {
+ asTimers[ui32Loop].ui32Total = 0;
+ asTimers[ui32Loop].ui32Count = 0;
+ }
+
+
+ #if defined(__sh__)
+
+
+
+
+
+ *TCR_2 = TIMER_DIVISOR;
+
+
+ *TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff;
+
+
+ *TST_REG |= (IMG_UINT8)0x04;
+
+ pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
+
+ #else
+
+ pui32TimerRegister = 0;
+
+ #endif
+
+}
+
+
+IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
+{
+ IMG_UINT32 ui32TicksPerMS, ui32Loop;
+
+ ui32TicksPerMS = PVRSRVGetCPUFreq();
+
+ if (!ui32TicksPerMS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
+ return;
+ }
+
+ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
+ {
+ if (asTimers[ui32Loop].ui32Count & 0x80000000L)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
+ }
+ }
+#if 0
+
+ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
+ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
+ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
+#endif
+}
+
+#endif
+
diff --git a/drivers/gpu/pvr/metrics.h b/drivers/gpu/pvr/metrics.h
new file mode 100644
index 0000000..69e1b3d
--- /dev/null
+++ b/drivers/gpu/pvr/metrics.h
@@ -0,0 +1,130 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _METRICS_
+#define _METRICS_
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#if defined(DEBUG) || defined(TIMING)
+
+
+typedef struct
+{
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32Stop;
+ IMG_UINT32 ui32Total;
+ IMG_UINT32 ui32Count;
+} Temporal_Data;
+
+extern Temporal_Data asTimers[];
+
+extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
+extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
+extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID);
+
+
+#define PVRSRV_TIMER_DUMMY 0
+
+#define PVRSRV_TIMER_EXAMPLE_1 1
+#define PVRSRV_TIMER_EXAMPLE_2 2
+
+
+#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1)
+
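+/* Bit 31 of ui32Count marks a timer as running: PVRSRV_TIME_START sets it,
+ * PVRSRV_TIME_STOP clears it and accumulates the elapsed ticks in ui32Total. */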
+#define PVRSRV_TIME_START(X) { \
+ asTimers[X].ui32Count += 1; \
+ asTimers[X].ui32Count |= 0x80000000L; \
+ asTimers[X].ui32Start = PVRSRVTimeNow(); \
+ asTimers[X].ui32Stop = 0; \
+ }
+
+#define PVRSRV_TIME_SUSPEND(X) { \
+ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
+ }
+
+#define PVRSRV_TIME_RESUME(X) { \
+ asTimers[X].ui32Start = PVRSRVTimeNow(); \
+ }
+
+#define PVRSRV_TIME_STOP(X) { \
+ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
+ asTimers[X].ui32Total += asTimers[X].ui32Stop; \
+ asTimers[X].ui32Count &= 0x7FFFFFFFL; \
+ }
+
+#define PVRSRV_TIME_RESET(X) { \
+ asTimers[X].ui32Start = 0; \
+ asTimers[X].ui32Stop = 0; \
+ asTimers[X].ui32Total = 0; \
+ asTimers[X].ui32Count = 0; \
+ }
+
+
+#if defined(__sh__)
+
+#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM))
+
+#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28))
+#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32))
+#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36))
+
+#define TIMER_DIVISOR 4
+
+#endif
+
+
+
+
+
+#else
+
+
+
+#define PVRSRV_TIME_START(X)
+#define PVRSRV_TIME_SUSPEND(X)
+#define PVRSRV_TIME_RESUME(X)
+#define PVRSRV_TIME_STOP(X)
+#define PVRSRV_TIME_RESET(X)
+
+#define PVRSRVSetupMetricTimers(X)
+#define PVRSRVOutputMetricTotals()
+
+
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif
+
diff --git a/drivers/gpu/pvr/mm.c b/drivers/gpu/pvr/mm.c
new file mode 100644
index 0000000..ecad206
--- /dev/null
+++ b/drivers/gpu/pvr/mm.c
@@ -0,0 +1,2713 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+
+
+
+
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#if !defined(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES)
+#define PVR_LINUX_MEM_AREA_POOL_MAX_PAGES 0
+#endif
+
+#include <linux/kernel.h>
+#include <asm/atomic.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
+#include <linux/wrapper.h>
+#endif
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+
+#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+#include <linux/shrinker.h>
+#endif
+#endif
+
+#include "img_defs.h"
+#include "services.h"
+#include "servicesint.h"
+#include "syscommon.h"
+#include "mutils.h"
+#include "mm.h"
+#include "pvrmmap.h"
+#include "mmap.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "proc.h"
+#include "mutex.h"
+#include "lock.h"
+
+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ #include "lists.h"
+#endif
+
+static atomic_t g_sPagePoolEntryCount = ATOMIC_INIT(0);
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+typedef enum {
+ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
+ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
+ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
+ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
+ DEBUG_MEM_ALLOC_TYPE_IO,
+ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
+ DEBUG_MEM_ALLOC_TYPE_ION,
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ DEBUG_MEM_ALLOC_TYPE_VMAP,
+#endif
+ DEBUG_MEM_ALLOC_TYPE_COUNT
+} DEBUG_MEM_ALLOC_TYPE;
+
+typedef struct _DEBUG_MEM_ALLOC_REC
+{
+ DEBUG_MEM_ALLOC_TYPE eAllocType;
+ IMG_VOID *pvKey;
+ IMG_VOID *pvCpuVAddr;
+ IMG_UINT32 ulCpuPAddr;
+ IMG_VOID *pvPrivateData;
+ IMG_UINT32 ui32Bytes;
+ pid_t pid;
+ IMG_CHAR *pszFileName;
+ IMG_UINT32 ui32Line;
+
+ struct _DEBUG_MEM_ALLOC_REC *psNext;
+ struct _DEBUG_MEM_ALLOC_REC **ppsThis;
+} DEBUG_MEM_ALLOC_REC;
+
+static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE)
+static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
+
+
+static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
+
+static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
+static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
+
+static IMG_UINT32 g_SysRAMWaterMark;
+static IMG_UINT32 g_SysRAMHighWaterMark;
+
+static inline IMG_UINT32
+SysRAMTrueWaterMark(void)
+{
+ return g_SysRAMWaterMark + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount));
+}
+
+static IMG_UINT32 g_IOMemWaterMark;
+static IMG_UINT32 g_IOMemHighWaterMark;
+
+static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
+ IMG_VOID *pvKey,
+ IMG_VOID *pvCpuVAddr,
+ IMG_UINT32 ulCpuPAddr,
+ IMG_VOID *pvPrivateData,
+ IMG_UINT32 ui32Bytes,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line);
+
+static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
+
+
+static struct proc_dir_entry *g_SeqFileMemoryRecords;
+static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off);
+static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el);
+static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off);
+
+#endif
+
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+typedef struct _DEBUG_LINUX_MEM_AREA_REC
+{
+ LinuxMemArea *psLinuxMemArea;
+ IMG_UINT32 ui32Flags;
+ pid_t pid;
+
+ struct _DEBUG_LINUX_MEM_AREA_REC *psNext;
+ struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis;
+}DEBUG_LINUX_MEM_AREA_REC;
+
+
+static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
+static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
+static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
+static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
+
+
+
+
+static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
+static IMG_UINT32 g_LinuxMemAreaCount;
+static IMG_UINT32 g_LinuxMemAreaWaterMark;
+static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
+
+
+static struct proc_dir_entry *g_SeqFileMemArea;
+
+static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off);
+static void ProcSeqShowMemArea(struct seq_file *sfile,void* el);
+static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
+
+#endif
+
+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+static PVRSRV_LINUX_MUTEX g_sDebugMutex;
+#endif
+
+#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
+static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start);
+#endif
+
+typedef struct
+{
+
+ struct list_head sPagePoolItem;
+
+ struct page *psPage;
+} LinuxPagePoolEntry;
+
+static LinuxKMemCache *g_PsLinuxMemAreaCache;
+static LinuxKMemCache *g_PsLinuxPagePoolCache;
+
+static LIST_HEAD(g_sPagePoolList);
+static int g_iPagePoolMaxEntries;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
+static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
+#endif
+
+static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
+static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
+#if defined(DEBUG_LINUX_MEM_AREAS)
+static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
+static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
+static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
+#endif
+
+
+static inline IMG_BOOL
+AreaIsUncached(IMG_UINT32 ui32AreaFlags)
+{
+ return (ui32AreaFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED)) != 0;
+}
+
+static inline IMG_BOOL
+CanFreeToPool(LinuxMemArea *psLinuxMemArea)
+{
+ return AreaIsUncached(psLinuxMemArea->ui32AreaFlags) && !psLinuxMemArea->bNeedsCacheInvalidate;
+}
+
+IMG_VOID *
+_KMallocWrapper(IMG_UINT32 ui32ByteSize, gfp_t uFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+ IMG_VOID *pvRet;
+ pvRet = kmalloc(ui32ByteSize, uFlags);
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ if (pvRet)
+ {
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ pvRet,
+ 0,
+ NULL,
+ ui32ByteSize,
+ pszFileName,
+ ui32Line
+ );
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+ return pvRet;
+}
+
+
+IMG_VOID
+_KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line);
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+ kfree(pvCpuVAddr);
+}
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+static IMG_VOID
+DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
+ IMG_VOID *pvKey,
+ IMG_VOID *pvCpuVAddr,
+ IMG_UINT32 ulCpuPAddr,
+ IMG_VOID *pvPrivateData,
+ IMG_UINT32 ui32Bytes,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line)
+{
+ DEBUG_MEM_ALLOC_REC *psRecord;
+
+ LinuxLockMutex(&g_sDebugMutex);
+
+ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
+
+ psRecord->eAllocType = eAllocType;
+ psRecord->pvKey = pvKey;
+ psRecord->pvCpuVAddr = pvCpuVAddr;
+ psRecord->ulCpuPAddr = ulCpuPAddr;
+ psRecord->pvPrivateData = pvPrivateData;
+ psRecord->pid = OSGetCurrentProcessIDKM();
+ psRecord->ui32Bytes = ui32Bytes;
+ psRecord->pszFileName = pszFileName;
+ psRecord->ui32Line = ui32Line;
+
+ List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
+
+ g_WaterMarkData[eAllocType] += ui32Bytes;
+ if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
+ {
+ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
+ }
+
+ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
+ {
+ IMG_UINT32 ui32SysRAMTrueWaterMark;
+
+ g_SysRAMWaterMark += ui32Bytes;
+ ui32SysRAMTrueWaterMark = SysRAMTrueWaterMark();
+
+ if (ui32SysRAMTrueWaterMark > g_SysRAMHighWaterMark)
+ {
+ g_SysRAMHighWaterMark = ui32SysRAMTrueWaterMark;
+ }
+ }
+ else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
+ {
+ g_IOMemWaterMark += ui32Bytes;
+ if (g_IOMemWaterMark > g_IOMemHighWaterMark)
+ {
+ g_IOMemHighWaterMark = g_IOMemWaterMark;
+ }
+ }
+
+ LinuxUnLockMutex(&g_sDebugMutex);
+}
+
+
+static IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va)
+{
+ DEBUG_MEM_ALLOC_TYPE eAllocType;
+ IMG_VOID *pvKey;
+
+ eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
+ pvKey = va_arg(va, IMG_VOID*);
+
+ if (psCurrentRecord->eAllocType == eAllocType
+ && psCurrentRecord->pvKey == pvKey)
+ {
+ eAllocType = psCurrentRecord->eAllocType;
+ g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;
+
+ if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
+ {
+ g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
+ }
+ else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
+ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
+ {
+ g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
+ }
+
+ List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
+ kfree(psCurrentRecord);
+
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+}
+
+
+static IMG_VOID
+DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+ LinuxLockMutex(&g_sDebugMutex);
+
+
+ if (!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords,
+ DebugMemAllocRecordRemove_AnyVaCb,
+ eAllocType,
+ pvKey))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d)\n",
+ __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
+ pszFileName, ui32Line));
+ }
+
+ LinuxUnLockMutex(&g_sDebugMutex);
+}
+
+
+static IMG_CHAR *
+DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
+{
+ IMG_CHAR *apszDebugMemoryRecordTypes[] = {
+ "KMALLOC",
+ "VMALLOC",
+ "ALLOC_PAGES",
+ "IOREMAP",
+ "IO",
+		"KMEM_CACHE_ALLOC",
+		"ION",
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ "VMAP"
+#endif
+ };
+ return apszDebugMemoryRecordTypes[eAllocType];
+}
+#endif
+
+
+static IMG_BOOL
+AllocFlagsToPGProt(pgprot_t *pPGProtFlags, IMG_UINT32 ui32AllocFlags)
+{
+ pgprot_t PGProtFlags;
+
+ switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ case PVRSRV_HAP_CACHED:
+ PGProtFlags = PAGE_KERNEL;
+ break;
+ case PVRSRV_HAP_WRITECOMBINE:
+ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
+ break;
+ case PVRSRV_HAP_UNCACHED:
+ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unknown mapping flags=0x%08x",
+ __FUNCTION__, ui32AllocFlags));
+ dump_stack();
+ return IMG_FALSE;
+ }
+
+ *pPGProtFlags = PGProtFlags;
+
+ return IMG_TRUE;
+}
+
+IMG_VOID *
+_VMallocWrapper(IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32AllocFlags,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line)
+{
+ pgprot_t PGProtFlags;
+ IMG_VOID *pvRet;
+
+ if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags))
+ {
+ return NULL;
+ }
+
+
+ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ if (pvRet)
+ {
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ pvRet,
+ 0,
+ NULL,
+ PAGE_ALIGN(ui32Bytes),
+ pszFileName,
+ ui32Line
+ );
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+
+ return pvRet;
+}
+
+
+IMG_VOID
+_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line);
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+ vfree(pvCpuVAddr);
+}
+
+
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+static IMG_VOID *
+_VMapWrapper(struct page **ppsPageList, IMG_UINT32 ui32NumPages, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+ pgprot_t PGProtFlags;
+ IMG_VOID *pvRet;
+
+ if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags))
+ {
+ return NULL;
+ }
+
+	pvRet = vmap(ppsPageList, ui32NumPages, VM_MAP, PGProtFlags); /* vmap() takes VM_* flags rather than GFP flags */
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ if (pvRet)
+ {
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMAP,
+ pvRet,
+ pvRet,
+ 0,
+ NULL,
+ PAGES_TO_BYTES(ui32NumPages),
+ pszFileName,
+ ui32Line
+ );
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+
+ return pvRet;
+}
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
+#else
+#define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, NULL, 0)
+#endif
+
+
+static IMG_VOID
+_VUnmapWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMAP, pvCpuVAddr, pszFileName, ui32Line);
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+ vunmap(pvCpuVAddr);
+}
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, __FILE__, __LINE__)
+#else
+#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, NULL, 0)
+#endif
+
+#endif
+
+
+IMG_VOID
+_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line);
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+
+ kmem_cache_free(psCache, pvObject);
+}
+
+
+const IMG_CHAR *
+KMemCacheNameWrapper(LinuxKMemCache *psCache)
+{
+ PVR_UNREFERENCED_PARAMETER(psCache);
+
+
+ return "";
+}
+
+
+static LinuxPagePoolEntry *
+LinuxPagePoolEntryAlloc(IMG_VOID)
+{
+ return KMemCacheAllocWrapper(g_PsLinuxPagePoolCache, GFP_KERNEL);
+}
+
+static IMG_VOID
+LinuxPagePoolEntryFree(LinuxPagePoolEntry *psPagePoolEntry)
+{
+ KMemCacheFreeWrapper(g_PsLinuxPagePoolCache, psPagePoolEntry);
+}
+
+
+static struct page *
+AllocPageFromLinux(void)
+{
+ struct page *psPage;
+
+ psPage = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
+ if (!psPage)
+ {
+ return NULL;
+
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+ SetPageReserved(psPage);
+#else
+ mem_map_reserve(psPage);
+#endif
+#endif
+ return psPage;
+}
+
+
+static IMG_VOID
+FreePageToLinux(struct page *psPage)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+ ClearPageReserved(psPage);
+#else
+ mem_map_reserve(psPage);
+#endif
+#endif
+ __free_pages(psPage, 0);
+}
+
+
+#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+static inline void
+PagePoolLock(void)
+{
+ mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline void
+PagePoolUnlock(void)
+{
+ mutex_unlock(&g_sPagePoolMutex);
+}
+
+static inline int
+PagePoolTrylock(void)
+{
+ return mutex_trylock(&g_sPagePoolMutex);
+}
+
+#else
+static inline void
+PagePoolLock(void)
+{
+}
+
+static inline void
+PagePoolUnlock(void)
+{
+}
+
+static inline int
+PagePoolTrylock(void)
+{
+ return 1;
+}
+#endif
+
+
+static inline void
+AddEntryToPool(LinuxPagePoolEntry *psPagePoolEntry)
+{
+ list_add_tail(&psPagePoolEntry->sPagePoolItem, &g_sPagePoolList);
+ atomic_inc(&g_sPagePoolEntryCount);
+}
+
+static inline void
+RemoveEntryFromPool(LinuxPagePoolEntry *psPagePoolEntry)
+{
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ atomic_dec(&g_sPagePoolEntryCount);
+}
+
+static inline LinuxPagePoolEntry *
+RemoveFirstEntryFromPool(void)
+{
+ LinuxPagePoolEntry *psPagePoolEntry;
+
+ if (list_empty(&g_sPagePoolList))
+ {
+ PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
+
+ return NULL;
+ }
+
+ PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) > 0);
+
+ psPagePoolEntry = list_first_entry(&g_sPagePoolList, LinuxPagePoolEntry, sPagePoolItem);
+
+ RemoveEntryFromPool(psPagePoolEntry);
+
+ return psPagePoolEntry;
+}
+
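+/* Allocate one page: uncached areas first try to reuse a page from the page
+ * pool; otherwise fall back to alloc_pages(). *pbFromPagePool records which
+ * path was taken. */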
+static struct page *
+AllocPage(IMG_UINT32 ui32AreaFlags, IMG_BOOL *pbFromPagePool)
+{
+ struct page *psPage = NULL;
+
+
+ if (AreaIsUncached(ui32AreaFlags) && atomic_read(&g_sPagePoolEntryCount) != 0)
+ {
+ LinuxPagePoolEntry *psPagePoolEntry;
+
+ PagePoolLock();
+ psPagePoolEntry = RemoveFirstEntryFromPool();
+ PagePoolUnlock();
+
+
+ if (psPagePoolEntry)
+ {
+ psPage = psPagePoolEntry->psPage;
+ LinuxPagePoolEntryFree(psPagePoolEntry);
+ *pbFromPagePool = IMG_TRUE;
+ }
+ }
+
+ if (!psPage)
+ {
+ psPage = AllocPageFromLinux();
+ if (psPage)
+ {
+ *pbFromPagePool = IMG_FALSE;
+ }
+ }
+
+ return psPage;
+
+}
+
+static IMG_VOID
+FreePage(IMG_BOOL bToPagePool, struct page *psPage)
+{
+
+ if (bToPagePool && atomic_read(&g_sPagePoolEntryCount) < g_iPagePoolMaxEntries)
+ {
+ LinuxPagePoolEntry *psPagePoolEntry = LinuxPagePoolEntryAlloc();
+ if (psPagePoolEntry)
+ {
+ psPagePoolEntry->psPage = psPage;
+
+ PagePoolLock();
+ AddEntryToPool(psPagePoolEntry);
+ PagePoolUnlock();
+
+ return;
+ }
+ }
+
+ FreePageToLinux(psPage);
+}
+
+static IMG_VOID
+FreePagePool(IMG_VOID)
+{
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+
+ PagePoolLock();
+
+#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
+ PVR_TRACE(("%s: Freeing %d pages from pool", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));
+#else
+ PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
+ PVR_ASSERT(list_empty(&g_sPagePoolList));
+#endif
+
+ list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem)
+ {
+ RemoveEntryFromPool(psPagePoolEntry);
+
+ FreePageToLinux(psPagePoolEntry->psPage);
+ LinuxPagePoolEntryFree(psPagePoolEntry);
+ }
+
+ PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
+
+ PagePoolUnlock();
+}
+
+#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+static struct shrinker g_sShrinker;
+#endif
+
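+/* Shrinker callback: frees up to nr_to_scan pooled pages back to Linux under
+ * memory pressure and returns the number of pages left in the pool. */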
+static int
+ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+
+ PVR_ASSERT(psShrinker == &g_sShrinker);
+ (void)psShrinker;
+
+ if (uNumToScan != 0)
+ {
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+
+ PVR_TRACE(("%s: Number to scan: %ld", __FUNCTION__, uNumToScan));
+ PVR_TRACE(("%s: Pages in pool before scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));
+
+ if (!PagePoolTrylock())
+ {
+ PVR_TRACE(("%s: Couldn't get page pool lock", __FUNCTION__));
+ return -1;
+ }
+
+ list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem)
+ {
+ RemoveEntryFromPool(psPagePoolEntry);
+
+ FreePageToLinux(psPagePoolEntry->psPage);
+ LinuxPagePoolEntryFree(psPagePoolEntry);
+
+ if (--uNumToScan == 0)
+ {
+ break;
+ }
+ }
+
+ if (list_empty(&g_sPagePoolList))
+ {
+ PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
+ }
+
+ PagePoolUnlock();
+
+ PVR_TRACE(("%s: Pages in pool after scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));
+ }
+
+ return atomic_read(&g_sPagePoolEntryCount);
+}
+#endif
+
+static IMG_BOOL
+AllocPages(IMG_UINT32 ui32AreaFlags, struct page ***pppsPageList, IMG_HANDLE *phBlockPageList, IMG_UINT32 ui32NumPages, IMG_BOOL *pbFromPagePool)
+{
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+ IMG_INT32 i;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bFromPagePool = IMG_FALSE;
+
+ eError = OSAllocMem(0, sizeof(*ppsPageList) * ui32NumPages, (IMG_VOID **)&ppsPageList, &hBlockPageList,
+ "Array of pages");
+ if (eError != PVRSRV_OK)
+ {
+ goto failed_page_list_alloc;
+ }
+
+ *pbFromPagePool = IMG_TRUE;
+ for(i = 0; i < (IMG_INT32)ui32NumPages; i++)
+ {
+ ppsPageList[i] = AllocPage(ui32AreaFlags, &bFromPagePool);
+ if (!ppsPageList[i])
+ {
+ goto failed_alloc_pages;
+ }
+ *pbFromPagePool &= bFromPagePool;
+ }
+
+ *pppsPageList = ppsPageList;
+ *phBlockPageList = hBlockPageList;
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
+ ppsPageList,
+ 0,
+ 0,
+ NULL,
+ PAGES_TO_BYTES(ui32NumPages),
+ "unknown",
+ 0
+ );
+#endif
+
+ return IMG_TRUE;
+
+failed_alloc_pages:
+ for(i--; i >= 0; i--)
+ {
+ FreePage(*pbFromPagePool, ppsPageList[i]);
+ }
+ (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList);
+
+failed_page_list_alloc:
+ return IMG_FALSE;
+}
+
+
+static IMG_VOID
+FreePages(IMG_BOOL bToPagePool, struct page **ppsPageList, IMG_HANDLE hBlockPageList, IMG_UINT32 ui32NumPages)
+{
+ IMG_INT32 i;
+
+ for(i = 0; i < (IMG_INT32)ui32NumPages; i++)
+ {
+ FreePage(bToPagePool, ppsPageList[i]);
+ }
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, ppsPageList, __FILE__, __LINE__);
+#endif
+
+ (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList);
+}
+
+
+LinuxMemArea *
+NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
+{
+ LinuxMemArea *psLinuxMemArea = NULL;
+ IMG_VOID *pvCpuVAddr;
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ IMG_UINT32 ui32NumPages = 0;
+ struct page **ppsPageList = NULL;
+ IMG_HANDLE hBlockPageList;
+#endif
+ IMG_BOOL bFromPagePool = IMG_FALSE;
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ goto failed;
+ }
+
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
+
+ if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool))
+ {
+ goto failed;
+ }
+
+ pvCpuVAddr = VMapWrapper(ppsPageList, ui32NumPages, ui32AreaFlags);
+#else
+ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
+ if (!pvCpuVAddr)
+ {
+ goto failed;
+ }
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+ ReservePages(pvCpuVAddr, ui32Bytes);
+#endif
+#endif
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
+ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ psLinuxMemArea->uData.sVmalloc.ppsPageList = ppsPageList;
+ psLinuxMemArea->uData.sVmalloc.hBlockPageList = hBlockPageList;
+#endif
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+
+ if (AreaIsUncached(ui32AreaFlags) && !bFromPagePool)
+ {
+ OSInvalidateCPUCacheRangeKM(psLinuxMemArea, pvCpuVAddr, ui32Bytes);
+ }
+
+ return psLinuxMemArea;
+
+failed:
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ if (ppsPageList)
+ {
+ FreePages(bFromPagePool, ppsPageList, hBlockPageList, ui32NumPages);
+ }
+#endif
+ if (psLinuxMemArea)
+ {
+ LinuxMemAreaStructFree(psLinuxMemArea);
+ }
+
+ return NULL;
+}
+
+
+IMG_VOID
+FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ IMG_UINT32 ui32NumPages;
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+#endif
+
+ PVR_ASSERT(psLinuxMemArea);
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
+ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
+ __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
+
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ VUnmapWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
+
+ ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
+ ppsPageList = psLinuxMemArea->uData.sVmalloc.ppsPageList;
+ hBlockPageList = psLinuxMemArea->uData.sVmalloc.hBlockPageList;
+
+ FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages);
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+ UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
+ psLinuxMemArea->ui32ByteSize);
+#endif
+
+ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
+#endif
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+static IMG_VOID
+ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
+{
+ IMG_VOID *pvPage;
+ IMG_VOID *pvEnd = pvAddress + ui32Length;
+
+ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
+ {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+ SetPageReserved(vmalloc_to_page(pvPage));
+#else
+ mem_map_reserve(vmalloc_to_page(pvPage));
+#endif
+ }
+}
+
+
+static IMG_VOID
+UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
+{
+ IMG_VOID *pvPage;
+ IMG_VOID *pvEnd = pvAddress + ui32Length;
+
+ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
+ {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+ ClearPageReserved(vmalloc_to_page(pvPage));
+#else
+ mem_map_unreserve(vmalloc_to_page(pvPage));
+#endif
+ }
+}
+#endif
+
+
+IMG_VOID *
+_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line)
+{
+ IMG_VOID *pvIORemapCookie;
+
+ switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ case PVRSRV_HAP_CACHED:
+ pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
+ break;
+ case PVRSRV_HAP_WRITECOMBINE:
+ pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
+ break;
+ case PVRSRV_HAP_UNCACHED:
+ pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
+ return NULL;
+ }
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ if (pvIORemapCookie)
+ {
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
+ pvIORemapCookie,
+ pvIORemapCookie,
+ BasePAddr.uiAddr,
+ NULL,
+ ui32Bytes,
+ pszFileName,
+ ui32Line
+ );
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+
+ return pvIORemapCookie;
+}
+
+
+IMG_VOID
+_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line);
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+ iounmap(pvIORemapCookie);
+}
+
+
+LinuxMemArea *
+NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32AreaFlags)
+{
+ LinuxMemArea *psLinuxMemArea;
+ IMG_VOID *pvIORemapCookie;
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ return NULL;
+ }
+
+ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
+ if (!pvIORemapCookie)
+ {
+ LinuxMemAreaStructFree(psLinuxMemArea);
+ return NULL;
+ }
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
+ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
+ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+ return psLinuxMemArea;
+}
+
+
+IMG_VOID
+FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
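+/* Returns IMG_TRUE if externally supplied pages that are not all covered by
+ * the kernel memmap (pfn_valid) nevertheless form a single physically
+ * contiguous range, so they can be mapped as one contiguous area. */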
+static IMG_BOOL
+TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig)
+{
+ IMG_UINT32 ui32;
+ IMG_UINT32 ui32AddrChk;
+ IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
+
+
+ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
+ ui32 < ui32NumPages;
+ ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
+ {
+ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
+ {
+ break;
+ }
+ }
+ if (ui32 == ui32NumPages)
+ {
+ return IMG_FALSE;
+ }
+
+ if (!bPhysContig)
+ {
+ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
+ ui32 < ui32NumPages;
+ ui32++, ui32AddrChk += PAGE_SIZE)
+ {
+ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
+ {
+ return IMG_FALSE;
+ }
+ }
+ }
+
+ return IMG_TRUE;
+}
+#endif
+
+LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ return NULL;
+ }
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
+ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
+ psLinuxMemArea->uData.sExternalKV.bPhysContig =
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ (bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig))
+ ? IMG_TRUE : IMG_FALSE;
+#else
+ bPhysContig;
+#endif
+ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
+ {
+ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
+ }
+ else
+ {
+ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
+ }
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+ return psLinuxMemArea;
+}
+
+
+IMG_VOID
+FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+
+LinuxMemArea *
+NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32AreaFlags)
+{
+ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ return NULL;
+ }
+
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
+ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
+ (IMG_VOID *)BasePAddr.uiAddr,
+ 0,
+ BasePAddr.uiAddr,
+ NULL,
+ ui32Bytes,
+ "unknown",
+ 0
+ );
+#endif
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+ return psLinuxMemArea;
+}
+
+
+IMG_VOID
+FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
+ (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__);
+#endif
+
+
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+
+LinuxMemArea *
+NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
+{
+ LinuxMemArea *psLinuxMemArea;
+ IMG_UINT32 ui32NumPages;
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+ IMG_BOOL bFromPagePool;
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ goto failed_area_alloc;
+ }
+
+ ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
+
+ if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool))
+ {
+ goto failed_alloc_pages;
+ }
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
+ psLinuxMemArea->uData.sPageList.ppsPageList = ppsPageList;
+ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+
+ psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags) && !bFromPagePool;
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+ return psLinuxMemArea;
+
+failed_alloc_pages:
+ LinuxMemAreaStructFree(psLinuxMemArea);
+failed_area_alloc:
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
+
+ return NULL;
+}
+
+
+IMG_VOID
+FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ IMG_UINT32 ui32NumPages;
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+
+ PVR_ASSERT(psLinuxMemArea);
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+ ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
+ ppsPageList = psLinuxMemArea->uData.sPageList.ppsPageList;
+ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
+
+ FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages);
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+#if defined(CONFIG_ION_OMAP)
+
+#include "env_perproc.h"
+
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+
+extern struct ion_client *gpsIONClient;
+
+LinuxMemArea *
+NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags,
+ IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength)
+{
+ const IMG_UINT32 ui32AllocDataLen =
+ offsetof(struct omap_ion_tiler_alloc_data, handle);
+ struct omap_ion_tiler_alloc_data asAllocData[2] = {};
+ u32 *pu32PageAddrs[2] = { NULL, NULL };
+ IMG_UINT32 i, ui32NumHandlesPerFd;
+ IMG_BYTE *pbPrivData = pvPrivData;
+ IMG_CPU_PHYADDR *pCPUPhysAddrs;
+ int iNumPages[2] = { 0, 0 };
+ LinuxMemArea *psLinuxMemArea;
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate LinuxMemArea struct", __func__));
+ goto err_out;
+ }
+
+
+ BUG_ON(ui32PrivDataLength != ui32AllocDataLen &&
+ ui32PrivDataLength != ui32AllocDataLen * 2);
+ ui32NumHandlesPerFd = ui32PrivDataLength / ui32AllocDataLen;
+
+
+ for(i = 0; i < ui32NumHandlesPerFd; i++)
+ {
+ memcpy(&asAllocData[i], &pbPrivData[i * ui32AllocDataLen], ui32AllocDataLen);
+
+ if (omap_ion_tiler_alloc(gpsIONClient, &asAllocData[i]) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate via ion_tiler", __func__));
+ goto err_free;
+ }
+
+ if (omap_tiler_pages(gpsIONClient, asAllocData[i].handle, &iNumPages[i],
+ &pu32PageAddrs[i]) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to compute tiler pages", __func__));
+ goto err_free;
+ }
+ }
+
+
+ BUG_ON(ui32Bytes != (iNumPages[0] + iNumPages[1]) * PAGE_SIZE);
+ BUG_ON(sizeof(IMG_CPU_PHYADDR) != sizeof(int));
+
+
+ pCPUPhysAddrs = vmalloc(sizeof(IMG_CPU_PHYADDR) * (iNumPages[0] + iNumPages[1]));
+ if (!pCPUPhysAddrs)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate page list", __func__));
+ goto err_free;
+ }
+ for(i = 0; i < iNumPages[0]; i++)
+ pCPUPhysAddrs[i].uiAddr = pu32PageAddrs[0][i];
+ for(i = 0; i < iNumPages[1]; i++)
+ pCPUPhysAddrs[iNumPages[0] + i].uiAddr = pu32PageAddrs[1][i];
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ION,
+ asAllocData[0].handle,
+ 0,
+ 0,
+ NULL,
+ PAGE_ALIGN(ui32Bytes),
+ "unknown",
+ 0
+ );
+#endif
+
+ for(i = 0; i < 2; i++)
+ psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] = asAllocData[i].handle;
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ION;
+ psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = pCPUPhysAddrs;
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+
+ psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
+#endif
+
+err_out:
+ return psLinuxMemArea;
+
+err_free:
+ LinuxMemAreaStructFree(psLinuxMemArea);
+ psLinuxMemArea = IMG_NULL;
+ goto err_out;
+}
+
+
+IMG_VOID
+FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ IMG_UINT32 i;
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ION,
+ psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[0],
+ __FILE__, __LINE__);
+#endif
+
+ for(i = 0; i < 2; i++)
+ {
+ if (!psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i])
+ break;
+ ion_free(gpsIONClient, psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]);
+ psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] = IMG_NULL;
+ }
+
+
+ vfree(psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs);
+ psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = IMG_NULL;
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+#endif
+
+struct page*
+LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
+ IMG_UINT32 ui32ByteOffset)
+{
+ IMG_UINT32 ui32PageIndex;
+ IMG_CHAR *pui8Addr;
+
+ switch (psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
+ return psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex];
+
+ case LINUX_MEM_AREA_VMALLOC:
+ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
+ pui8Addr += ui32ByteOffset;
+ return vmalloc_to_page(pui8Addr);
+
+ case LINUX_MEM_AREA_SUB_ALLOC:
+
+ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
+ + ui32ByteOffset);
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
+ __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
+ return NULL;
+ }
+}
+
+
+LinuxKMemCache *
+KMemCacheCreateWrapper(IMG_CHAR *pszName,
+ size_t Size,
+ size_t Align,
+ IMG_UINT32 ui32Flags)
+{
+#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
+ ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
+#endif
+ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
+ , NULL
+#endif
+ );
+}
+
+
+IMG_VOID
+KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
+{
+ kmem_cache_destroy(psCache);
+}
+
+
+IMG_VOID *
+_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
+ gfp_t Flags,
+#else
+ IMG_INT Flags,
+#endif
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line)
+{
+ IMG_VOID *pvRet;
+
+ pvRet = kmem_cache_zalloc(psCache, Flags);
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
+ pvRet,
+ pvRet,
+ 0,
+ psCache,
+ kmem_cache_size(psCache),
+ pszFileName,
+ ui32Line
+ );
+#else
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32Line);
+#endif
+
+ return pvRet;
+}
+
+
+LinuxMemArea *
+NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
+ IMG_UINT32 ui32ByteOffset,
+ IMG_UINT32 ui32Bytes)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
+
+ psLinuxMemArea = LinuxMemAreaStructAlloc();
+ if (!psLinuxMemArea)
+ {
+ return NULL;
+ }
+
+ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
+ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
+ psLinuxMemArea->ui32ByteSize = ui32Bytes;
+ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
+ psLinuxMemArea->bNeedsCacheInvalidate = psParentLinuxMemArea->bNeedsCacheInvalidate;
+ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ {
+ DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
+ psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
+ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);
+ }
+#endif
+
+ return psLinuxMemArea;
+}
+
+
+static IMG_VOID
+FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
+#endif
+
+
+
+ LinuxMemAreaStructFree(psLinuxMemArea);
+}
+
+
+static LinuxMemArea *
+LinuxMemAreaStructAlloc(IMG_VOID)
+{
+#if 0
+ LinuxMemArea *psLinuxMemArea;
+ psLinuxMemArea = kmem_cache_alloc(g_PsLinuxMemAreaCache, GFP_KERNEL);
+ printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
+ dump_stack();
+ return psLinuxMemArea;
+#else
+ return KMemCacheAllocWrapper(g_PsLinuxMemAreaCache, GFP_KERNEL);
+#endif
+}
+
+
+static IMG_VOID
+LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
+{
+ KMemCacheFreeWrapper(g_PsLinuxMemAreaCache, psLinuxMemArea);
+
+
+}
+
+
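+/* Free a memory area by dispatching to the free routine matching its type. */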
+IMG_VOID
+LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
+{
+ switch (psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_VMALLOC:
+ FreeVMallocLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_IOREMAP:
+ FreeIORemapLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ FreeExternalKVLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_IO:
+ FreeIOLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_SUB_ALLOC:
+ FreeSubLinuxMemArea(psLinuxMemArea);
+ break;
+ case LINUX_MEM_AREA_ION:
+ FreeIONLinuxMemArea(psLinuxMemArea);
+ break;
+ default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: Unknown area type (%d)\n",
+ __FUNCTION__, psLinuxMemArea->eAreaType));
+ break;
+ }
+}
+
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
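+/* Record a new memory area in the debug list, update the allocation water
+ * marks (sub-allocations are excluded from the byte counts) and warn about
+ * unrecognised HAP flags. */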
+static IMG_VOID
+DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
+ const IMG_CHAR *pi8FlagsString;
+
+ LinuxLockMutex(&g_sDebugMutex);
+
+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
+ if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
+ {
+ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
+ }
+ }
+ g_LinuxMemAreaCount++;
+
+
+ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
+ if (psNewRecord)
+ {
+
+ psNewRecord->psLinuxMemArea = psLinuxMemArea;
+ psNewRecord->ui32Flags = ui32Flags;
+ psNewRecord->pid = OSGetCurrentProcessIDKM();
+
+ List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to allocate linux memory area record.",
+ __FUNCTION__));
+ }
+
+
+ pi8FlagsString = HAPFlagsToString(ui32Flags);
+ if (strstr(pi8FlagsString, "UNKNOWN"))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unexpected flags (0x%08x) associated with psLinuxMemArea @ %p",
+ __FUNCTION__,
+ ui32Flags,
+ psLinuxMemArea));
+
+ }
+
+ LinuxUnLockMutex(&g_sDebugMutex);
+}
+
+
+
+static IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord,
+ va_list va)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ psLinuxMemArea = va_arg(va, LinuxMemArea*);
+ if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
+ {
+ return psCurrentRecord;
+ }
+ else
+ {
+ return IMG_NULL;
+ }
+}
+
+
+static DEBUG_LINUX_MEM_AREA_REC *
+DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
+
+ LinuxLockMutex(&g_sDebugMutex);
+ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
+ MatchLinuxMemArea_AnyVaCb,
+ psLinuxMemArea);
+
+ LinuxUnLockMutex(&g_sDebugMutex);
+
+ return psCurrentRecord;
+}
+
+
+static IMG_VOID
+DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
+
+ LinuxLockMutex(&g_sDebugMutex);
+
+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
+ }
+ g_LinuxMemAreaCount--;
+
+
+ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
+ MatchLinuxMemArea_AnyVaCb,
+ psLinuxMemArea);
+ if (psCurrentRecord)
+ {
+
+ List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
+ kfree(psCurrentRecord);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
+ __FUNCTION__, psLinuxMemArea));
+ }
+
+ LinuxUnLockMutex(&g_sDebugMutex);
+}
+#endif
+
+
+IMG_VOID *
+LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
+{
+ switch (psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_VMALLOC:
+ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
+ case LINUX_MEM_AREA_IOREMAP:
+ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
+ case LINUX_MEM_AREA_SUB_ALLOC:
+ {
+ IMG_CHAR *pAddr =
+ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
+ if (!pAddr)
+ {
+ return NULL;
+ }
+ return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
+ }
+ default:
+ return NULL;
+ }
+}
+
+
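+/* Translate a byte offset within a memory area into the CPU physical address
+ * of the backing memory, with a per-type lookup (ioremap/IO base address,
+ * external physical address tables, vmalloc page walk, ION page array, page
+ * list, or the parent of a sub-allocation). */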
+IMG_CPU_PHYADDR
+LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
+{
+ IMG_CPU_PHYADDR CpuPAddr;
+
+ CpuPAddr.uiAddr = 0;
+
+ switch (psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_IOREMAP:
+ {
+ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
+ CpuPAddr.uiAddr += ui32ByteOffset;
+ break;
+ }
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ {
+ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
+ {
+ CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr);
+ CpuPAddr.uiAddr += ui32ByteOffset;
+ }
+ else
+ {
+ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
+ IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex];
+
+ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
+ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
+ }
+ break;
+ }
+ case LINUX_MEM_AREA_IO:
+ {
+ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
+ CpuPAddr.uiAddr += ui32ByteOffset;
+ break;
+ }
+ case LINUX_MEM_AREA_VMALLOC:
+ {
+ IMG_CHAR *pCpuVAddr;
+ pCpuVAddr =
+ (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
+ pCpuVAddr += ui32ByteOffset;
+ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
+ break;
+ }
+ case LINUX_MEM_AREA_ION:
+ {
+ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
+ CpuPAddr = psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs[ui32PageIndex];
+ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
+ break;
+ }
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ {
+ struct page *page;
+ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
+ page = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex];
+ CpuPAddr.uiAddr = page_to_phys(page);
+ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
+ break;
+ }
+ case LINUX_MEM_AREA_SUB_ALLOC:
+ {
+ CpuPAddr =
+ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
+ + ui32ByteOffset);
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
+ __FUNCTION__, psLinuxMemArea->eAreaType));
+ PVR_ASSERT(CpuPAddr.uiAddr);
+ break;
+ }
+ }
+
+ return CpuPAddr;
+}
+
+
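+/* Report whether an area is physically contiguous: ioremap/IO areas always
+ * are, page-based areas never are, external areas report their own flag and
+ * sub-allocations defer to their parent. */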
+IMG_BOOL
+LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea)
+{
+ switch (psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_IOREMAP:
+ case LINUX_MEM_AREA_IO:
+ return IMG_TRUE;
+
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
+
+ case LINUX_MEM_AREA_ION:
+ case LINUX_MEM_AREA_VMALLOC:
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ return IMG_FALSE;
+
+ case LINUX_MEM_AREA_SUB_ALLOC:
+
+ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
+ __FUNCTION__, psLinuxMemArea->eAreaType));
+ break;
+ }
+ return IMG_FALSE;
+}
+
+
+const IMG_CHAR *
+LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
+{
+
+ switch (eMemAreaType)
+ {
+ case LINUX_MEM_AREA_IOREMAP:
+ return "LINUX_MEM_AREA_IOREMAP";
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ return "LINUX_MEM_AREA_EXTERNAL_KV";
+ case LINUX_MEM_AREA_IO:
+ return "LINUX_MEM_AREA_IO";
+ case LINUX_MEM_AREA_VMALLOC:
+ return "LINUX_MEM_AREA_VMALLOC";
+ case LINUX_MEM_AREA_SUB_ALLOC:
+ return "LINUX_MEM_AREA_SUB_ALLOC";
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ return "LINUX_MEM_AREA_ALLOC_PAGES";
+ case LINUX_MEM_AREA_ION:
+ return "LINUX_MEM_AREA_ION";
+ default:
+ PVR_ASSERT(0);
+ }
+
+ return "";
+}
+
+
+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start)
+{
+ if (start)
+ {
+ LinuxLockMutex(&g_sDebugMutex);
+ }
+ else
+ {
+ LinuxUnLockMutex(&g_sDebugMutex);
+ }
+}
+#endif
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+
+static IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va)
+{
+ off_t *pOff = va_arg(va, off_t*);
+ if (--(*pOff))
+ {
+ return IMG_NULL;
+ }
+ else
+ {
+ return psNode;
+ }
+}
+
+
+static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psRecord;
+ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
+ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
+ DecOffMemAreaRec_AnyVaCb,
+ &off);
+ return (void*)psRecord;
+}
+
+static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psRecord;
+ if (!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
+ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
+ DecOffMemAreaRec_AnyVaCb,
+ &off);
+ return (void*)psRecord;
+}
+
+
+static void ProcSeqShowMemArea(struct seq_file *sfile,void* el)
+{
+ DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el;
+ if (el == PVR_PROC_SEQ_START_TOKEN)
+ {
+
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ seq_printf(sfile,
+ "Number of Linux Memory Areas: %u\n"
+ "At the current water mark these areas correspond to %u bytes (excluding SUB areas)\n"
+ "At the highest water mark these areas corresponded to %u bytes (excluding SUB areas)\n"
+ "\nDetails for all Linux Memory Areas:\n"
+ "%s %-24s %s %s %-8s %-5s %s\n",
+ g_LinuxMemAreaCount,
+ g_LinuxMemAreaWaterMark,
+ g_LinuxMemAreaHighWaterMark,
+ "psLinuxMemArea",
+ "LinuxMemType",
+ "CpuVAddr",
+ "CpuPAddr",
+ "Bytes",
+ "Pid",
+ "Flags"
+ );
+#else
+ seq_printf(sfile,
+ "<mem_areas_header>\n"
+ "\t<count>%u</count>\n"
+ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%u\"/>\n"
+ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%u\"/>\n"
+ "</mem_areas_header>\n",
+ g_LinuxMemAreaCount,
+ g_LinuxMemAreaWaterMark,
+ g_LinuxMemAreaHighWaterMark
+ );
+#endif
+ return;
+ }
+
+ seq_printf(sfile,
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ "%8p %-24s %8p %08x %-8d %-5u %08x=(%s)\n",
+#else
+ "<linux_mem_area>\n"
+ "\t<pointer>%8p</pointer>\n"
+ "\t<type>%s</type>\n"
+ "\t<cpu_virtual>%8p</cpu_virtual>\n"
+ "\t<cpu_physical>%08x</cpu_physical>\n"
+ "\t<bytes>%d</bytes>\n"
+ "\t<pid>%u</pid>\n"
+ "\t<flags>%08x</flags>\n"
+ "\t<flags_string>%s</flags_string>\n"
+ "</linux_mem_area>\n",
+#endif
+ psRecord->psLinuxMemArea,
+ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
+ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
+ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
+ psRecord->psLinuxMemArea->ui32ByteSize,
+ psRecord->pid,
+ psRecord->ui32Flags,
+ HAPFlagsToString(psRecord->ui32Flags)
+ );
+
+}
+
+#endif
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+
+static IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va)
+{
+ off_t *pOff = va_arg(va, off_t*);
+ if (--(*pOff))
+ {
+ return IMG_NULL;
+ }
+ else
+ {
+ return psNode;
+ }
+}
+
+
+
+static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off)
+{
+ DEBUG_MEM_ALLOC_REC *psRecord;
+ psRecord = (DEBUG_MEM_ALLOC_REC*)
+ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
+ DecOffMemAllocRec_AnyVaCb,
+ &off);
+#if defined(DEBUG_LINUX_XML_PROC_FILES)
+ if (!psRecord)
+ {
+ seq_printf(sfile, "</meminfo>\n");
+ }
+#endif
+
+ return (void*)psRecord;
+}
+
+static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
+{
+ DEBUG_MEM_ALLOC_REC *psRecord;
+ if (!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ psRecord = (DEBUG_MEM_ALLOC_REC*)
+ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
+ DecOffMemAllocRec_AnyVaCb,
+ &off);
+
+#if defined(DEBUG_LINUX_XML_PROC_FILES)
+ if (!psRecord)
+ {
+ seq_printf(sfile, "</meminfo>\n");
+ }
+#endif
+
+ return (void*)psRecord;
+}
+
+static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el)
+{
+ DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el;
+ if (el == PVR_PROC_SEQ_START_TOKEN)
+ {
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes allocated via kmalloc",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated via kmalloc",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes allocated via vmalloc",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated via vmalloc",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes allocated via alloc_pages",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated via alloc_pages",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes allocated via ioremap",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated via ioremap",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes reserved for \"IO\" memory areas",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes allocated via kmem_cache_alloc",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Current Water Mark of bytes mapped via vmap",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "Highest Water Mark of bytes mapped via vmap",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
+#endif
+#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
+ seq_printf(sfile, "%-60s: %d pages\n",
+ "Number of pages in page pool",
+ atomic_read(&g_sPagePoolEntryCount));
+#endif
+ seq_printf( sfile, "\n");
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "The Current Water Mark for memory allocated from system RAM",
+ SysRAMTrueWaterMark());
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "The Highest Water Mark for memory allocated from system RAM",
+ g_SysRAMHighWaterMark);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "The Current Water Mark for memory allocated from IO memory",
+ g_IOMemWaterMark);
+ seq_printf(sfile, "%-60s: %d bytes\n",
+ "The Highest Water Mark for memory allocated from IO memory",
+ g_IOMemHighWaterMark);
+
+ seq_printf( sfile, "\n");
+
+ seq_printf(sfile, "Details for all known allocations:\n"
+ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
+ "Type",
+ "CpuVAddr",
+ "CpuPAddr",
+ "Bytes",
+ "PID",
+ "PrivateData",
+ "Filename:Line");
+
+#else
+
+
+ seq_printf(sfile, "<meminfo>\n<meminfo_header>\n");
+ seq_printf(sfile,
+ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
+ seq_printf(sfile,
+ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
+ seq_printf(sfile,
+ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
+ seq_printf(sfile,
+ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
+ seq_printf(sfile,
+ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
+ seq_printf(sfile,
+ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
+ seq_printf(sfile,
+ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
+ seq_printf(sfile,
+ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
+ seq_printf(sfile,
+ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
+ seq_printf(sfile,
+ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
+ seq_printf(sfile,
+ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
+ seq_printf(sfile,
+ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ seq_printf(sfile,
+ "<watermark key=\"mr12\" description=\"vmap_current\" bytes=\"%d\"/>\n",
+ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
+ seq_printf(sfile,
+ "<watermark key=\"mr13\" description=\"vmap_high\" bytes=\"%d\"/>\n",
+ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
+#endif
+ seq_printf(sfile,
+ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%d\"/>\n",
+ SysRAMTrueWaterMark());
+ seq_printf(sfile,
+ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%d\"/>\n",
+ g_SysRAMHighWaterMark);
+ seq_printf(sfile,
+ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%d\"/>\n",
+ g_IOMemWaterMark);
+ seq_printf(sfile,
+ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%d\"/>\n",
+ g_IOMemHighWaterMark);
+
+#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
+ seq_printf(sfile,
+ "<watermark key=\"mr18\" description=\"page_pool_current\" bytes=\"%d\"/>\n",
+ PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount)));
+#endif
+ seq_printf(sfile, "</meminfo_header>\n");
+
+#endif
+ return;
+ }
+
+ if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
+ {
+ seq_printf(sfile,
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
+#else
+ "<allocation>\n"
+ "\t<type>%s</type>\n"
+ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
+ "\t<cpu_physical>%08x</cpu_physical>\n"
+ "\t<bytes>%d</bytes>\n"
+ "\t<pid>%d</pid>\n"
+ "\t<private>%s</private>\n"
+ "\t<filename>%s</filename>\n"
+ "\t<line>%d</line>\n"
+ "</allocation>\n",
+#endif
+ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
+ psRecord->pvCpuVAddr,
+ psRecord->ulCpuPAddr,
+ psRecord->ui32Bytes,
+ psRecord->pid,
+ "NULL",
+ psRecord->pszFileName,
+ psRecord->ui32Line);
+ }
+ else
+ {
+ seq_printf(sfile,
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
+#else
+ "<allocation>\n"
+ "\t<type>%s</type>\n"
+ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
+ "\t<cpu_physical>%08x</cpu_physical>\n"
+ "\t<bytes>%d</bytes>\n"
+ "\t<pid>%d</pid>\n"
+ "\t<private>%s</private>\n"
+ "\t<filename>%s</filename>\n"
+ "\t<line>%d</line>\n"
+ "</allocation>\n",
+#endif
+ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
+ psRecord->pvCpuVAddr,
+ psRecord->ulCpuPAddr,
+ psRecord->ui32Bytes,
+ psRecord->pid,
+ KMemCacheNameWrapper(psRecord->pvPrivateData),
+ psRecord->pszFileName,
+ psRecord->ui32Line);
+ }
+}
+
+#endif
+
+
+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
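+/* Decode the PVRSRV_HAP_* cache-type and map-type bits into a readable
+ * string.  The result points at a static buffer, so it is only valid until
+ * the next call. */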
+const IMG_CHAR *
+HAPFlagsToString(IMG_UINT32 ui32Flags)
+{
+ static IMG_CHAR szFlags[50];
+ IMG_INT32 i32Pos = 0;
+ IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
+ IMG_CHAR *apszCacheTypes[] = {
+ "UNCACHED",
+ "CACHED",
+ "WRITECOMBINE",
+ "UNKNOWN"
+ };
+ IMG_CHAR *apszMapType[] = {
+ "KERNEL_ONLY",
+ "SINGLE_PROCESS",
+ "MULTI_PROCESS",
+ "FROM_EXISTING_PROCESS",
+ "NO_CPU_VIRTUAL",
+ "UNKNOWN"
+ };
+
+
+ if (ui32Flags & PVRSRV_HAP_UNCACHED) {
+ ui32CacheTypeIndex = 0;
+ } else if (ui32Flags & PVRSRV_HAP_CACHED) {
+ ui32CacheTypeIndex = 1;
+ } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) {
+ ui32CacheTypeIndex = 2;
+ } else {
+ ui32CacheTypeIndex = 3;
+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
+ __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
+ }
+
+
+ if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) {
+ ui32MapTypeIndex = 0;
+ } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) {
+ ui32MapTypeIndex = 1;
+ } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) {
+ ui32MapTypeIndex = 2;
+ } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) {
+ ui32MapTypeIndex = 3;
+ } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) {
+ ui32MapTypeIndex = 4;
+ } else {
+ ui32MapTypeIndex = 5;
+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
+ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
+ }
+
+ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
+ if (i32Pos <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)",
+ __FUNCTION__, ui32CacheTypeIndex, i32Pos));
+ szFlags[0] = 0;
+ }
+ else
+ {
+ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
+ }
+
+ return szFlags;
+}
+#endif
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+static IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
+ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%d bytes",
+ __FUNCTION__,
+ psCurrentRecord->psLinuxMemArea,
+ LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
+ psCurrentRecord->psLinuxMemArea->ui32ByteSize));
+
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+}
+#endif
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+static IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord)
+
+{
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
+ "type=%s "
+ "CpuVAddr=%p "
+ "CpuPAddr=0x%08x, "
+ "allocated @ file=%s,line=%d",
+ __FUNCTION__,
+ DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
+ psCurrentRecord->pvCpuVAddr,
+ psCurrentRecord->ulCpuPAddr,
+ psCurrentRecord->pszFileName,
+ psCurrentRecord->ui32Line));
+ switch (psCurrentRecord->eAllocType)
+ {
+ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
+ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
+ break;
+ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
+ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
+ break;
+ case DEBUG_MEM_ALLOC_TYPE_IO:
+
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__);
+ break;
+ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
+ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
+ break;
+ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
+ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__);
+ break;
+ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
+ KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
+ break;
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ case DEBUG_MEM_ALLOC_TYPE_VMAP:
+ VUnmapWrapper(psCurrentRecord->pvCpuVAddr);
+ break;
+#endif
+ default:
+ PVR_ASSERT(0);
+ }
+}
+#endif
+
+
+#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
+static struct shrinker g_sShrinker =
+{
+ .shrink = ShrinkPagePool,
+ .seeks = DEFAULT_SEEKS
+};
+
+static IMG_BOOL g_bShrinkerRegistered;
+#endif
+
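+/* Module teardown: report and release anything still recorded in the debug
+ * lists, remove the proc entries, unregister the page pool shrinker, free
+ * the page pool and destroy the kmem caches. */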
+IMG_VOID
+LinuxMMCleanup(IMG_VOID)
+{
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ {
+ if (g_LinuxMemAreaCount)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocations unfreed (%d bytes)",
+ __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
+ }
+
+ List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords, LinuxMMCleanup_MemAreas_ForEachCb);
+
+ if (g_SeqFileMemArea)
+ {
+ RemoveProcEntrySeq(g_SeqFileMemArea);
+ }
+ }
+#endif
+
+#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
+ if (g_bShrinkerRegistered)
+ {
+ unregister_shrinker(&g_sShrinker);
+ }
+#endif
+
+
+ FreePagePool();
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ {
+
+
+ List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords, LinuxMMCleanup_MemRecords_ForEachVa);
+
+ if (g_SeqFileMemoryRecords)
+ {
+ RemoveProcEntrySeq(g_SeqFileMemoryRecords);
+ }
+ }
+#endif
+
+ if (g_PsLinuxMemAreaCache)
+ {
+ KMemCacheDestroyWrapper(g_PsLinuxMemAreaCache);
+ }
+
+ if (g_PsLinuxPagePoolCache)
+ {
+ KMemCacheDestroyWrapper(g_PsLinuxPagePoolCache);
+ }
+}
+
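+/* Module init: set up the debug mutex and proc entries, create the
+ * LinuxMemArea kmem cache and (when a page pool is configured) the pool
+ * cache, and register the pool shrinker.  Any failure is unwound via
+ * LinuxMMCleanup(). */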
+PVRSRV_ERROR
+LinuxMMInit(IMG_VOID)
+{
+#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ LinuxInitMutex(&g_sDebugMutex);
+#endif
+
+#if defined(DEBUG_LINUX_MEM_AREAS)
+ {
+ g_SeqFileMemArea = CreateProcReadEntrySeq(
+ "mem_areas",
+ NULL,
+ ProcSeqNextMemArea,
+ ProcSeqShowMemArea,
+ ProcSeqOff2ElementMemArea,
+ ProcSeqStartstopDebugMutex
+ );
+ if (!g_SeqFileMemArea)
+ {
+ goto failed;
+ }
+ }
+#endif
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ {
+ g_SeqFileMemoryRecords = CreateProcReadEntrySeq(
+ "meminfo",
+ NULL,
+ ProcSeqNextMemoryRecords,
+ ProcSeqShowMemoryRecords,
+ ProcSeqOff2ElementMemoryRecords,
+ ProcSeqStartstopDebugMutex
+ );
+ if (!g_SeqFileMemoryRecords)
+ {
+ goto failed;
+ }
+ }
+#endif
+
+ g_PsLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
+ if (!g_PsLinuxMemAreaCache)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate mem area kmem_cache", __FUNCTION__));
+ goto failed;
+ }
+
+#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
+ g_iPagePoolMaxEntries = PVR_LINUX_MEM_AREA_POOL_MAX_PAGES;
+ if (g_iPagePoolMaxEntries <= 0 || g_iPagePoolMaxEntries > INT_MAX/2)
+ {
+ g_iPagePoolMaxEntries = INT_MAX/2;
+ PVR_TRACE(("%s: No limit set for page pool size", __FUNCTION__));
+ }
+ else
+ {
+ PVR_TRACE(("%s: Maximum page pool size: %d", __FUNCTION__, g_iPagePoolMaxEntries));
+ }
+
+ g_PsLinuxPagePoolCache = KMemCacheCreateWrapper("img-mm-pool", sizeof(LinuxPagePoolEntry), 0, 0);
+ if (!g_PsLinuxPagePoolCache)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate page pool kmem_cache", __FUNCTION__));
+ goto failed;
+ }
+#endif
+
+#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
+ register_shrinker(&g_sShrinker);
+ g_bShrinkerRegistered = IMG_TRUE;
+#endif
+
+ return PVRSRV_OK;
+
+failed:
+ LinuxMMCleanup();
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
diff --git a/drivers/gpu/pvr/mm.h b/drivers/gpu/pvr/mm.h
new file mode 100644
index 0000000..e62bf33
--- /dev/null
+++ b/drivers/gpu/pvr/mm.h
@@ -0,0 +1,384 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __IMG_LINUX_MM_H__
+#define __IMG_LINUX_MM_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+
+#include <asm/io.h>
+
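+/* Helpers for converting between physical addresses, PFNs, byte ranges and
+ * page counts, plus kernel-version wrappers for the remap/insert-page APIs. */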
+#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
+#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
+
+#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
+
+#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
+
+#define PAGES_TO_BYTES(pages) ((pages) << PAGE_SHIFT)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
+#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot)
+#else
+#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
+#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot)
+#else
+#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
+#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot);
+#else
+#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot);
+#endif
+#endif
+
+static inline IMG_UINT32 VMallocToPhys(IMG_VOID *pCpuVAddr)
+{
+ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
+
+}
+
+typedef enum {
+ LINUX_MEM_AREA_IOREMAP,
+ LINUX_MEM_AREA_EXTERNAL_KV,
+ LINUX_MEM_AREA_IO,
+ LINUX_MEM_AREA_VMALLOC,
+ LINUX_MEM_AREA_ALLOC_PAGES,
+ LINUX_MEM_AREA_SUB_ALLOC,
+ LINUX_MEM_AREA_ION,
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ LINUX_MEM_AREA_VMAP,
+#endif
+ LINUX_MEM_AREA_TYPE_COUNT
+}LINUX_MEM_AREA_TYPE;
+
+typedef struct _LinuxMemArea LinuxMemArea;
+
+
+struct _LinuxMemArea {
+ LINUX_MEM_AREA_TYPE eAreaType;
+ union _uData
+ {
+ struct _sIORemap
+ {
+
+ IMG_CPU_PHYADDR CPUPhysAddr;
+ IMG_VOID *pvIORemapCookie;
+ }sIORemap;
+ struct _sExternalKV
+ {
+
+ IMG_BOOL bPhysContig;
+ union {
+
+ IMG_SYS_PHYADDR SysPhysAddr;
+ IMG_SYS_PHYADDR *pSysPhysAddr;
+ } uPhysAddr;
+ IMG_VOID *pvExternalKV;
+ }sExternalKV;
+ struct _sIO
+ {
+
+ IMG_CPU_PHYADDR CPUPhysAddr;
+ }sIO;
+ struct _sVmalloc
+ {
+
+ IMG_VOID *pvVmallocAddress;
+#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+#endif
+ }sVmalloc;
+ struct _sPageList
+ {
+
+ struct page **ppsPageList;
+ IMG_HANDLE hBlockPageList;
+ }sPageList;
+ struct _sIONTilerAlloc
+ {
+
+ IMG_CPU_PHYADDR *pCPUPhysAddrs;
+ struct ion_handle *psIONHandle[2];
+ }sIONTilerAlloc;
+ struct _sSubAlloc
+ {
+
+ LinuxMemArea *psParentLinuxMemArea;
+ IMG_UINT32 ui32ByteOffset;
+ }sSubAlloc;
+ }uData;
+
+ IMG_UINT32 ui32ByteSize;
+
+ IMG_UINT32 ui32AreaFlags;
+
+ IMG_BOOL bMMapRegistered;
+
+ IMG_BOOL bNeedsCacheInvalidate;
+
+
+ struct list_head sMMapItem;
+
+
+ struct list_head sMMapOffsetStructList;
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
+typedef kmem_cache_t LinuxKMemCache;
+#else
+typedef struct kmem_cache LinuxKMemCache;
+#endif
+
+
+PVRSRV_ERROR LinuxMMInit(IMG_VOID);
+
+
+IMG_VOID LinuxMMCleanup(IMG_VOID);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define KMallocWrapper(ui32ByteSize, uFlags) _KMallocWrapper(ui32ByteSize, uFlags, __FILE__, __LINE__)
+#else
+#define KMallocWrapper(ui32ByteSize, uFlags) _KMallocWrapper(ui32ByteSize, uFlags, NULL, 0)
+#endif
+IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, gfp_t uFlags, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
+#else
+#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
+#endif
+IMG_VOID _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
+#else
+#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
+#endif
+IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
+#else
+#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
+#endif
+IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+
+LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
+
+
+IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
+ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
+#else
+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
+ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
+#endif
+IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line);
+
+
+LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
+
+
+IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags);
+
+
+IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define IOUnmapWrapper(pvIORemapCookie) \
+ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
+#else
+#define IOUnmapWrapper(pvIORemapCookie) \
+ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
+#endif
+IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+
+struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
+
+
+LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
+
+
+IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
+
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
+#else
+#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
+IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+#else
+IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+#endif
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
+#else
+#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
+#endif
+IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+
+const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
+
+
+LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
+
+
+IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+
+LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
+
+
+IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+
+#if defined(CONFIG_ION_OMAP)
+
+LinuxMemArea *
+NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags,
+ IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength);
+
+
+IMG_VOID FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea);
+
+#else
+
+static inline LinuxMemArea *
+NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags,
+ IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32AreaFlags);
+ PVR_UNREFERENCED_PARAMETER(pvPrivData);
+ PVR_UNREFERENCED_PARAMETER(ui32PrivDataLength);
+ BUG();
+ return IMG_NULL;
+}
+
+static inline IMG_VOID FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVR_UNREFERENCED_PARAMETER(psLinuxMemArea);
+ BUG();
+}
+
+#endif
+
+
+LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
+ IMG_UINT32 ui32ByteOffset,
+ IMG_UINT32 ui32Bytes);
+
+
+IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
+
+
+#if defined(LINUX_MEM_AREAS_DEBUG)
+IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
+#else
+#define LinuxMemAreaRegister(X)
+#endif
+
+
+IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
+
+
+IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
+
+
+#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr)
+
+IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
+
+static inline LinuxMemArea *
+LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
+{
+ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
+ }
+ else
+ {
+ return psLinuxMemArea;
+ }
+}
+
+
+static inline LINUX_MEM_AREA_TYPE
+LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
+{
+ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
+}
+
+
+const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
+
+
+#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
+const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/mmap.c b/drivers/gpu/pvr/mmap.c
new file mode 100644
index 0000000..a63223c
--- /dev/null
+++ b/drivers/gpu/pvr/mmap.c
@@ -0,0 +1,1220 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
+#include <linux/wrapper.h>
+#endif
+#include <linux/slab.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+#include <linux/highmem.h>
+#endif
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/shmparam.h>
+#include <asm/pgtable.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+#include <linux/sched.h>
+#include <asm/current.h>
+#endif
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#endif
+
+#include "services_headers.h"
+
+#include "pvrmmap.h"
+#include "mutils.h"
+#include "mmap.h"
+#include "mm.h"
+#include "proc.h"
+#include "mutex.h"
+#include "handle.h"
+#include "perproc.h"
+#include "env_perproc.h"
+#include "bridged_support.h"
+#if defined(SUPPORT_DRI_DRM)
+#include "pvr_drm.h"
+#endif
+
+#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE)
+#error "The mmap code requires PVR_SECURE_HANDLES"
+#endif
+
+PVRSRV_LINUX_MUTEX g_sMMapMutex;
+
+static LinuxKMemCache *g_psMemmapCache = NULL;
+static LIST_HEAD(g_sMMapAreaList);
+static LIST_HEAD(g_sMMapOffsetStructList);
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+static IMG_UINT32 g_ui32RegisteredAreas = 0;
+static IMG_UINT32 g_ui32TotalByteSize = 0;
+#endif
+
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+static struct proc_dir_entry *g_ProcMMap;
+#endif
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+#define MMAP2_PGOFF_RESOLUTION (32-PAGE_SHIFT+12)
+#define RESERVED_PGOFF_BITS 1
+#define MAX_MMAP_HANDLE ((1UL<<(MMAP2_PGOFF_RESOLUTION-RESERVED_PGOFF_BITS))-1)
+
+#define FIRST_PHYSICAL_PFN 0
+#define LAST_PHYSICAL_PFN (FIRST_PHYSICAL_PFN + MAX_MMAP_HANDLE)
+#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
+#define LAST_SPECIAL_PFN (FIRST_SPECIAL_PFN + MAX_MMAP_HANDLE)
+
+#else
+
+#if PAGE_SHIFT != 12
+#error This build variant has not yet been made non-4KB page-size aware
+#endif
+
+#if defined(PVR_MMAP_OFFSET_BASE)
+#define FIRST_SPECIAL_PFN PVR_MMAP_OFFSET_BASE
+#else
+#define FIRST_SPECIAL_PFN 0x80000000UL
+#endif
+
+#if defined(PVR_NUM_MMAP_HANDLES)
+#define MAX_MMAP_HANDLE PVR_NUM_MMAP_HANDLES
+#else
+#define MAX_MMAP_HANDLE 0x7fffffffUL
+#endif
+
+#endif
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+static inline IMG_BOOL
+PFNIsPhysical(IMG_UINT32 pfn)
+{
+
+	return (pfn <= LAST_PHYSICAL_PFN) ? IMG_TRUE : IMG_FALSE;
+}
+
+static inline IMG_BOOL
+PFNIsSpecial(IMG_UINT32 pfn)
+{
+
+	return (pfn >= FIRST_SPECIAL_PFN) ? IMG_TRUE : IMG_FALSE;
+}
+#endif
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+static inline IMG_HANDLE
+MMapOffsetToHandle(IMG_UINT32 pfn)
+{
+ if (PFNIsPhysical(pfn))
+ {
+ PVR_ASSERT(PFNIsPhysical(pfn));
+ return IMG_NULL;
+ }
+ return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
+}
+#endif
+
+static inline IMG_UINT32
+#if defined (SUPPORT_SID_INTERFACE)
+HandleToMMapOffset(IMG_SID hHandle)
+#else
+HandleToMMapOffset(IMG_HANDLE hHandle)
+#endif
+{
+ IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ if (PFNIsSpecial(ulHandle))
+ {
+ PVR_ASSERT(PFNIsSpecial(ulHandle));
+ return 0;
+ }
+#endif
+ return ulHandle + FIRST_SPECIAL_PFN;
+}
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+static inline IMG_BOOL
+LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
+{
+ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
+}
+#endif
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+static inline IMG_UINT32
+GetCurrentThreadID(IMG_VOID)
+{
+
+ return (IMG_UINT32)current->pid;
+}
+#endif
+
+static PKV_OFFSET_STRUCT
+CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct;
+#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
+ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
+#endif
+
+#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8x)",
+ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
+#endif
+
+ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
+
+ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
+
+ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
+ if(psOffsetStruct == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
+ return IMG_NULL;
+ }
+
+ psOffsetStruct->ui32MMapOffset = ui32Offset;
+
+ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
+
+ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
+
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ psOffsetStruct->ui32TID = GetCurrentThreadID();
+#endif
+ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+
+ psOffsetStruct->pszName = pszName;
+#endif
+
+ list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
+
+ return psOffsetStruct;
+}
+
+
+static IMG_VOID
+DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
+{
+#ifdef DEBUG
+ IMG_CPU_PHYADDR CpuPAddr;
+ CpuPAddr = LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0);
+#endif
+
+ list_del(&psOffsetStruct->sAreaItem);
+
+ if (psOffsetStruct->bOnMMapList)
+ {
+ list_del(&psOffsetStruct->sMMapItem);
+ }
+
+#ifdef DEBUG
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
+ "psLinuxMemArea=%p, CpuPAddr=0x%08X", __FUNCTION__,
+ psOffsetStruct->psLinuxMemArea,
+ CpuPAddr.uiAddr));
+#endif
+
+ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
+}
+
+
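+/* Work out the page-aligned size user space must mmap and the offset of the
+ * area's start address within its first page. */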
+static inline IMG_VOID
+DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
+ IMG_UINT32 *pui32RealByteSize,
+ IMG_UINT32 *pui32ByteOffset)
+{
+ IMG_UINT32 ui32PageAlignmentOffset;
+ IMG_CPU_PHYADDR CpuPAddr;
+
+ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
+ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
+
+ *pui32ByteOffset = ui32PageAlignmentOffset;
+
+ *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
+}
+
+
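+/* Convert a per-process memory handle into the mmap offset, real (page
+ * aligned) size and byte offset that user space must pass to mmap(2).  An
+ * existing mapping belonging to the process is reused; otherwise a new
+ * offset structure is created and queued on the global list. */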
+PVRSRV_ERROR
+PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle,
+#else
+ IMG_HANDLE hMHandle,
+#endif
+ IMG_UINT32 *pui32MMapOffset,
+ IMG_UINT32 *pui32ByteOffset,
+ IMG_UINT32 *pui32RealByteSize,
+ IMG_UINT32 *pui32UserVAddr)
+{
+ LinuxMemArea *psLinuxMemArea;
+ PKV_OFFSET_STRUCT psOffsetStruct;
+ IMG_HANDLE hOSMemHandle;
+ PVRSRV_ERROR eError;
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
+
+ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
+ if (eError != PVRSRV_OK)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
+#endif
+
+ goto exit_unlock;
+ }
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
+ pui32RealByteSize,
+ pui32ByteOffset);
+
+
+ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
+ {
+ if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
+ {
+
+ PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
+
+ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
+ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
+ PVRSRVOffsetStructIncRef(psOffsetStruct);
+
+ eError = PVRSRV_OK;
+ goto exit_unlock;
+ }
+ }
+
+
+ *pui32UserVAddr = 0;
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
+ {
+ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
+ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
+ }
+ else
+#endif
+ {
+ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
+#endif
+ }
+
+ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
+ if (psOffsetStruct == IMG_NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto exit_unlock;
+ }
+
+
+ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
+
+ psOffsetStruct->bOnMMapList = IMG_TRUE;
+
+ PVRSRVOffsetStructIncRef(psOffsetStruct);
+
+ eError = PVRSRV_OK;
+
+
+
+
+ *pui32MMapOffset = *pui32MMapOffset << (PAGE_SHIFT - 12);
+
+exit_unlock:
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+ return eError;
+}
+
+
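+/* Drop the calling process's reference on the mapping data for a handle and
+ * tell the caller whether user space should now munmap the range. */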
+PVRSRV_ERROR
+PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle,
+#else
+ IMG_HANDLE hMHandle,
+#endif
+ IMG_BOOL *pbMUnmap,
+ IMG_UINT32 *pui32RealByteSize,
+ IMG_UINT32 *pui32UserVAddr)
+{
+ LinuxMemArea *psLinuxMemArea;
+ PKV_OFFSET_STRUCT psOffsetStruct;
+ IMG_HANDLE hOSMemHandle;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
+
+ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
+ if (eError != PVRSRV_OK)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
+#endif
+
+ goto exit_unlock;
+ }
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+
+ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
+ {
+ if (psOffsetStruct->ui32PID == ui32PID)
+ {
+ if (psOffsetStruct->ui32RefCount == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area %p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
+ eError = PVRSRV_ERROR_STILL_MAPPED;
+ goto exit_unlock;
+ }
+
+ PVRSRVOffsetStructDecRef(psOffsetStruct);
+
+ *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));
+
+ *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
+ *pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
+
+ eError = PVRSRV_OK;
+ goto exit_unlock;
+ }
+ }
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %x (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %p (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));
+#endif
+
+ eError = PVRSRV_ERROR_MAPPING_NOT_FOUND;
+
+exit_unlock:
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+ return eError;
+}
+
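+/* Find the offset structure registered for this mmap offset, size and
+ * calling process (and, for physical mappings, the calling thread). */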
+static inline PKV_OFFSET_STRUCT
+FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct;
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ IMG_UINT32 ui32TID = GetCurrentThreadID();
+#endif
+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
+
+ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
+ {
+ if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
+ {
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+
+ if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
+#endif
+ {
+ return psOffsetStruct;
+ }
+ }
+ }
+
+ return IMG_NULL;
+}
+
+
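+/* Map a memory area into the user VMA.  Sub-allocations recurse into their
+ * parent; physically contiguous areas are mapped in one call, otherwise each
+ * page is inserted individually (using a mixed map on builds where a PFN may
+ * have no struct page). */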
+static IMG_BOOL
+DoMapToUser(LinuxMemArea *psLinuxMemArea,
+ struct vm_area_struct* ps_vma,
+ IMG_UINT32 ui32ByteOffset)
+{
+ IMG_UINT32 ui32ByteSize;
+
+ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
+ ps_vma,
+ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
+ }
+
+
+ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
+ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
+
+#if defined (__sparc__)
+
+#error "SPARC not supported"
+#endif
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ if (PFNIsPhysical(ps_vma->vm_pgoff))
+ {
+ IMG_INT result;
+
+ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
+ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);
+
+ result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);
+
+ if(result == 0)
+ {
+ return IMG_TRUE;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
+ }
+#endif
+
+ {
+
+ IMG_UINT32 ulVMAPos;
+ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
+ IMG_UINT32 ui32PA;
+#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ IMG_BOOL bMixedMap = IMG_FALSE;
+#endif
+
+ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
+ {
+ IMG_UINT32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
+
+ if (!pfn_valid(pfn))
+ {
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%x", __FUNCTION__, pfn));
+ return IMG_FALSE;
+#else
+ bMixedMap = IMG_TRUE;
+#endif
+ }
+ }
+
+#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ if (bMixedMap)
+ {
+ ps_vma->vm_flags |= VM_MIXEDMAP;
+ }
+#endif
+
+ ulVMAPos = ps_vma->vm_start;
+ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
+ {
+ IMG_UINT32 pfn;
+ IMG_INT result;
+
+ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
+
+#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+ if (bMixedMap)
+ {
+ result = vm_insert_mixed(ps_vma, ulVMAPos, pfn);
+ if(result != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_mixed failed (%d)", __FUNCTION__, result));
+ return IMG_FALSE;
+ }
+ }
+ else
+#endif
+ {
+ struct page *psPage;
+
+ PVR_ASSERT(pfn_valid(pfn));
+
+ psPage = pfn_to_page(pfn);
+
+ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
+ if(result != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
+ return IMG_FALSE;
+ }
+ }
+ ulVMAPos += PAGE_SIZE;
+ }
+ }
+
+ return IMG_TRUE;
+}
+
+
+static IMG_VOID
+MMapVOpenNoLock(struct vm_area_struct* ps_vma)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
+
+ PVR_ASSERT(psOffsetStruct != IMG_NULL);
+ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
+
+ PVRSRVOffsetStructIncMapped(psOffsetStruct);
+
+ if (psOffsetStruct->ui32Mapped > 1)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %u)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
+ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
+ }
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %d, ui32Mapped %d",
+ __FUNCTION__,
+ psOffsetStruct->psLinuxMemArea,
+ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
+ psOffsetStruct->ui32MMapOffset,
+ psOffsetStruct->ui32Mapped));
+#endif
+}
+
+
+static void
+MMapVOpen(struct vm_area_struct* ps_vma)
+{
+ LinuxLockMutex(&g_sMMapMutex);
+
+ MMapVOpenNoLock(ps_vma);
+
+ LinuxUnLockMutex(&g_sMMapMutex);
+}
+
+
+static IMG_VOID
+MMapVCloseNoLock(struct vm_area_struct* ps_vma)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
+	PVR_ASSERT(psOffsetStruct != IMG_NULL);
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: psLinuxMemArea %p, CpuVAddr %p ui32MMapOffset %d, ui32Mapped %d",
+ __FUNCTION__,
+ psOffsetStruct->psLinuxMemArea,
+ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
+ psOffsetStruct->ui32MMapOffset,
+ psOffsetStruct->ui32Mapped));
+#endif
+
+ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
+ PVRSRVOffsetStructDecMapped(psOffsetStruct);
+ if (psOffsetStruct->ui32Mapped == 0)
+ {
+ if (psOffsetStruct->ui32RefCount != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct %p has non-zero reference count (ui32RefCount = %u). User mode address of start of mapping: 0x%x", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
+ }
+
+ DestroyOffsetStruct(psOffsetStruct);
+ }
+
+ ps_vma->vm_private_data = NULL;
+}
+
+static void
+MMapVClose(struct vm_area_struct* ps_vma)
+{
+ LinuxLockMutex(&g_sMMapMutex);
+
+ MMapVCloseNoLock(ps_vma);
+
+ LinuxUnLockMutex(&g_sMMapMutex);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct;
+ LinuxMemArea *psLinuxMemArea;
+ unsigned long ulOffset;
+ int iRetVal = -EINVAL;
+ IMG_VOID *pvKernelAddr;
+
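+ /* Access handler for ptrace()-style access to the mapping: data is copied out via the kernel virtual address when one exists, otherwise by kmap()ing the backing page; the write flag is ignored. */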
+ LinuxLockMutex(&g_sMMapMutex);
+
+ psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
+ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
+ ulOffset = addr - ps_vma->vm_start;
+
+ if (ulOffset+len > psLinuxMemArea->ui32ByteSize)
+ goto exit_unlock;
+
+ pvKernelAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
+
+ if (pvKernelAddr)
+ {
+ memcpy(buf, pvKernelAddr+ulOffset, len);
+ iRetVal = len;
+ }
+ else
+ {
+ IMG_UINT32 pfn, ui32OffsetInPage;
+ struct page *page;
+
+ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ulOffset);
+
+ if (!pfn_valid(pfn))
+ goto exit_unlock;
+
+ page = pfn_to_page(pfn);
+ ui32OffsetInPage = ADDR_TO_PAGE_OFFSET(ulOffset);
+
+ if (ui32OffsetInPage+len > PAGE_SIZE)
+ goto exit_unlock;
+
+ pvKernelAddr = kmap(page);
+ memcpy(buf, pvKernelAddr+ui32OffsetInPage, len);
+ kunmap(page);
+
+ iRetVal = len;
+ }
+
+exit_unlock:
+ LinuxUnLockMutex(&g_sMMapMutex);
+ return iRetVal;
+}
+#endif
+
+static struct vm_operations_struct MMapIOOps =
+{
+ .open=MMapVOpen,
+ .close=MMapVClose,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+ .access=MMapVAccess,
+#endif
+};
+
+
+int
+PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
+{
+ LinuxMemArea *psFlushMemArea = IMG_NULL;
+ PKV_OFFSET_STRUCT psOffsetStruct;
+ IMG_UINT32 ui32ByteSize;
+ IMG_VOID *pvBase = IMG_NULL;
+ int iRetVal = 0;
+
+ PVR_UNREFERENCED_PARAMETER(pFile);
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
+ " and ui32ByteSize %d(0x%08x)",
+ __FUNCTION__,
+ ps_vma->vm_pgoff,
+ ui32ByteSize, ui32ByteSize));
+
+ psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
+ if (psOffsetStruct == IMG_NULL)
+ {
+#if defined(SUPPORT_DRI_DRM)
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+#if !defined(SUPPORT_DRI_DRM_EXT)
+
+ return drm_mmap(pFile, ps_vma);
+#else
+
+ return -ENOENT;
+#endif
+#else
+ PVR_UNREFERENCED_PARAMETER(pFile);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Attempted to mmap unregistered area at vm_pgoff 0x%lx",
+ __FUNCTION__, ps_vma->vm_pgoff));
+ iRetVal = -EINVAL;
+#endif
+ goto unlock_and_return;
+ }
+
+ list_del(&psOffsetStruct->sMMapItem);
+ psOffsetStruct->bOnMMapList = IMG_FALSE;
+
+
+ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+ ((ps_vma->vm_flags & VM_SHARED) == 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
+ iRetVal = -EINVAL;
+ goto unlock_and_return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
+ __FUNCTION__, psOffsetStruct->psLinuxMemArea));
+
+ ps_vma->vm_flags |= VM_RESERVED;
+ ps_vma->vm_flags |= VM_IO;
+
+
+ ps_vma->vm_flags |= VM_DONTEXPAND;
+
+
+ ps_vma->vm_flags |= VM_DONTCOPY;
+
+ ps_vma->vm_private_data = (void *)psOffsetStruct;
+
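+ /* Select the user-space page protection from the memory area's cache attribute flags; cached areas keep the default protection. */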
+ switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
+ {
+ case PVRSRV_HAP_CACHED:
+
+ break;
+ case PVRSRV_HAP_WRITECOMBINE:
+ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
+ break;
+ case PVRSRV_HAP_UNCACHED:
+ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
+ iRetVal = -EINVAL;
+ goto unlock_and_return;
+ }
+
+
+ ps_vma->vm_ops = &MMapIOOps;
+
+ if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
+ {
+ iRetVal = -EAGAIN;
+ goto unlock_and_return;
+ }
+
+ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0);
+
+ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
+
+
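+ /* If the area still needs a CPU cache invalidate, record the range now and perform the flush after the mmap mutex has been released (see below). */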
+ if(psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate)
+ {
+ IMG_UINT32 ui32ByteOffset, ui32DummyByteSize;
+
+ DetermineUsersSizeAndByteOffset(psOffsetStruct->psLinuxMemArea,
+ &ui32DummyByteSize,
+ &ui32ByteOffset);
+
+ pvBase = (IMG_VOID *)ps_vma->vm_start + ui32ByteOffset;
+ psFlushMemArea = psOffsetStruct->psLinuxMemArea;
+
+ psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate = IMG_FALSE;
+ }
+
+
+ MMapVOpenNoLock(ps_vma);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
+ __FUNCTION__, ps_vma->vm_pgoff));
+
+unlock_and_return:
+ if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
+ {
+ DestroyOffsetStruct(psOffsetStruct);
+ }
+
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+ if(psFlushMemArea)
+ {
+ OSInvalidateCPUCacheRangeKM(psFlushMemArea, pvBase,
+ psFlushMemArea->ui32ByteSize);
+ }
+
+ return iRetVal;
+}
+
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+
+static void ProcSeqStartstopMMapRegistrations(struct seq_file *sfile, IMG_BOOL start)
+{
+ if(start)
+ {
+ LinuxLockMutex(&g_sMMapMutex);
+ }
+ else
+ {
+ LinuxUnLockMutex(&g_sMMapMutex);
+ }
+}
+
+
+static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
+{
+ LinuxMemArea *psLinuxMemArea;
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
+ {
+ PKV_OFFSET_STRUCT psOffsetStruct;
+
+ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
+ {
+ off--;
+ if (off == 0)
+ {
+ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
+ return (void*)psOffsetStruct;
+ }
+ }
+ }
+ return (void*)0;
+}
+
+static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
+{
+ return ProcSeqOff2ElementMMapRegistrations(sfile,off);
+}
+
+
+static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el)
+{
+ KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
+ LinuxMemArea *psLinuxMemArea;
+ IMG_UINT32 ui32RealByteSize;
+ IMG_UINT32 ui32ByteOffset;
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf( sfile,
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ "Allocations registered for mmap: %u\n"
+ "In total these areas correspond to %u bytes\n"
+ "psLinuxMemArea "
+ "UserVAddr "
+ "KernelVAddr "
+ "CpuPAddr "
+ "MMapOffset "
+ "ByteLength "
+ "LinuxMemType "
+ "Pid Name Flags\n",
+#else
+ "<mmap_header>\n"
+ "\t<count>%u</count>\n"
+ "\t<bytes>%u</bytes>\n"
+ "</mmap_header>\n",
+#endif
+ g_ui32RegisteredAreas,
+ g_ui32TotalByteSize
+ );
+ return;
+ }
+
+ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
+
+ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
+ &ui32RealByteSize,
+ &ui32ByteOffset);
+
+ seq_printf( sfile,
+#if !defined(DEBUG_LINUX_XML_PROC_FILES)
+ "%-8p %08x %-8p %08x %08x %-8d %-24s %-5u %-8s %08x(%s)\n",
+#else
+ "<mmap_record>\n"
+ "\t<pointer>%-8p</pointer>\n"
+ "\t<user_virtual>%-8x</user_virtual>\n"
+ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
+ "\t<cpu_physical>%08x</cpu_physical>\n"
+ "\t<mmap_offset>%08x</mmap_offset>\n"
+ "\t<bytes>%-8d</bytes>\n"
+ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
+ "\t<pid>%-5u</pid>\n"
+ "\t<name>%-8s</name>\n"
+ "\t<flags>%08x</flags>\n"
+ "\t<flags_string>%s</flags_string>\n"
+ "</mmap_record>\n",
+#endif
+ psLinuxMemArea,
+ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
+ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
+ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
+ psOffsetStruct->ui32MMapOffset,
+ psLinuxMemArea->ui32ByteSize,
+ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
+ psOffsetStruct->ui32PID,
+ psOffsetStruct->pszName,
+ psLinuxMemArea->ui32AreaFlags,
+ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
+}
+
+#endif
+
+
+PVRSRV_ERROR
+PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVRSRV_ERROR eError;
+#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
+ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
+#endif
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8x)",
+ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
+#endif
+
+ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
+
+
+ if(psLinuxMemArea->bMMapRegistered)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
+ __FUNCTION__, psLinuxMemArea));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto exit_unlock;
+ }
+
+ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
+
+ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ g_ui32RegisteredAreas++;
+
+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
+ }
+#endif
+
+ eError = PVRSRV_OK;
+
+exit_unlock:
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+ return eError;
+}
+
+
+PVRSRV_ERROR
+PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
+{
+ PVRSRV_ERROR eError;
+ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
+
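+ /* Refuse to unregister while any offset structure is still mapped into user space; structures that were registered but never mapped are destroyed here. */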
+ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
+ {
+ if (psOffsetStruct->ui32Mapped != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %u", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
+ dump_stack();
+ PVRSRVDumpRefCountCCB();
+ eError = PVRSRV_ERROR_STILL_MAPPED;
+ goto exit_unlock;
+ }
+ else
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
+ }
+
+ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
+
+ DestroyOffsetStruct(psOffsetStruct);
+ }
+
+ list_del(&psLinuxMemArea->sMMapItem);
+
+ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ g_ui32RegisteredAreas--;
+ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
+ }
+#endif
+
+ eError = PVRSRV_OK;
+
+exit_unlock:
+ LinuxUnLockMutex(&g_sMMapMutex);
+ return eError;
+}
+
+
+PVRSRV_ERROR
+LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
+{
+ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
+
+ return PVRSRV_OK;
+}
+
+IMG_VOID
+LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
+ IMG_BOOL bWarn = IMG_FALSE;
+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
+
+ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
+
+ LinuxLockMutex(&g_sMMapMutex);
+
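+ /* Tear down any offset structures this process registered for mmap but never actually mapped. */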
+ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
+ {
+ if (psOffsetStruct->ui32PID == ui32PID)
+ {
+ if (!bWarn)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
+ bWarn = IMG_TRUE;
+ }
+ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
+ PVR_ASSERT(psOffsetStruct->bOnMMapList);
+
+ DestroyOffsetStruct(psOffsetStruct);
+ }
+ }
+
+ LinuxUnLockMutex(&g_sMMapMutex);
+}
+
+
+PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+
+IMG_VOID
+PVRMMapInit(IMG_VOID)
+{
+ LinuxInitMutex(&g_sMMapMutex);
+
+ g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
+ if (!g_psMemmapCache)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
+ goto error;
+ }
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
+ ProcSeqNextMMapRegistrations,
+ ProcSeqShowMMapRegistrations,
+ ProcSeqOff2ElementMMapRegistrations,
+ ProcSeqStartstopMMapRegistrations
+ );
+#endif
+ return;
+
+error:
+ PVRMMapCleanup();
+ return;
+}
+
+
+IMG_VOID
+PVRMMapCleanup(IMG_VOID)
+{
+ PVRSRV_ERROR eError;
+
+ if (!list_empty(&g_sMMapAreaList))
+ {
+ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
+
+ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
+ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
+ {
+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+ }
+ }
+ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ RemoveProcEntrySeq(g_ProcMMap);
+#endif
+
+ if(g_psMemmapCache)
+ {
+ KMemCacheDestroyWrapper(g_psMemmapCache);
+ g_psMemmapCache = NULL;
+ }
+}
diff --git a/drivers/gpu/pvr/mmap.h b/drivers/gpu/pvr/mmap.h
new file mode 100644
index 0000000..224e652
--- /dev/null
+++ b/drivers/gpu/pvr/mmap.h
@@ -0,0 +1,122 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__MMAP_H__)
+#define __MMAP_H__
+
+#include <linux/mm.h>
+#include <linux/list.h>
+
+#if defined(VM_MIXEDMAP)
+#define PVR_MAKE_ALL_PFNS_SPECIAL
+#endif
+
+#include "perproc.h"
+#include "mm.h"
+
+typedef struct KV_OFFSET_STRUCT_TAG
+{
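+ /* Book-keeping for one registered mmap mapping of a LinuxMemArea: map count, the mmap offset handed to user space, the owning PID, and the user-mode start address once PVRMMap() completes. */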
+
+ IMG_UINT32 ui32Mapped;
+
+
+ IMG_UINT32 ui32MMapOffset;
+
+ IMG_UINT32 ui32RealByteSize;
+
+
+ LinuxMemArea *psLinuxMemArea;
+
+#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
+
+ IMG_UINT32 ui32TID;
+#endif
+
+
+ IMG_UINT32 ui32PID;
+
+
+ IMG_BOOL bOnMMapList;
+
+
+ IMG_UINT32 ui32RefCount;
+
+
+ IMG_UINT32 ui32UserVAddr;
+
+
+#if defined(DEBUG_LINUX_MMAP_AREAS)
+ const IMG_CHAR *pszName;
+#endif
+
+
+ struct list_head sMMapItem;
+
+
+ struct list_head sAreaItem;
+}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
+
+
+
+IMG_VOID PVRMMapInit(IMG_VOID);
+
+
+IMG_VOID PVRMMapCleanup(IMG_VOID);
+
+
+PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
+
+
+PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
+
+
+PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle,
+#else
+ IMG_HANDLE hMHandle,
+#endif
+ IMG_UINT32 *pui32MMapOffset,
+ IMG_UINT32 *pui32ByteOffset,
+ IMG_UINT32 *pui32RealByteSize,
+ IMG_UINT32 *pui32UserVAddr);
+
+PVRSRV_ERROR
+PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle,
+#else
+ IMG_HANDLE hMHandle,
+#endif
+ IMG_BOOL *pbMUnmap,
+ IMG_UINT32 *pui32RealByteSize,
+ IMG_UINT32 *pui32UserVAddr);
+
+int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
+
+
+#endif
+
diff --git a/drivers/gpu/pvr/mnemedefs.h b/drivers/gpu/pvr/mnemedefs.h
new file mode 100644
index 0000000..905081d
--- /dev/null
+++ b/drivers/gpu/pvr/mnemedefs.h
@@ -0,0 +1,94 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _MNEMEDEFS_KM_H_
+#define _MNEMEDEFS_KM_H_
+
+#define MNE_CR_CTRL 0x0D00
+#define MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U
+#define MNE_CR_CTRL_BYP_CC_N_SHIFT 16
+#define MNE_CR_CTRL_BYP_CC_N_SIGNED 0
+#define MNE_CR_CTRL_BYP_CC_MASK 0x00008000U
+#define MNE_CR_CTRL_BYP_CC_SHIFT 15
+#define MNE_CR_CTRL_BYP_CC_SIGNED 0
+#define MNE_CR_CTRL_USE_INVAL_REQ_MASK 0x00007800U
+#define MNE_CR_CTRL_USE_INVAL_REQ_SHIFT 11
+#define MNE_CR_CTRL_USE_INVAL_REQ_SIGNED 0
+#define MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U
+#define MNE_CR_CTRL_BYPASS_ALL_SHIFT 10
+#define MNE_CR_CTRL_BYPASS_ALL_SIGNED 0
+#define MNE_CR_CTRL_BYPASS_MASK 0x000003E0U
+#define MNE_CR_CTRL_BYPASS_SHIFT 5
+#define MNE_CR_CTRL_BYPASS_SIGNED 0
+#define MNE_CR_CTRL_PAUSE_MASK 0x00000010U
+#define MNE_CR_CTRL_PAUSE_SHIFT 4
+#define MNE_CR_CTRL_PAUSE_SIGNED 0
+#define MNE_CR_USE_INVAL 0x0D04
+#define MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFFU
+#define MNE_CR_USE_INVAL_ADDR_SHIFT 0
+#define MNE_CR_USE_INVAL_ADDR_SIGNED 0
+#define MNE_CR_STAT 0x0D08
+#define MNE_CR_STAT_PAUSED_MASK 0x00000400U
+#define MNE_CR_STAT_PAUSED_SHIFT 10
+#define MNE_CR_STAT_PAUSED_SIGNED 0
+#define MNE_CR_STAT_READS_MASK 0x000003FFU
+#define MNE_CR_STAT_READS_SHIFT 0
+#define MNE_CR_STAT_READS_SIGNED 0
+#define MNE_CR_STAT_STATS 0x0D0C
+#define MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0U
+#define MNE_CR_STAT_STATS_RST_SHIFT 4
+#define MNE_CR_STAT_STATS_RST_SIGNED 0
+#define MNE_CR_STAT_STATS_SEL_MASK 0x0000000FU
+#define MNE_CR_STAT_STATS_SEL_SHIFT 0
+#define MNE_CR_STAT_STATS_SEL_SIGNED 0
+#define MNE_CR_STAT_STATS_OUT 0x0D10
+#define MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFFU
+#define MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0
+#define MNE_CR_STAT_STATS_OUT_VALUE_SIGNED 0
+#define MNE_CR_EVENT_STATUS 0x0D14
+#define MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001U
+#define MNE_CR_EVENT_STATUS_INVAL_SHIFT 0
+#define MNE_CR_EVENT_STATUS_INVAL_SIGNED 0
+#define MNE_CR_EVENT_CLEAR 0x0D18
+#define MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001U
+#define MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0
+#define MNE_CR_EVENT_CLEAR_INVAL_SIGNED 0
+#define MNE_CR_CTRL_INVAL 0x0D20
+#define MNE_CR_CTRL_INVAL_PREQ_PDS_MASK 0x00000008U
+#define MNE_CR_CTRL_INVAL_PREQ_PDS_SHIFT 3
+#define MNE_CR_CTRL_INVAL_PREQ_PDS_SIGNED 0
+#define MNE_CR_CTRL_INVAL_PREQ_USEC_MASK 0x00000004U
+#define MNE_CR_CTRL_INVAL_PREQ_USEC_SHIFT 2
+#define MNE_CR_CTRL_INVAL_PREQ_USEC_SIGNED 0
+#define MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK 0x00000002U
+#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SHIFT 1
+#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SIGNED 0
+#define MNE_CR_CTRL_INVAL_ALL_MASK 0x00000001U
+#define MNE_CR_CTRL_INVAL_ALL_SHIFT 0
+#define MNE_CR_CTRL_INVAL_ALL_SIGNED 0
+
+#endif
+
diff --git a/drivers/gpu/pvr/module.c b/drivers/gpu/pvr/module.c
new file mode 100644
index 0000000..9edee40
--- /dev/null
+++ b/drivers/gpu/pvr/module.c
@@ -0,0 +1,874 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#if defined(SUPPORT_DRI_DRM) && !defined(SUPPORT_DRI_DRM_PLUGIN)
+#define PVR_MOD_STATIC
+#else
+
+ #if defined(LDM_PLATFORM)
+ #define PVR_LDM_PLATFORM_MODULE
+ #define PVR_LDM_MODULE
+ #else
+ #if defined(LDM_PCI)
+ #define PVR_LDM_PCI_MODULE
+ #define PVR_LDM_MODULE
+ #endif
+ #endif
+#define PVR_MOD_STATIC static
+#endif
+
+#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+#if !defined(NO_HARDWARE)
+#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV
+#endif
+#endif
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
+#include "env_perproc.h"
+#endif
+#endif
+
+#if defined(PVR_LDM_PLATFORM_MODULE)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(PVR_LDM_PCI_MODULE)
+#include <linux/pci.h>
+#endif
+
+#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
+#include <asm/uaccess.h>
+#endif
+
+#if defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV)
+#include <asm/atomic.h>
+#endif
+
+#include "img_defs.h"
+#include "services.h"
+#include "kerneldisplay.h"
+#include "kernelbuffer.h"
+#include "syscommon.h"
+#include "pvrmmap.h"
+#include "mutils.h"
+#include "mm.h"
+#include "mmap.h"
+#include "mutex.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "perproc.h"
+#include "handle.h"
+#include "pvr_bridge_km.h"
+#include "proc.h"
+#include "pvrmodule.h"
+#include "private_data.h"
+#include "lock.h"
+#include "linkage.h"
+#include "buffer_manager.h"
+
+#if defined(SUPPORT_DRI_DRM)
+#include "pvr_drm.h"
+#endif
+#if defined(PVR_LDM_MODULE)
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+#endif
+#define DEVNAME PVRSRV_MODNAME
+
+#if defined(SUPPORT_DRI_DRM)
+#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
+#else
+#define PRIVATE_DATA(pFile) ((pFile)->private_data)
+#endif
+
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+#include <linux/moduleparam.h>
+extern IMG_UINT32 gPVRDebugLevel;
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel, "Sets the level of debug output (default 0x7)");
+#endif
+
+#if defined(CONFIG_SGX_DVFS_MODE_NONE)
+#define DEFAULT_IDLE_MODE 0
+#elif defined(CONFIG_SGX_DVFS_MODE_LINEAR)
+#define DEFAULT_IDLE_MODE 1
+#elif defined(CONFIG_SGX_DVFS_MODE_OPTIMIZED)
+#define DEFAULT_IDLE_MODE 2
+#else
+#error "sgx ide mode not defined"
+#endif
+
+bool sgx_idle_logging = false;
+module_param(sgx_idle_logging, bool, 0644);
+uint sgx_idle_mode = DEFAULT_IDLE_MODE;
+module_param(sgx_idle_mode, uint, 0644);
+uint sgx_idle_timeout = CONFIG_SGX_DVFS_IDLE_TIMEOUT * NSEC_PER_USEC;
+module_param(sgx_idle_timeout, uint, 0644);
+
+uint sgx_apm_latency = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
+module_param(sgx_apm_latency, uint, 0644);
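+/* The parameters above expose the SGX idle logging flag, idle/DVFS mode, idle timeout and active power management latency as run-time tunables (module parameters, mode 0644). */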
+
+#if defined(CONFIG_ION_OMAP)
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+extern struct ion_device *omap_ion_device;
+struct ion_client *gpsIONClient;
+EXPORT_SYMBOL(gpsIONClient);
+#endif
+
+
+EXPORT_SYMBOL(PVRGetDisplayClassJTable);
+EXPORT_SYMBOL(PVRGetBufferClassJTable);
+
+#if defined(PVR_LDM_MODULE) && !defined(SUPPORT_DRI_DRM)
+static struct class *psPvrClass;
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+static int AssignedMajorNumber;
+
+static int PVRSRVOpen(struct inode* pInode, struct file* pFile);
+static int PVRSRVRelease(struct inode* pInode, struct file* pFile);
+
+static struct file_operations pvrsrv_fops =
+{
+ .owner=THIS_MODULE,
+ .unlocked_ioctl = PVRSRV_BridgeDispatchKM,
+ .open=PVRSRVOpen,
+ .release=PVRSRVRelease,
+ .mmap=PVRMMap,
+};
+#endif
+
+PVRSRV_LINUX_MUTEX gPVRSRVLock;
+
+IMG_UINT32 gui32ReleasePID;
+
+#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
+static IMG_UINT32 gPVRPowerLevel;
+#endif
+
+#if defined(PVR_LDM_MODULE)
+
+#if defined(PVR_LDM_PLATFORM_MODULE)
+#define LDM_DEV struct platform_device
+#define LDM_DRV struct platform_driver
+#endif
+
+#if defined(PVR_LDM_PCI_MODULE)
+#define LDM_DEV struct pci_dev
+#define LDM_DRV struct pci_driver
+#endif
+#if defined(PVR_LDM_PLATFORM_MODULE)
+static int PVRSRVDriverRemove(LDM_DEV *device);
+static int PVRSRVDriverProbe(LDM_DEV *device);
+#endif
+#if defined(PVR_LDM_PCI_MODULE)
+static void PVRSRVDriverRemove(LDM_DEV *device);
+static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
+#endif
+static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
+static void PVRSRVDriverShutdown(LDM_DEV *device);
+static int PVRSRVDriverResume(LDM_DEV *device);
+
+#if defined(PVR_LDM_PCI_MODULE)
+struct pci_device_id powervr_id_table[] __devinitdata = {
+ {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID)},
+#if defined (SYS_SGX_DEV1_DEVICE_ID)
+ {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID)},
+#endif
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, powervr_id_table);
+#endif
+
+#if defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+static struct platform_device_id powervr_id_table[] __devinitdata = {
+ {SYS_SGX_DEV_NAME, 0},
+ {}
+};
+#endif
+
+static LDM_DRV powervr_driver = {
+#if defined(PVR_LDM_PLATFORM_MODULE)
+ .driver = {
+ .name = DRVNAME,
+ },
+#endif
+#if defined(PVR_LDM_PCI_MODULE)
+ .name = DRVNAME,
+#endif
+#if defined(PVR_LDM_PCI_MODULE) || defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+ .id_table = powervr_id_table,
+#endif
+ .probe = PVRSRVDriverProbe,
+#if defined(PVR_LDM_PLATFORM_MODULE)
+ .remove = PVRSRVDriverRemove,
+#endif
+#if defined(PVR_LDM_PCI_MODULE)
+ .remove = __devexit_p(PVRSRVDriverRemove),
+#endif
+ .suspend = PVRSRVDriverSuspend,
+ .resume = PVRSRVDriverResume,
+ .shutdown = PVRSRVDriverShutdown,
+};
+
+LDM_DEV *gpsPVRLDMDev;
+
+#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE) && \
+ !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+static void PVRSRVDeviceRelease(struct device unref__ *pDevice)
+{
+}
+
+static struct platform_device powervr_device = {
+ .name = DEVNAME,
+ .id = -1,
+ .dev = {
+ .release = PVRSRVDeviceRelease
+ }
+};
+#endif
+
+#if defined(PVR_LDM_PLATFORM_MODULE)
+static int PVRSRVDriverProbe(LDM_DEV *pDevice)
+#endif
+#if defined(PVR_LDM_PCI_MODULE)
+static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
+#endif
+{
+ SYS_DATA *psSysData;
+
+ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
+
+#if 0
+
+ if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+#endif
+
+ psSysData = SysAcquireDataNoCheck();
+ if (psSysData == IMG_NULL)
+ {
+ gpsPVRLDMDev = pDevice;
+ if (SysInitialise() != PVRSRV_OK)
+ {
+ return -ENODEV;
+ }
+ }
+
+#if defined(CONFIG_ION_OMAP)
+ gpsIONClient = ion_client_create(omap_ion_device,
+ 1 << ION_HEAP_TYPE_CARVEOUT |
+ 1 << OMAP_ION_HEAP_TYPE_TILER,
+ "pvr");
+ if (IS_ERR_OR_NULL(gpsIONClient))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDriverProbe: Couldn't create ion client"));
+ return PTR_ERR(gpsIONClient);
+ }
+#endif
+
+ return 0;
+}
+
+
+#if defined (PVR_LDM_PLATFORM_MODULE)
+static int PVRSRVDriverRemove(LDM_DEV *pDevice)
+#endif
+#if defined(PVR_LDM_PCI_MODULE)
+static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
+#endif
+{
+ SYS_DATA *psSysData;
+
+ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
+
+#if defined(CONFIG_ION_OMAP)
+ ion_client_destroy(gpsIONClient);
+ gpsIONClient = IMG_NULL;
+#endif
+
+ SysAcquireData(&psSysData);
+
+#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
+ if (gPVRPowerLevel != 0)
+ {
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
+ {
+ gPVRPowerLevel = 0;
+ }
+ }
+#endif
+ (void) SysDeinitialise(psSysData);
+
+ gpsPVRLDMDev = IMG_NULL;
+
+#if 0
+ if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+#endif
+
+#if defined (PVR_LDM_PLATFORM_MODULE)
+ return 0;
+#endif
+#if defined (PVR_LDM_PCI_MODULE)
+ return;
+#endif
+}
+#endif
+
+
+#if defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV)
+#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \
+ !defined(SUPPORT_DRI_DRM_PLUGIN)
+void PVRSRVDriverShutdown(struct drm_device *pDevice)
+#else
+PVR_MOD_STATIC void PVRSRVDriverShutdown(LDM_DEV *pDevice)
+#endif
+{
+ static atomic_t sDriverIsShutdown = ATOMIC_INIT(1);
+
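+ /* sDriverIsShutdown starts at 1, so atomic_dec_and_test() succeeds only on the first call; the power-down sequence therefore runs exactly once even if shutdown is invoked for several devices. */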
+ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
+
+ if (atomic_dec_and_test(&sDriverIsShutdown))
+ {
+
+ LinuxLockMutex(&gPVRSRVLock);
+
+ (void) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
+ }
+}
+
+#endif
+
+
+#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
+#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \
+ !defined(SUPPORT_DRI_DRM_PLUGIN)
+int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
+#else
+PVR_MOD_STATIC int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
+#endif
+{
+#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
+ PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
+
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+
+#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \
+ !defined(SUPPORT_DRI_DRM_PLUGIN)
+int PVRSRVDriverResume(struct drm_device *pDevice)
+#else
+PVR_MOD_STATIC int PVRSRVDriverResume(LDM_DEV *pDevice)
+#endif
+{
+#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
+ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
+
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+#endif
+
+
+#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
+IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
+{
+ IMG_CHAR data_buffer[2];
+ IMG_UINT32 PVRPowerLevel;
+
+ if (count != sizeof(data_buffer))
+ {
+ return -EINVAL;
+ }
+ else
+ {
+ if (copy_from_user(data_buffer, buffer, count))
+ return -EINVAL;
+ if (data_buffer[count - 1] != '\n')
+ return -EINVAL;
+ PVRPowerLevel = data_buffer[0] - '0';
+ if (PVRPowerLevel != gPVRPowerLevel)
+ {
+ if (PVRPowerLevel != 0)
+ {
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+ }
+ else
+ {
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+ }
+
+ gPVRPowerLevel = PVRPowerLevel;
+ }
+ }
+ return (count);
+}
+
+void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
+{
+ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
+}
+
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+int PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
+#else
+static int PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
+#endif
+{
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
+ IMG_HANDLE hBlockAlloc;
+ int iRet = -ENOMEM;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32PID;
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
+#endif
+
+ LinuxLockMutex(&gPVRSRVLock);
+
+ ui32PID = OSGetCurrentProcessIDKM();
+
+ if (PVRSRVProcessConnect(ui32PID, 0) != PVRSRV_OK)
+ goto err_unlock;
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
+ if (psEnvPerProc == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
+ goto err_unlock;
+ }
+#endif
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_FILE_PRIVATE_DATA),
+ (IMG_PVOID *)&psPrivateData,
+ &hBlockAlloc,
+ "File Private Data");
+
+ if(eError != PVRSRV_OK)
+ goto err_unlock;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ psPrivateData->hKernelMemInfo = 0;
+#else
+ psPrivateData->hKernelMemInfo = NULL;
+#endif
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ psPrivateData->psDRMFile = pFile;
+
+ list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
+#endif
+ psPrivateData->ui32OpenPID = ui32PID;
+ psPrivateData->hBlockAlloc = hBlockAlloc;
+ PRIVATE_DATA(pFile) = psPrivateData;
+ iRet = 0;
+err_unlock:
+ LinuxUnLockMutex(&gPVRSRVLock);
+ return iRet;
+}
+
+
+#if defined(SUPPORT_DRI_DRM)
+void PVRSRVRelease(void *pvPrivData)
+#else
+static int PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
+#endif
+{
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
+ int err = 0;
+
+ LinuxLockMutex(&gPVRSRVLock);
+
+#if defined(SUPPORT_DRI_DRM)
+ psPrivateData = (PVRSRV_FILE_PRIVATE_DATA *)pvPrivData;
+#else
+ psPrivateData = PRIVATE_DATA(pFile);
+#endif
+ if (psPrivateData != IMG_NULL)
+ {
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ list_del(&psPrivateData->sDRMAuthListItem);
+#endif
+
+ if(psPrivateData->hKernelMemInfo)
+ {
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+
+ if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_PVOID *)&psKernelMemInfo,
+ psPrivateData->hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__));
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+
+ if (psKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ BM_XProcIndexRelease(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ }
+
+
+ if(FreeMemCallBackCommon(psKernelMemInfo, 0,
+ PVRSRV_FREE_CALLBACK_ORIGIN_EXTERNAL) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: FreeMemCallBackCommon failed", __FUNCTION__));
+ err = -EFAULT;
+ goto err_unlock;
+ }
+ }
+
+
+ gui32ReleasePID = psPrivateData->ui32OpenPID;
+ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
+ gui32ReleasePID = 0;
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_FILE_PRIVATE_DATA),
+ psPrivateData, psPrivateData->hBlockAlloc);
+
+#if !defined(SUPPORT_DRI_DRM)
+ PRIVATE_DATA(pFile) = IMG_NULL;
+#endif
+ }
+
+err_unlock:
+ LinuxUnLockMutex(&gPVRSRVLock);
+#if defined(SUPPORT_DRI_DRM)
+ return;
+#else
+ return err;
+#endif
+}
+
+
+#if defined(SUPPORT_DRI_DRM)
+int PVRCore_Init(void)
+#else
+static int __init PVRCore_Init(void)
+#endif
+{
+ int error;
+#if !defined(PVR_LDM_MODULE)
+ PVRSRV_ERROR eError;
+#else
+#if !defined(SUPPORT_DRI_DRM)
+ struct device *psDev;
+#endif
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+
+ PVRDPFInit();
+#endif
+ PVR_TRACE(("PVRCore_Init"));
+
+ LinuxInitMutex(&gPVRSRVLock);
+
+ if (CreateProcEntries ())
+ {
+ error = -ENOMEM;
+ return error;
+ }
+
+ if (PVROSFuncInit() != PVRSRV_OK)
+ {
+ error = -ENOMEM;
+ goto init_failed;
+ }
+
+ PVRLinuxMUtilsInit();
+
+ if(LinuxMMInit() != PVRSRV_OK)
+ {
+ error = -ENOMEM;
+ goto init_failed;
+ }
+
+ LinuxBridgeInit();
+
+ PVRMMapInit();
+
+#if defined(PVR_LDM_MODULE)
+
+#if defined(PVR_LDM_PLATFORM_MODULE) || defined(SUPPORT_DRI_DRM_PLUGIN)
+ if ((error = platform_driver_register(&powervr_driver)) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
+
+ goto init_failed;
+ }
+
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+ if ((error = platform_device_register(&powervr_device)) != 0)
+ {
+ platform_driver_unregister(&powervr_driver);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
+
+ goto init_failed;
+ }
+#endif
+#endif
+
+#if defined(PVR_LDM_PCI_MODULE)
+ if ((error = pci_register_driver(&powervr_driver)) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
+
+ goto init_failed;
+ }
+#endif
+#endif
+
+#if !defined(PVR_LDM_MODULE)
+
+ if ((eError = SysInitialise()) != PVRSRV_OK)
+ {
+ error = -ENODEV;
+#if defined(TCF_REV) && (TCF_REV == 110)
+ if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
+ {
+ printk("\nAtlas wrapper (FPGA image) version mismatch");
+ error = -ENODEV;
+ }
+#endif
+ goto init_failed;
+ }
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
+
+ if (AssignedMajorNumber <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
+
+ error = -EBUSY;
+ goto sys_deinit;
+ }
+
+ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
+
+#if defined(PVR_LDM_MODULE)
+
+ psPvrClass = class_create(THIS_MODULE, "pvr");
+
+ if (IS_ERR(psPvrClass))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
+ error = -EBUSY;
+ goto unregister_device;
+ }
+
+ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+ NULL,
+#endif
+ DEVNAME);
+ if (IS_ERR(psDev))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
+ error = -EBUSY;
+ goto destroy_class;
+ }
+#endif
+#endif
+
+ return 0;
+
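+ /* Error unwind: the labels below fall through, so a failure at any point releases everything set up before it. */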
+#if !defined(SUPPORT_DRI_DRM)
+#if defined(PVR_LDM_MODULE)
+destroy_class:
+ class_destroy(psPvrClass);
+unregister_device:
+ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME);
+#endif
+sys_deinit:
+#endif
+#if defined(PVR_LDM_MODULE)
+#if defined(PVR_LDM_PCI_MODULE)
+ pci_unregister_driver(&powervr_driver);
+#endif
+
+#if defined (PVR_LDM_PLATFORM_MODULE)
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+ platform_device_unregister(&powervr_device);
+#endif
+ platform_driver_unregister(&powervr_driver);
+#endif
+
+#else
+
+ {
+ SYS_DATA *psSysData;
+
+ psSysData = SysAcquireDataNoCheck();
+ if (psSysData != IMG_NULL)
+ {
+ (void) SysDeinitialise(psSysData);
+ }
+ }
+#endif
+init_failed:
+ PVRMMapCleanup();
+ LinuxMMCleanup();
+ LinuxBridgeDeInit();
+ PVROSFuncDeInit();
+ RemoveProcEntries();
+
+ return error;
+
+}
+
+
+#if defined(SUPPORT_DRI_DRM)
+void PVRCore_Cleanup(void)
+#else
+static void __exit PVRCore_Cleanup(void)
+#endif
+{
+#if !defined(PVR_LDM_MODULE)
+ SYS_DATA *psSysData;
+#endif
+ PVR_TRACE(("PVRCore_Cleanup"));
+
+#if !defined(PVR_LDM_MODULE)
+ SysAcquireData(&psSysData);
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+
+#if defined(PVR_LDM_MODULE)
+ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
+ class_destroy(psPvrClass);
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
+ if (
+#endif
+ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME)
+#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
+ ;
+#else
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
+ }
+#endif
+#endif
+
+#if defined(PVR_LDM_MODULE)
+
+#if defined(PVR_LDM_PCI_MODULE)
+ pci_unregister_driver(&powervr_driver);
+#endif
+
+#if defined (PVR_LDM_PLATFORM_MODULE)
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+ platform_device_unregister(&powervr_device);
+#endif
+ platform_driver_unregister(&powervr_driver);
+#endif
+
+#else
+#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
+ if (gPVRPowerLevel != 0)
+ {
+ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
+ {
+ gPVRPowerLevel = 0;
+ }
+ }
+#endif
+
+ (void) SysDeinitialise(psSysData);
+#endif
+
+ PVRMMapCleanup();
+
+ LinuxMMCleanup();
+
+ LinuxBridgeDeInit();
+
+ PVROSFuncDeInit();
+
+ RemoveProcEntries();
+
+ PVR_TRACE(("PVRCore_Cleanup: unloading"));
+}
+
+#if !defined(SUPPORT_DRI_DRM)
+module_init(PVRCore_Init);
+module_exit(PVRCore_Cleanup);
+#endif
diff --git a/drivers/gpu/pvr/mutex.c b/drivers/gpu/pvr/mutex.c
new file mode 100644
index 0000000..742fa03
--- /dev/null
+++ b/drivers/gpu/pvr/mutex.c
@@ -0,0 +1,136 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/errno.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+#include <linux/mutex.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#include <linux/module.h>
+
+#include <img_defs.h>
+#include <services.h>
+
+#include "mutex.h"
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+
+IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ mutex_init(psPVRSRVMutex);
+}
+
+IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ mutex_lock(psPVRSRVMutex);
+}
+
+PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
+ {
+ return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR;
+ }
+ else
+ {
+ return PVRSRV_OK;
+ }
+}
+
+IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ return mutex_trylock(psPVRSRVMutex);
+}
+
+IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ mutex_unlock(psPVRSRVMutex);
+}
+
+IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ return (mutex_is_locked(psPVRSRVMutex)) ? IMG_TRUE : IMG_FALSE;
+}
+
+
+#else
+
+
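+/* Pre-2.6.15 fallback: emulate a mutex with a semaphore plus a counter that is decremented while the lock is held, so LinuxIsLockedMutex() can report a non-zero value when locked. */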
+IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ init_MUTEX(&psPVRSRVMutex->sSemaphore);
+ atomic_set(&psPVRSRVMutex->Count, 0);
+}
+
+IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ down(&psPVRSRVMutex->sSemaphore);
+ atomic_dec(&psPVRSRVMutex->Count);
+}
+
+PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
+ {
+
+ return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR;
+ }
+ else
+ {
+ atomic_dec(&psPVRSRVMutex->Count);
+ return PVRSRV_OK;
+ }
+}
+
+IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
+ if(Status == 0)
+ {
+ atomic_dec(&psPVRSRVMutex->Count);
+ }
+
+ return Status;
+}
+
+IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ atomic_inc(&psPVRSRVMutex->Count);
+ up(&psPVRSRVMutex->sSemaphore);
+}
+
+IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
+{
+ IMG_INT32 iCount;
+
+ iCount = atomic_read(&psPVRSRVMutex->Count);
+
+ return (IMG_BOOL)iCount;
+}
+
+#endif
+
diff --git a/drivers/gpu/pvr/mutex.h b/drivers/gpu/pvr/mutex.h
new file mode 100644
index 0000000..5e787b7
--- /dev/null
+++ b/drivers/gpu/pvr/mutex.h
@@ -0,0 +1,70 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __INCLUDED_LINUX_MUTEX_H_
+#define __INCLUDED_LINUX_MUTEX_H_
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+#include <linux/mutex.h>
+#else
+#include <asm/semaphore.h>
+#endif
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+
+typedef struct mutex PVRSRV_LINUX_MUTEX;
+
+#else
+
+
+typedef struct {
+ struct semaphore sSemaphore;
+
+ atomic_t Count;
+}PVRSRV_LINUX_MUTEX;
+
+#endif
+
+
+extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
+
+
+#endif
+
diff --git a/drivers/gpu/pvr/mutils.c b/drivers/gpu/pvr/mutils.c
new file mode 100644
index 0000000..a012cf5
--- /dev/null
+++ b/drivers/gpu/pvr/mutils.c
@@ -0,0 +1,136 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "mutils.h"
+
+#if defined(SUPPORT_LINUX_X86_PAT)
+#define PAT_LINUX_X86_WC 1
+
+#define PAT_X86_ENTRY_BITS 8
+
+#define PAT_X86_BIT_PWT 1U
+#define PAT_X86_BIT_PCD 2U
+#define PAT_X86_BIT_PAT 4U
+#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
+
+static IMG_BOOL g_write_combining_available = IMG_FALSE;
+
+#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
+
+static inline IMG_UINT
+pvr_pat_index(pgprotval_t prot_val)
+{
+ IMG_UINT ret = 0;
+ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
+
+ ret |= PROT_TO_PAT_INDEX(val, PAT);
+ ret |= PROT_TO_PAT_INDEX(val, PCD);
+ ret |= PROT_TO_PAT_INDEX(val, PWT);
+
+ return ret;
+}
+
+static inline IMG_UINT
+pvr_pat_entry(u64 pat, IMG_UINT index)
+{
+ return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
+}
+
+static IMG_VOID
+PVRLinuxX86PATProbe(IMG_VOID)
+{
+
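+ /* Read the IA32 PAT MSR and check that the entry selected by _PAGE_CACHE_WC is actually programmed for write-combining before advertising WC mappings. */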
+ if (cpu_has_pat)
+ {
+ u64 pat;
+ IMG_UINT pat_index;
+ IMG_UINT pat_entry;
+
+ PVR_TRACE(("%s: PAT available", __FUNCTION__));
+
+ rdmsrl(MSR_IA32_CR_PAT, pat);
+ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32)));
+ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat)));
+
+ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
+ PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index));
+
+ pat_entry = pvr_pat_entry(pat, pat_index);
+ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
+
+#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
+ g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC);
+#endif
+ }
+#if defined(DEBUG)
+#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
+ if (g_write_combining_available)
+ {
+ PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__));
+ }
+ else
+ {
+ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
+ }
+#else
+ PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__));
+#endif
+#endif
+}
+
+pgprot_t
+pvr_pgprot_writecombine(pgprot_t prot)
+{
+
+
+ return (g_write_combining_available) ?
+ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot);
+}
+#endif
+
+IMG_VOID
+PVRLinuxMUtilsInit(IMG_VOID)
+{
+#if defined(SUPPORT_LINUX_X86_PAT)
+ PVRLinuxX86PATProbe();
+#endif
+}
+
diff --git a/drivers/gpu/pvr/mutils.h b/drivers/gpu/pvr/mutils.h
new file mode 100644
index 0000000..b2a8ba0
--- /dev/null
+++ b/drivers/gpu/pvr/mutils.h
@@ -0,0 +1,103 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __IMG_LINUX_MUTILS_H__
+#define __IMG_LINUX_MUTILS_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)))
+#if defined(SUPPORT_LINUX_X86_PAT)
+#undef SUPPORT_LINUX_X86_PAT
+#endif
+#endif
+
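+/* PGPROT_WC() selects a write-combining page protection where the architecture (or x86 PAT support) provides one, and falls back to an uncached protection otherwise. */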
+#if defined(SUPPORT_LINUX_X86_PAT)
+ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
+ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
+#else
+ #if defined(__arm__) || defined(__sh__)
+ #define PGPROT_WC(pv) pgprot_writecombine(pv)
+ #else
+ #if defined(__i386__) || defined(__mips__)
+ #define PGPROT_WC(pv) pgprot_noncached(pv)
+ #else
+ #define PGPROT_WC(pv) pgprot_noncached(pv)
+ #error Unsupported architecture!
+ #endif
+ #endif
+#endif
+
+#define PGPROT_UC(pv) pgprot_noncached(pv)
+
+#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
+#else
+ #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
+ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
+ #else
+ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
+ #endif
+#endif
+
+#if defined(SUPPORT_LINUX_X86_PAT)
+ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
+ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
+ #else
+ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
+ #endif
+#else
+ #if defined(__arm__)
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
+ #else
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
+ #else
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
+ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE)
+ #else
+ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE, 1)
+ #endif
+ #endif
+ #endif
+ #else
+ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
+ #endif
+#endif
+
+#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
+
+IMG_VOID PVRLinuxMUtilsInit(IMG_VOID);
+
+#endif
+
diff --git a/drivers/gpu/pvr/ocpdefs.h b/drivers/gpu/pvr/ocpdefs.h
new file mode 100644
index 0000000..3bbab7b
--- /dev/null
+++ b/drivers/gpu/pvr/ocpdefs.h
@@ -0,0 +1,271 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _OCPDEFS_H_
+#define _OCPDEFS_H_
+
+#define EUR_CR_OCP_REVISION 0xFE00
+#define EUR_CR_OCP_REVISION_REV_MASK 0xFFFFFFFFUL
+#define EUR_CR_OCP_REVISION_REV_SHIFT 0
+#define EUR_CR_OCP_REVISION_REV_SIGNED 0
+
+#define EUR_CR_OCP_HWINFO 0xFE04
+#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_MASK 0x00000003UL
+#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SHIFT 0
+#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SIGNED 0
+
+#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_MASK 0x00000004UL
+#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SHIFT 2
+#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SIGNED 0
+
+#define EUR_CR_OCP_SYSCONFIG 0xFE10
+#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_MASK 0x0000000CUL
+#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 2
+#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SIGNED 0
+
+#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_MASK 0x00000030UL
+#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 4
+#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_RAW_0 0xFE24
+#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_RAW_1 0xFE28
+#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_RAW_2 0xFE2C
+#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_0 0xFE30
+#define EUR_CR_OCP_IRQSTATUS_0_INIT_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_0_INIT_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_0_INIT_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_1 0xFE34
+#define EUR_CR_OCP_IRQSTATUS_1_TARGET_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SIGNED 0
+
+#define EUR_CR_OCP_IRQSTATUS_2 0xFE38
+#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SHIFT 0
+#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_SET_0 0xFE3C
+#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_SET_1 0xFE40
+#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_SET_2 0xFE44
+#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_CLR_0 0xFE48
+#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_CLR_1 0xFE4C
+#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SIGNED 0
+
+#define EUR_CR_OCP_IRQENABLE_CLR_2 0xFE50
+#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_MASK 0x00000001UL
+#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SHIFT 0
+#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SIGNED 0
+
+#define EUR_CR_OCP_PAGE_CONFIG 0xFF00
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_MASK 0x00000001UL
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SHIFT 0
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SIGNED 0
+
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_MASK 0x00000004UL
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SHIFT 2
+#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SIGNED 0
+
+#define EUR_CR_OCP_PAGE_CONFIG_SIZE_MASK 0x00000018UL
+#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SHIFT 3
+#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT 0xFF04
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_MASK 0x00000001UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SHIFT 0
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_MASK 0x00000002UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SHIFT 1
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_MASK 0x00000004UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SHIFT 2
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_MASK 0x00000008UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SHIFT 3
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_MASK 0x00000010UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SHIFT 4
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_MASK 0x00000020UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SHIFT 5
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_MASK 0x00000100UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SHIFT 8
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_MASK 0x00000200UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SHIFT 9
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SIGNED 0
+
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_MASK 0x00000400UL
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SHIFT 10
+#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_CONFIG 0xFF08
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_MASK 0x00000003UL
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SHIFT 0
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_MASK 0x0000000CUL
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SHIFT 2
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_MASK 0x00000010UL
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SHIFT 4
+#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_MASK 0x00000020UL
+#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SHIFT 5
+#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK 0x80000000UL
+#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SHIFT 31
+#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS 0xFF0C
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_MASK 0x00000003UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SHIFT 0
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_MASK 0x00000004UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SHIFT 2
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_MASK 0x00000008UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SHIFT 3
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_MASK 0x00000030UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SHIFT 4
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_MASK 0x000000C0UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SHIFT 6
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_MASK 0x00000300UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SHIFT 8
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_MASK 0x00000400UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SHIFT 10
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_MASK 0x00000800UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SHIFT 11
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_MASK 0x00001000UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SHIFT 12
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_MASK 0x00006000UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SHIFT 13
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_MASK 0x00008000UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SHIFT 15
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_MASK 0x00010000UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SHIFT 16
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_MASK 0x00020000UL
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SHIFT 17
+#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_MASK 0x001C0000UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SHIFT 18
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_MASK 0x03E00000UL
+#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SHIFT 21
+#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_MASK 0x04000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SHIFT 26
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_MASK 0x08000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SHIFT 27
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_MASK 0x10000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SHIFT 28
+#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_MASK 0x20000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SHIFT 29
+#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_MASK 0x40000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SHIFT 30
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SIGNED 0
+
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_MASK 0x80000000UL
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SHIFT 31
+#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SIGNED 0
+
+
+#endif
+
diff --git a/drivers/gpu/pvr/omap3/oemfuncs.h b/drivers/gpu/pvr/omap3/oemfuncs.h
new file mode 100644
index 0000000..d283564
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/oemfuncs.h
@@ -0,0 +1,56 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__OEMFUNCS_H__)
+#define __OEMFUNCS_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+typedef struct PVRSRV_DC_OEM_JTABLE_TAG
+{
+ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
+ IMG_PVOID pvDummy1;
+ IMG_PVOID pvDummy2;
+ IMG_PVOID pvDummy3;
+
+} PVRSRV_DC_OEM_JTABLE;
+
+#define OEM_GET_EXT_FUNCS (1<<1)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/omap3/sysconfig.c b/drivers/gpu/pvr/omap3/sysconfig.c
new file mode 100644
index 0000000..d503661
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/sysconfig.c
@@ -0,0 +1,963 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "kerneldisplay.h"
+#include "oemfuncs.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+#include "sysconfig.h"
+
+#include "ocpdefs.h"
+
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS) && \
+ defined(SGX530) && (SGX_CORE_REV == 125)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
+SYS_DATA gsSysData;
+
+static SYS_SPECIFIC_DATA gsSysSpecificData;
+SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+static IMG_UINT32 gui32SGXDeviceID;
+static SGX_DEVICE_MAP gsSGXDeviceMap;
+static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
+
+#define DEVICE_SGX_INTERRUPT (1 << 0)
+
+#if defined(NO_HARDWARE)
+static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
+#endif
+
+IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+
+#if defined(DEBUG) && defined(DUMP_OMAP34xx_CLOCKS) && defined(__linux__)
+
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <mach/clock.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+#include <../mach-omap2/clock_34xx.h>
+#define ONCHIP_CLKS onchip_clks
+#else
+#include <../mach-omap2/clock34xx.h>
+#define ONCHIP_CLKS onchip_34xx_clks
+#endif
+
+static void omap3_clk_recalc(struct clk *clk) {}
+static void omap3_followparent_recalc(struct clk *clk) {}
+static void omap3_propagate_rate(struct clk *clk) {}
+static void omap3_table_recalc(struct clk *clk) {}
+static long omap3_round_to_table_rate(struct clk *clk, unsigned long rate) { return 0; }
+static int omap3_select_table_rate(struct clk *clk, unsigned long rate) { return 0; }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+static void omap3_dpll_recalc(struct clk *clk, unsigned long parent_rate,
+ u8 rate_storage) {}
+static void omap3_clkoutx2_recalc(struct clk *clk, unsigned long parent_rate,
+ u8 rate_storage) {}
+static void omap3_dpll_allow_idle(struct clk *clk) {}
+static void omap3_dpll_deny_idle(struct clk *clk) {}
+static u32 omap3_dpll_autoidle_read(struct clk *clk) { return 0; }
+static int omap3_noncore_dpll_enable(struct clk *clk) { return 0; }
+static void omap3_noncore_dpll_disable(struct clk *clk) {}
+static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) { return 0; }
+static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) { return 0; }
+void followparent_recalc(struct clk *clk, unsigned long new_parent_rate,
+ u8 rate_storage) {}
+long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
+void omap2_clksel_recalc(struct clk *clk, unsigned long new_parent_rate,
+ u8 rate_storage) {}
+long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) { return 0; }
+int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) { return 0; }
+void omap2_fixed_divisor_recalc(struct clk *clk, unsigned long new_parent_rate,
+ u8 rate_storage) {}
+void omap2_init_clksel_parent(struct clk *clk) {}
+#endif
+
+static void dump_omap34xx_clocks(void)
+{
+ struct clk **c;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+ struct vdd_prcm_config *t1 = vdd1_rate_table;
+ struct vdd_prcm_config *t2 = vdd2_rate_table;
+
+ t1 = t1;
+ t2 = t2;
+#else
+
+ omap3_dpll_allow_idle(0);
+ omap3_dpll_deny_idle(0);
+ omap3_dpll_autoidle_read(0);
+ omap3_clk_recalc(0);
+ omap3_followparent_recalc(0);
+ omap3_propagate_rate(0);
+ omap3_table_recalc(0);
+ omap3_round_to_table_rate(0, 0);
+ omap3_select_table_rate(0, 0);
+#endif
+
+ for(c = ONCHIP_CLKS; c < ONCHIP_CLKS + ARRAY_SIZE(ONCHIP_CLKS); c++)
+ {
+ struct clk *cp = *c, *copy;
+ unsigned long rate;
+ copy = clk_get(NULL, cp->name);
+ if(!copy)
+ continue;
+ rate = clk_get_rate(copy);
+ if (rate < 1000000)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu KHz (%lu Hz)", __func__, cp->name, rate/1000, rate));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu MHz (%lu Hz)", __func__, cp->name, rate/1000000, rate));
+ }
+ }
+}
+
+#else
+
+static INLINE void dump_omap34xx_clocks(void) {}
+
+#endif
+
+#if defined(SGX_OCP_REGS_ENABLED)
+
+#define SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE (SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE + EUR_CR_OCP_REVISION)
+#define SYS_OMAP3430_OCP_REGS_SIZE 0x110
+
+static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
+
+static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+
+ if(eError == PVRSRV_OK)
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr,
+ EUR_CR_OCP_DEBUG_CONFIG - EUR_CR_OCP_REVISION,
+ EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ }
+
+ return eError;
+}
+
+#else
+
+static INLINE PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ return EnableSGXClocks(psSysData);
+}
+
+#endif
+
+static INLINE PVRSRV_ERROR EnableSystemClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSystemClocks(psSysData);
+
+#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if(eError == PVRSRV_OK)
+ {
+
+ EnableSGXClocksWrap(psSysData);
+ }
+#endif
+
+ return eError;
+}
+
+static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
+{
+#if defined(NO_HARDWARE)
+ PVRSRV_ERROR eError;
+ IMG_CPU_PHYADDR sCpuPAddr;
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+
+ gsSGXDeviceMap.ui32Flags = 0x0;
+
+#if defined(NO_HARDWARE)
+
+
+ eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
+ &gsSGXRegsCPUVAddr,
+ &sCpuPAddr);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
+ gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
+#if defined(__linux__)
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+#else
+
+ gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
+#endif
+
+ OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
+
+
+
+
+ gsSGXDeviceMap.ui32IRQ = 0;
+
+#else
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
+ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
+
+ gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
+
+#endif
+
+#if defined(PDUMP)
+ {
+ static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
+ gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
+ }
+#endif
+
+
+ return PVRSRV_OK;
+}
+
+
+IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
+{
+ static IMG_CHAR aszVersionString[100];
+ SYS_DATA *psSysData;
+ IMG_UINT32 ui32SGXRevision;
+ IMG_INT32 i32Count;
+#if !defined(NO_HARDWARE)
+ IMG_VOID *pvRegsLinAddr;
+
+ pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
+ SYS_OMAP3430_SGX_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+ if(!pvRegsLinAddr)
+ {
+ return IMG_NULL;
+ }
+
+ ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
+ EUR_CR_CORE_REVISION);
+#else
+ ui32SGXRevision = 0;
+#endif
+
+ SysAcquireData(&psSysData);
+
+ i32Count = OSSNPrintf(aszVersionString, 100,
+ "SGX revision = %u.%u.%u",
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
+ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
+ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
+ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
+ );
+
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin(pvRegsLinAddr,
+ SYS_OMAP3430_SGX_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+#endif
+
+ if(i32Count == -1)
+ {
+ return IMG_NULL;
+ }
+
+ return aszVersionString;
+}
+
+
+PVRSRV_ERROR SysInitialise(IMG_VOID)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+ SGX_TIMING_INFORMATION* psTimingInfo;
+#endif
+ gpsSysData = &gsSysData;
+ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
+
+ gpsSysSpecificData = &gsSysSpecificData;
+ OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));
+
+ gpsSysData->pvSysSpecificData = gpsSysSpecificData;
+
+ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
+
+ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
+
+
+ for(i=0; i<SYS_DEVICE_COUNT; i++)
+ {
+ gpsSysData->sDeviceID[i].uiID = i;
+ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
+ }
+
+ gpsSysData->psDeviceNodeList = IMG_NULL;
+ gpsSysData->psQueueList = IMG_NULL;
+
+ eError = SysInitialiseCommon(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+
+ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
+ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
+ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
+ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
+#endif
+
+
+
+ gpsSysSpecificData->ui32SrcClockDiv = 3;
+
+
+
+
+
+ eError = SysLocateDevices(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
+
+#if defined(SGX_OCP_REGS_ENABLED)
+ {
+ IMG_SYS_PHYADDR sOCPRegsSysPBase;
+ IMG_CPU_PHYADDR sOCPRegsCpuPBase;
+
+ sOCPRegsSysPBase.uiAddr = SYS_OMAP3430_OCP_REGS_SYS_PHYS_BASE;
+ sOCPRegsCpuPBase = SysSysPAddrToCpuPAddr(sOCPRegsSysPBase);
+
+ gpvOCPRegsLinAddr = OSMapPhysToLin(sOCPRegsCpuPBase,
+ SYS_OMAP3430_OCP_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gpvOCPRegsLinAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to map OCP registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_OCPREGS);
+ }
+#endif
+
+
+ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
+ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);
+
+
+
+
+
+ psDeviceNode = gpsSysData->psDeviceNodeList;
+ while(psDeviceNode)
+ {
+
+ switch(psDeviceNode->sDevId.eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+
+
+
+ psDeviceNode->psLocalDevMemArena = IMG_NULL;
+
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+
+ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
+ {
+ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
+ }
+
+ gpsSGXDevNode = psDeviceNode;
+ gsSysSpecificData.psSGXDevNode = psDeviceNode;
+
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+
+ psDeviceNode = psDeviceNode->psNext;
+ }
+
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+#endif
+
+ dump_omap34xx_clocks();
+
+ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysFinalise(IMG_VOID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
+ return eError;
+ }
+#endif
+
+ eError = OSInstallMISR(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+
+#if defined(SYS_USING_INTERRUPTS)
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+#endif
+
+
+ gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+ if (!gpsSysData->pszVersionString)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+ gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+ eError = OSUninstallDeviceLISR(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
+ return eError;
+ }
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
+ {
+ eError = OSUninstallMISR(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
+ {
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));
+
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
+ return eError;
+ }
+#endif
+
+
+ eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
+ return eError;
+ }
+ }
+
+#if defined(SGX_OCP_REGS_ENABLED)
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_OCPREGS))
+ {
+ OSUnMapPhysToLin(gpvOCPRegsLinAddr,
+ SYS_OMAP3430_OCP_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+ }
+#endif
+
+
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
+ {
+ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
+ return eError;
+ }
+ }
+
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
+ SysDeinitialiseCommon(gpsSysData);
+
+#if defined(NO_HARDWARE)
+ if(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV))
+ {
+
+ OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
+ }
+#endif
+
+
+ gpsSysSpecificData->ui32SysSpecificData = 0;
+ gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;
+
+ gpsSysData = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_VOID **ppvDeviceMap)
+{
+
+ switch(eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+
+ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
+
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
+ }
+ }
+ return PVRSRV_OK;
+}
+
+
+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CPU_PHYADDR CpuPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = CpuPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
+{
+ IMG_CPU_PHYADDR cpu_paddr;
+
+
+ cpu_paddr.uiAddr = sys_paddr.uiAddr;
+ return cpu_paddr;
+}
+
+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
+{
+ IMG_SYS_PHYADDR sys_paddr;
+
+
+ sys_paddr.uiAddr = cpu_paddr.uiAddr;
+ return sys_paddr;
+}
+
+
+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = SysPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+
+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
+{
+ IMG_SYS_PHYADDR SysPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ SysPAddr.uiAddr = DevPAddr.uiAddr;
+
+ return SysPAddr;
+}
+
+
+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#if defined(NO_HARDWARE)
+
+ return 0xFFFFFFFF;
+#else
+
+ return psDeviceNode->ui32SOCInterruptBit;
+#endif
+}
+
+
+IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
+
+
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM,
+ EUR_CR_EVENT_HOST_CLEAR);
+}
+
+
+PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D3)
+ {
+ PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+ eError = OSUninstallDeviceLISR(gpsSysData);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ }
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D0)
+ {
+ PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS))
+ {
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocksWrap failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ }
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ }
+#endif
+ }
+ return eError;
+}
+
+
+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3"));
+ DisableSGXClocks(gpsSysData);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eNewPowerState );
+#endif
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3"));
+ eError = EnableSGXClocksWrap(gpsSysData);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+#endif
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
+ IMG_VOID *pvIn,
+ IMG_UINT32 ulInSize,
+ IMG_VOID *pvOut,
+ IMG_UINT32 ulOutSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ID);
+ PVR_UNREFERENCED_PARAMETER(pvIn);
+ PVR_UNREFERENCED_PARAMETER(ulInSize);
+ PVR_UNREFERENCED_PARAMETER(pvOut);
+ PVR_UNREFERENCED_PARAMETER(ulOutSize);
+
+ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
+ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
+ {
+
+ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
+ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
+ return PVRSRV_OK;
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
diff --git a/drivers/gpu/pvr/omap3/sysconfig.h b/drivers/gpu/pvr/omap3/sysconfig.h
new file mode 100644
index 0000000..dd8a07d
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/sysconfig.h
@@ -0,0 +1,59 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SOCCONFIG_H__)
+#define __SOCCONFIG_H__
+
+#include "syscommon.h"
+
+#define VS_PRODUCT_NAME "OMAP3"
+
+#if defined(SGX530) && (SGX_CORE_REV == 125)
+#define SYS_SGX_CLOCK_SPEED 200000000
+#else
+#define SYS_SGX_CLOCK_SPEED 110666666
+#endif
+
+#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
+#define SYS_SGX_PDS_TIMER_FREQ (1000)
+
+#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (1)
+#endif
+
+
+#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE 0x50000000
+
+#define SYS_OMAP3430_SGX_REGS_SIZE 0x4000
+
+#define SYS_OMAP3430_SGX_IRQ 21
+
+#define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE 0x48088024
+#define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE 0x48088028
+#define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE 0x48088040
+
+
+#endif
diff --git a/drivers/gpu/pvr/omap3/sysinfo.h b/drivers/gpu/pvr/omap3/sysinfo.h
new file mode 100644
index 0000000..07718276
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/sysinfo.h
@@ -0,0 +1,40 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US (1000000)
+#else
+#define MAX_HW_TIME_US (500000)
+#endif
+
+#define WAIT_TRY_COUNT (10000)
+
+#define SYS_DEVICE_COUNT 3
+
+#endif
diff --git a/drivers/gpu/pvr/omap3/syslocal.h b/drivers/gpu/pvr/omap3/syslocal.h
new file mode 100644
index 0000000..5b0894f
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/syslocal.h
@@ -0,0 +1,136 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSLOCAL_H__)
+#define __SYSLOCAL_H__
+
+#if defined(__linux__)
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/mutex.h>
+#else
+#include <linux/spinlock.h>
+#endif
+#include <asm/atomic.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+#include <linux/semaphore.h>
+#include <linux/resource.h>
+#else
+#include <asm/semaphore.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+#include <asm/arch/resource.h>
+#endif
+#endif
+
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
+
+#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001
+#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002
+#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004
+#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008
+#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010
+#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020
+#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040
+#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080
+#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100
+
+#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
+#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+
+#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
+
+#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
+
+#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
+
+typedef struct _SYS_SPECIFIC_DATA_TAG_
+{
+ IMG_UINT32 ui32SysSpecificData;
+ PVRSRV_DEVICE_NODE *psSGXDevNode;
+ IMG_BOOL bSGXInitComplete;
+#if !defined(__linux__)
+ IMG_BOOL bSGXClocksEnabled;
+#endif
+ IMG_UINT32 ui32SrcClockDiv;
+#if defined(__linux__)
+ IMG_BOOL bSysClocksOneTimeInit;
+ atomic_t sSGXClocksEnabled;
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+ struct mutex sPowerLock;
+#else
+ IMG_BOOL bConstraintNotificationsEnabled;
+ spinlock_t sPowerLock;
+ atomic_t sPowerLockCPU;
+ spinlock_t sNotifyLock;
+ atomic_t sNotifyLockCPU;
+ IMG_BOOL bCallVDD2PostFunc;
+#endif
+ struct clk *psCORE_CK;
+ struct clk *psSGX_FCK;
+ struct clk *psSGX_ICK;
+ struct clk *psMPU_CK;
+#if defined(DEBUG) || defined(TIMING)
+ struct clk *psGPT11_FCK;
+ struct clk *psGPT11_ICK;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+ struct constraint_handle *pVdd2Handle;
+#endif
+#endif
+} SYS_SPECIFIC_DATA;
+
+extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
+
diff --git a/drivers/gpu/pvr/omap3/sysutils.c b/drivers/gpu/pvr/omap3/sysutils.c
new file mode 100644
index 0000000..d2c4231
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/sysutils.c
@@ -0,0 +1,33 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(__linux__)
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#include "sysutils_linux_wqueue_compat.c"
+#else
+#include "sysutils_linux.c"
+#endif
+#endif
diff --git a/drivers/gpu/pvr/omap3/sysutils_linux_wqueue_compat.c b/drivers/gpu/pvr/omap3/sysutils_linux_wqueue_compat.c
new file mode 100644
index 0000000..be90433
--- /dev/null
+++ b/drivers/gpu/pvr/omap3/sysutils_linux_wqueue_compat.c
@@ -0,0 +1,484 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <plat/omap-pm.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "sysinfo.h"
+#include "sgxapi_km.h"
+#include "sysconfig.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+
+#if !defined(PVR_LINUX_USING_WORKQUEUES)
+#error "PVR_LINUX_USING_WORKQUEUES must be defined"
+#endif
+
+#define ONE_MHZ 1000000
+#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
+
+#if defined(SUPPORT_OMAP3430_SGXFCLK_96M)
+#define SGX_PARENT_CLOCK "cm_96m_fck"
+#else
+#define SGX_PARENT_CLOCK "core_ck"
+#endif
+
+extern struct platform_device *gpsPVRLDMDev;
+#if defined(SGX530) && (SGX_CORE_REV == 125)
+#define OMAP_MEMORY_BUS_CLOCK_MAX 800000
+#else
+#define OMAP_MEMORY_BUS_CLOCK_MAX 664000
+#endif
+static IMG_VOID PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (!in_interrupt())
+ {
+ BUG_ON(in_atomic());
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+}
+
+static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (!in_interrupt())
+ {
+ BUG_ON(in_atomic());
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
+}
+
+PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PowerLockWrap(psSysSpecData);
+
+ return PVRSRV_OK;
+}
+
+IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PowerLockUnwrap(psSysSpecData);
+}
+
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ return IMG_TRUE;
+}
+
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+}
+
+static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
+{
+ if (rate1 >= rate2)
+ {
+ return val * (rate1 / rate2);
+ }
+
+ return val / (rate2 / rate1);
+}
+
+static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
+{
+ return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);
+}
+
+static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
+{
+ return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate);
+}
+
+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
+{
+ IMG_UINT32 rate;
+
+#if defined(NO_HARDWARE)
+ rate = SYS_SGX_CLOCK_SPEED;
+#else
+ PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
+
+ rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
+ PVR_ASSERT(rate != 0);
+#endif
+ psTimingInfo->ui32CoreClockSpeed = rate;
+ psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
+ psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
+}
+
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ long lNewRate;
+ long lRate;
+ IMG_INT res;
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
+
+#if defined(DEBUG)
+ {
+ IMG_UINT32 rate = clk_get_rate(psSysSpecData->psMPU_CK);
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: CPU Clock is %dMhz", HZ_TO_MHZ(rate)));
+ }
+#endif
+
+ res = clk_enable(psSysSpecData->psSGX_FCK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
+
+ res = clk_enable(psSysSpecData->psSGX_ICK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
+
+ clk_disable(psSysSpecData->psSGX_FCK);
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
+
+ lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
+ if (lNewRate <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
+ return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
+ }
+
+ lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ if (lRate != lNewRate)
+ {
+ res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
+ }
+ }
+
+#if defined(DEBUG)
+ {
+ IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
+ }
+#endif
+
+#if defined(SYS_OMAP3430_PIN_MEMORY_BUS_CLOCK)
+ omap_pm_set_min_bus_tput(&gpsPVRLDMDev->dev, OCP_INITIATOR_AGENT, OMAP_MEMORY_BUS_CLOCK_MAX);
+#endif
+
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
+
+#else /* !defined(NO_HARDWARE) */
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif /* !defined(NO_HARDWARE) */
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
+ {
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
+ if (psSysSpecData->psSGX_ICK)
+ {
+ clk_disable(psSysSpecData->psSGX_ICK);
+ }
+
+ if (psSysSpecData->psSGX_FCK)
+ {
+ clk_disable(psSysSpecData->psSGX_FCK);
+ }
+
+#if defined(SYS_OMAP3430_PIN_MEMORY_BUS_CLOCK)
+ omap_pm_set_min_bus_tput(&gpsPVRLDMDev->dev, OCP_INITIATOR_AGENT, 0);
+#endif
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif
+}
+
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ struct clk *psCLK;
+ IMG_INT res;
+ PVRSRV_ERROR eError;
+
+#if defined(DEBUG) || defined(TIMING)
+ IMG_INT rate;
+ struct clk *sys_ck;
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+
+#endif
+
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
+ {
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+ psCLK = clk_get(NULL, SGX_PARENT_CLOCK);
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get Core Clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psCORE_CK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_fck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psSGX_FCK = psCLK;
+
+ psCLK = clk_get(NULL, "sgx_ick");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psSGX_ICK = psCLK;
+
+#if defined(DEBUG)
+ psCLK = clk_get(NULL, "mpu_ck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get MPU Clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psMPU_CK = psCLK;
+#endif
+ res = clk_set_parent(psSysSpecData->psSGX_FCK, psSysSpecData->psCORE_CK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set SGX parent clock (%d)", res));
+ goto ExitError;
+ }
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
+ }
+
+#if defined(DEBUG) || defined(TIMING)
+
+ psCLK = clk_get(NULL, "gpt11_fck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
+ goto ExitUnRegisterConstraintNotifications;
+ }
+ psSysSpecData->psGPT11_FCK = psCLK;
+
+ psCLK = clk_get(NULL, "gpt11_ick");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
+ goto ExitUnRegisterConstraintNotifications;
+ }
+ psSysSpecData->psGPT11_ICK = psCLK;
+
+ sys_ck = clk_get(NULL, "sys_ck");
+ if (IS_ERR(sys_ck))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
+ goto ExitUnRegisterConstraintNotifications;
+ }
+
+ if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
+ {
+ PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
+ res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
+ goto ExitUnRegisterConstraintNotifications;
+ }
+ }
+
+ rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
+ PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
+
+ res = clk_enable(psSysSpecData->psGPT11_FCK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
+ goto ExitUnRegisterConstraintNotifications;
+ }
+
+ res = clk_enable(psSysSpecData->psGPT11_ICK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
+ goto ExitDisableGPT11FCK;
+ }
+
+
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerEnable);
+
+ if (pui32TimerEnable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
+ goto ExitDisableGPT11ICK;
+ }
+
+ rate = *pui32TimerEnable;
+ if(!(rate & 4))
+ {
+ PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
+
+
+ *pui32TimerEnable = rate | 4;
+ }
+
+ OSUnMapPhysToLin(pui32TimerEnable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerEnable);
+
+
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerEnable);
+
+ if (pui32TimerEnable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
+ goto ExitDisableGPT11ICK;
+ }
+
+
+ *pui32TimerEnable = 3;
+
+ OSUnMapPhysToLin(pui32TimerEnable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerEnable);
+
+#endif
+
+ eError = PVRSRV_OK;
+ goto Exit;
+
+#if defined(DEBUG) || defined(TIMING)
+ExitDisableGPT11ICK:
+ clk_disable(psSysSpecData->psGPT11_ICK);
+ExitDisableGPT11FCK:
+ clk_disable(psSysSpecData->psGPT11_FCK);
+ExitUnRegisterConstraintNotifications:
+#endif
+ExitError:
+ eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;
+Exit:
+ return eError;
+}
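In the DEBUG/TIMING timer setup above, the two magic numbers are GPTIMER register bits rather than arbitrary values: the read-modify-write of bit 2 (0x4) sets the POSTED bit of the timer's TSICR register so that register accesses are posted, and the write of 3 to the register behind SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE sets the TCLR ST and AR bits, starting GPTIMER11 with auto-reload. DisableSystemClocks below writes 0 to the same register to stop it.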
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+#if defined(DEBUG) || defined(TIMING)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+ IMG_HANDLE hTimerDisable;
+ IMG_UINT32 *pui32TimerDisable;
+#endif
+
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+
+ DisableSGXClocks(psSysData);
+
+#if defined(DEBUG) || defined(TIMING)
+
+ TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerDisable);
+
+ if (pui32TimerDisable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
+ }
+ else
+ {
+ *pui32TimerDisable = 0;
+
+ OSUnMapPhysToLin(pui32TimerDisable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerDisable);
+ }
+
+ clk_disable(psSysSpecData->psGPT11_ICK);
+
+ clk_disable(psSysSpecData->psGPT11_FCK);
+
+#endif
+}
diff --git a/drivers/gpu/pvr/omap4/oemfuncs.h b/drivers/gpu/pvr/omap4/oemfuncs.h
new file mode 100644
index 0000000..c8eea1e
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/oemfuncs.h
@@ -0,0 +1,56 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__OEMFUNCS_H__)
+#define __OEMFUNCS_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+typedef struct PVRSRV_DC_OEM_JTABLE_TAG
+{
+ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
+ IMG_PVOID pvDummy1;
+ IMG_PVOID pvDummy2;
+ IMG_PVOID pvDummy3;
+
+} PVRSRV_DC_OEM_JTABLE;
+
+#define OEM_GET_EXT_FUNCS (1<<1)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/omap4/sysconfig.c b/drivers/gpu/pvr/omap4/sysconfig.c
new file mode 100644
index 0000000..d4d2483
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysconfig.c
@@ -0,0 +1,1285 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include "sysconfig.h"
+#include "services_headers.h"
+#include "kerneldisplay.h"
+#include "oemfuncs.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+
+#include "ocpdefs.h"
+
+SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
+SYS_DATA gsSysData;
+
+static SYS_SPECIFIC_DATA gsSysSpecificData;
+SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+static IMG_UINT32 gui32SGXDeviceID;
+static SGX_DEVICE_MAP gsSGXDeviceMap;
+static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
+
+extern bool sgx_idle_logging;
+extern uint sgx_idle_mode;
+extern uint sgx_idle_timeout;
+extern uint sgx_apm_latency;
+
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
+#endif
+
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
+IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
+ IMG_BYTE *pInBuf,
+ IMG_UINT32 InBufLen,
+ IMG_BYTE *pOutBuf,
+ IMG_UINT32 OutBufLen,
+ IMG_UINT32 *pdwBytesTransferred);
+
+static void sgx_idle_init(void);
+
+#if defined(SGX_OCP_REGS_ENABLED)
+
+static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr;
+
+static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSGXClocks(psSysData);
+
+#if !defined(SGX_OCP_NO_INT_BYPASS)
+ if(eError == PVRSRV_OK)
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK);
+ }
+#endif
+ return eError;
+}
+
+#else
+
+static INLINE PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData)
+{
+ return EnableSGXClocks(psSysData);
+}
+
+#endif
+
+static INLINE PVRSRV_ERROR EnableSystemClocksWrap(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError = EnableSystemClocks(psSysData);
+
+#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if(eError == PVRSRV_OK)
+ {
+
+ eError = EnableSGXClocksWrap(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ DisableSystemClocks(psSysData);
+ }
+ }
+#endif
+
+ return eError;
+}
+
+static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
+{
+#if defined(NO_HARDWARE)
+ PVRSRV_ERROR eError;
+ IMG_CPU_PHYADDR sCpuPAddr;
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+ struct resource *dev_res;
+ int dev_irq;
+#endif
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+
+ gsSGXDeviceMap.ui32Flags = 0x0;
+
+#if defined(NO_HARDWARE)
+
+
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+
+ eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
+ &gsSGXRegsCPUVAddr,
+ &sCpuPAddr);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
+ gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
+#if defined(__linux__)
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+#else
+
+ gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
+#endif
+
+ OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);
+
+
+
+
+ gsSGXDeviceMap.ui32IRQ = 0;
+
+#else
+#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
+
+ dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
+ if (dev_res == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
+ if (dev_irq < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
+ gsSGXDeviceMap.sRegsCpuPBase =
+ SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));
+
+ gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+ PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));
+
+ gsSGXDeviceMap.ui32IRQ = dev_irq;
+ PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
+#else
+ gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE;
+ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
+ gsSGXDeviceMap.ui32RegsSize = SYS_OMAP4430_SGX_REGS_SIZE;
+
+ gsSGXDeviceMap.ui32IRQ = SYS_OMAP4430_SGX_IRQ;
+
+#endif
+#if defined(SGX_OCP_REGS_ENABLED)
+ gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ if (gsSGXRegsCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
+#endif
+#endif
+
+#if defined(PDUMP)
+ {
+
+ static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
+ gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
+ }
+#endif
+
+
+
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_CHAR *SysCreateVersionString(void)
+{
+ static IMG_CHAR aszVersionString[100];
+ SYS_DATA *psSysData;
+ IMG_UINT32 ui32SGXRevision;
+ IMG_INT32 i32Count;
+#if !defined(NO_HARDWARE)
+ IMG_VOID *pvRegsLinAddr;
+
+ pvRegsLinAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+ if(!pvRegsLinAddr)
+ {
+ return IMG_NULL;
+ }
+
+ ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
+ EUR_CR_CORE_REVISION);
+#else
+ ui32SGXRevision = 0;
+#endif
+
+ SysAcquireData(&psSysData);
+
+ i32Count = OSSNPrintf(aszVersionString, 100,
+ "SGX revision = %u.%u.%u",
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
+ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
+ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
+ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
+ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
+ );
+
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin(pvRegsLinAddr,
+ SYS_OMAP4430_SGX_REGS_SIZE,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+#endif
+
+ if(i32Count == -1)
+ {
+ return IMG_NULL;
+ }
+
+ return aszVersionString;
+}
+
+
+PVRSRV_ERROR SysInitialise(IMG_VOID)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+#if !defined(PVR_NO_OMAP_TIMER)
+ IMG_CPU_PHYADDR TimerRegPhysBase;
+#endif
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+ SGX_TIMING_INFORMATION* psTimingInfo;
+#endif
+ gpsSysData = &gsSysData;
+ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
+
+ gpsSysSpecificData = &gsSysSpecificData;
+ OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));
+
+ gpsSysData->pvSysSpecificData = gpsSysSpecificData;
+
+ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);
+
+ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
+
+
+ for(i=0; i<SYS_DEVICE_COUNT; i++)
+ {
+ gpsSysData->sDeviceID[i].uiID = i;
+ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
+ }
+
+ gpsSysData->psDeviceNodeList = IMG_NULL;
+ gpsSysData->psQueueList = IMG_NULL;
+
+ eError = SysInitialiseCommon(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+
+ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
+ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
+ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = sgx_apm_latency;
+ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
+#endif
+
+
+
+ gpsSysSpecificData->ui32SrcClockDiv = 3;
+
+
+
+
+
+ eError = SysLocateDevices(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
+
+ eError = SysPMRuntimeRegister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
+
+ eError = SysDvfsInitialize(gpsSysSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialize DVFS"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_DVFS_INIT);
+
+ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
+ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);
+
+
+
+
+
+ psDeviceNode = gpsSysData->psDeviceNodeList;
+ while(psDeviceNode)
+ {
+
+ switch(psDeviceNode->sDevId.eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+
+
+
+ psDeviceNode->psLocalDevMemArena = IMG_NULL;
+
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+
+ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
+ {
+ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
+ }
+
+ gpsSGXDevNode = psDeviceNode;
+ gsSysSpecificData.psSGXDevNode = psDeviceNode;
+
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+
+ psDeviceNode = psDeviceNode->psNext;
+ }
+
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+#endif
+
+ eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
+ (IMG_VOID)SysDeinitialise(gpsSysData);
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
+#else
+ TimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE;
+#endif
+ gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
+ gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
+ if (TimerRegPhysBase.uiAddr != 0)
+ {
+ OSReservePhys(TimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
+ &gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+#endif
+
+ sgx_idle_init();
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysFinalise(IMG_VOID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError));
+ return eError;
+ }
+#endif
+
+ eError = OSInstallMISR(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR);
+
+#if defined(SYS_USING_INTERRUPTS)
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR"));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ SysEnableSGXInterrupts(gpsSysData);
+#endif
+#endif
+#if defined(__linux__)
+
+ gpsSysData->pszVersionString = SysCreateVersionString();
+ if (!gpsSysData->pszVersionString)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+ }
+ else
+ {
+ PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+ }
+#endif
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+
+ DisableSGXClocks(gpsSysData);
+#endif
+
+ gpsSysSpecificData->bSGXInitComplete = IMG_TRUE;
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+ if(gpsSysData->pvSOCTimerRegisterKM)
+ {
+ OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
+ 4,
+ PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
+ gpsSysData->hSOCTimerRegisterOSMemHandle);
+ }
+
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+ eError = OSUninstallDeviceLISR(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
+ return eError;
+ }
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
+ {
+ eError = OSUninstallMISR(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
+ {
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));
+
+ eError = EnableSGXClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
+ return eError;
+ }
+#endif
+
+
+ eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_DVFS_INIT))
+ {
+ eError = SysDvfsDeinitialize(gpsSysSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to de-init DVFS"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
+ {
+ eError = SysPMRuntimeUnregister();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
+ gpsSysData = IMG_NULL;
+ return eError;
+ }
+ }
+
+
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+ }
+
+ if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
+ {
+ eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
+ return eError;
+ }
+ }
+
+ SysDeinitialiseCommon(gpsSysData);
+
+#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
+ if(gsSGXRegsCPUVAddr != IMG_NULL)
+ {
+#if defined(NO_HARDWARE)
+
+ OSBaseFreeContigMemory(SYS_OMAP4430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
+#else
+#if defined(SGX_OCP_REGS_ENABLED)
+ OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
+ gsSGXDeviceMap.ui32RegsSize,
+ PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
+ IMG_NULL);
+
+ gpvOCPRegsLinAddr = IMG_NULL;
+#endif
+#endif
+ gsSGXRegsCPUVAddr = IMG_NULL;
+ gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
+ }
+#endif
+
+
+ gpsSysSpecificData->ui32SysSpecificData = 0;
+ gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;
+
+ gpsSysData = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_VOID **ppvDeviceMap)
+{
+
+ switch(eDeviceType)
+ {
+ case PVRSRV_DEVICE_TYPE_SGX:
+ {
+
+ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
+
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
+ }
+ }
+ return PVRSRV_OK;
+}
+
+
+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CPU_PHYADDR CpuPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = CpuPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
+{
+ IMG_CPU_PHYADDR cpu_paddr;
+
+
+ cpu_paddr.uiAddr = sys_paddr.uiAddr;
+ return cpu_paddr;
+}
+
+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
+{
+ IMG_SYS_PHYADDR sys_paddr;
+
+
+ sys_paddr.uiAddr = cpu_paddr.uiAddr;
+ return sys_paddr;
+}
+
+
+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ DevPAddr.uiAddr = SysPAddr.uiAddr;
+
+ return DevPAddr;
+}
+
+
+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
+{
+ IMG_SYS_PHYADDR SysPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+
+
+ SysPAddr.uiAddr = DevPAddr.uiAddr;
+
+ return SysPAddr;
+}
+
+
+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+
+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+}
+
+IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#if defined(NO_HARDWARE)
+
+ return 0xFFFFFFFF;
+#else
+
+ return psDeviceNode->ui32SOCInterruptBit;
+#endif
+}
+
+
+IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#if !defined(NO_HARDWARE)
+#if defined(SGX_OCP_NO_INT_BYPASS)
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+#endif
+
+ OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR);
+#endif
+}
+
+#if defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1);
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1);
+ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData;
+
+ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED))
+ {
+ OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1);
+ SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED);
+ }
+}
+#endif
+
+PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D3)
+ {
+ PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+ eError = OSUninstallDeviceLISR(gpsSysData);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ }
+#endif
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
+ {
+ DisableSystemClocks(gpsSysData);
+
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ }
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D0)
+ {
+ PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
+
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS))
+ {
+ eError = EnableSystemClocksWrap(gpsSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocksWrap failed (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS);
+ }
+
+#if defined(SYS_USING_INTERRUPTS)
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR))
+ {
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData);
+#endif
+
+ eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+ if (bWrapped)
+ {
+ UnwrapSystemPowerChange(&gsSysSpecificData);
+ }
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
+ return eError;
+ }
+ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR);
+ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR);
+ }
+#endif
+ }
+ return eError;
+}
+
+
+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3"));
+ DisableSGXClocks(gpsSysData);
+ }
+#else
+	PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+#endif
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+
+ if (ui32DeviceIndex != gui32SGXDeviceID)
+ {
+ return eError;
+ }
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3"));
+ eError = EnableSGXClocksWrap(gpsSysData);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+#endif
+
+ return eError;
+}
+
+
+enum sgx_idle_event_type {
+ SGX_NONE = 0,
+ SGX_IDLE,
+ SGX_BUSY,
+ SGX_FLIP,
+ SGX_SLOW,
+ SGX_FAST,
+ SGX_OFF,
+ SGX_ON,
+};
+
+const char *sgx_idle_event_str[] = {
+ [SGX_NONE] = "none",
+ [SGX_IDLE] = " idle",
+ [SGX_BUSY] = " busy",
+ [SGX_FLIP] = "flip",
+ [SGX_SLOW] = " slow",
+ [SGX_FAST] = " fast",
+ [SGX_OFF] = " off",
+ [SGX_ON] = " on",
+};
+
+struct sgx_idle_event {
+ enum sgx_idle_event_type type;
+ ktime_t timestamp;
+};
+
+static struct sgx_idle_event sgx_idle_log[1024 * 10];
+static int sgx_idle_log_head;
+static int sgx_idle_log_tail;
+static DEFINE_MUTEX(sgx_idle_log_lock);
+
+static void sgx_idle_log_event(enum sgx_idle_event_type type)
+{
+ if (!sgx_idle_logging)
+ return;
+
+ mutex_lock(&sgx_idle_log_lock);
+
+ sgx_idle_log[sgx_idle_log_head].type = type;
+ sgx_idle_log[sgx_idle_log_head].timestamp = ktime_get();
+
+ sgx_idle_log_head++;
+ if (sgx_idle_log_head >= ARRAY_SIZE(sgx_idle_log))
+ sgx_idle_log_head = 0;
+ if (sgx_idle_log_head == sgx_idle_log_tail) {
+ sgx_idle_log_tail++;
+ if (sgx_idle_log_tail >= ARRAY_SIZE(sgx_idle_log))
+ sgx_idle_log_tail = 0;
+ }
+
+ mutex_unlock(&sgx_idle_log_lock);
+}
+
+void sgx_idle_log_flip(void)
+{
+ sgx_idle_log_event(SGX_FLIP);
+}
+
+void sgx_idle_log_on(void)
+{
+ sgx_idle_log_event(SGX_ON);
+}
+
+void sgx_idle_log_off(void)
+{
+ sgx_idle_log_event(SGX_OFF);
+}
+
+struct sgx_idle_seq_data {
+ struct sgx_idle_event log[ARRAY_SIZE(sgx_idle_log)];
+ int size;
+ int pos;
+};
+
+static void *sgx_idle_log_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct sgx_idle_seq_data *data = s->private;
+
+ if (*pos >= data->size)
+ return NULL;
+ data->pos = *pos;
+ return data;
+}
+
+static void sgx_idle_log_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static void *sgx_idle_log_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct sgx_idle_seq_data *data = v;
+
+ data->pos = ++(*pos);
+ if (data->pos >= data->size)
+ return NULL;
+
+ return data;
+}
+
+static int sgx_idle_log_find_next(struct sgx_idle_seq_data *data, int pos,
+ enum sgx_idle_event_type type)
+{
+ for(; pos < data->size; pos++) {
+ if (data->log[pos].type == type)
+ return pos;
+ }
+
+ return -1;
+}
+
+static int sgx_idle_log_seq_show(struct seq_file *s, void *v)
+{
+ struct sgx_idle_seq_data *data = v;
+ struct sgx_idle_event *e = &data->log[data->pos];
+ struct timespec ts = ktime_to_timespec(e->timestamp);
+ int next = -1;
+
+ seq_printf(s, "[%lu.%06lu] %s", ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
+ sgx_idle_event_str[e->type]);
+ if (e->type == SGX_IDLE)
+ next = sgx_idle_log_find_next(data, data->pos, SGX_BUSY);
+ else if (e->type == SGX_BUSY)
+ next = sgx_idle_log_find_next(data, data->pos, SGX_IDLE);
+
+ if (next > 0) {
+ struct sgx_idle_event *e1 = &data->log[next];
+ ktime_t diff = ktime_sub(e1->timestamp, e->timestamp);
+ ts = ktime_to_timespec(diff);
+
+ seq_printf(s, " for %lu.%06lu", ts.tv_sec,
+ ts.tv_nsec / NSEC_PER_USEC);
+ }
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations sgx_idle_log_seq_ops = {
+ .start = sgx_idle_log_seq_start,
+ .next = sgx_idle_log_seq_next,
+ .stop = sgx_idle_log_seq_stop,
+ .show = sgx_idle_log_seq_show,
+};
+
+static int sgx_idle_log_open(struct inode *inode, struct file *file)
+{
+ struct sgx_idle_seq_data *data;
+ struct seq_file *seq;
+ int ret;
+ int pos;
+
+ ret = seq_open(file, &sgx_idle_log_seq_ops);
+ if (ret < 0)
+ goto err;
+
+ data = vmalloc(sizeof(*data));
+ if (!data)
+ goto err_seq_release;
+
+ mutex_lock(&sgx_idle_log_lock);
+ data->size = 0;
+ pos = sgx_idle_log_tail;
+ while (pos != sgx_idle_log_head) {
+ data->log[data->size] = sgx_idle_log[pos];
+ data->size++;
+ pos++;
+ if (pos >= ARRAY_SIZE(sgx_idle_log))
+ pos = 0;
+ }
+ mutex_unlock(&sgx_idle_log_lock);
+
+ seq = file->private_data;
+ seq->private = data;
+
+ return 0;
+
+err_seq_release:
+ seq_release(inode, file);
+err:
+ return ret;
+}
+
+static int sgx_idle_log_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ seq = file->private_data;
+ vfree(seq->private);
+ return seq_release(inode, file);
+}
+
+static const struct file_operations sgx_idle_log_fops = {
+ .open = sgx_idle_log_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = sgx_idle_log_release,
+};
+
+static void sgx_idle_log_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("sgx_idle", S_IRUGO, NULL,
+ NULL, &sgx_idle_log_fops);
+ if (IS_ERR_OR_NULL(d))
+		PVR_DPF((PVR_DBG_ERROR,"Failed to create sgx_idle debug file"));
+}
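sgx_idle_log_open() above snapshots the ring buffer into a vmalloc'd copy while holding sgx_idle_log_lock, so a reader never races the logger and always sees a consistent trace. Below is a minimal userspace sketch for dumping that trace; it is illustrative only, not part of the patch, and assumes debugfs is mounted at /sys/kernel/debug (the file stays empty unless sgx_idle_logging is set).

/* dump_sgx_idle.c - hypothetical helper, not part of this patch */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/sgx_idle", "r");

	if (!f) {
		perror("sgx_idle");
		return 1;
	}
	/* Each record looks like "[12.345678]  idle for 0.004321" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}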
+
+static ktime_t sgx_idle_last_busy;
+static struct hrtimer sgx_idle_timer;
+static struct workqueue_struct *sgx_idle_wq;
+static struct work_struct sgx_idle_work;
+
+void RequestSGXFreq(SYS_DATA *psSysData, IMG_BOOL bMaxFreq);
+
+enum hrtimer_restart sgx_idle_timer_callback(struct hrtimer *timer)
+{
+ queue_work(sgx_idle_wq, &sgx_idle_work);
+ return HRTIMER_NORESTART;
+}
+
+void sgx_idle_work_func(struct work_struct *work)
+{
+ sgx_idle_log_event(SGX_SLOW);
+ RequestSGXFreq(gpsSysData, IMG_FALSE);
+}
+
+IMG_VOID SysSGXIdleTransition(IMG_BOOL bSGXIdle)
+{
+ int ret;
+
+ if (bSGXIdle) {
+ sgx_idle_log_event(SGX_IDLE);
+ if (sgx_idle_mode != 0) {
+ uint timeout = sgx_idle_timeout;
+
+ if (sgx_idle_mode == 2) {
+ ktime_t diff;
+
+ diff = ktime_sub(ktime_get(),
+ sgx_idle_last_busy);
+
+ if (ktime_to_ns(diff) < 2 * NSEC_PER_MSEC)
+ timeout = 3 * NSEC_PER_MSEC -
+ ktime_to_ns(diff);
+ }
+
+ hrtimer_start(&sgx_idle_timer,
+ ktime_set(0, timeout),
+ HRTIMER_MODE_REL);
+ }
+ } else {
+ if (sgx_idle_mode != 0) {
+ bool fast = true;
+
+ ret = hrtimer_cancel(&sgx_idle_timer);
+ if (ret)
+ fast = false;
+
+ ret = cancel_work_sync(&sgx_idle_work);
+ if (ret)
+ fast = false;
+
+ if (fast)
+ sgx_idle_log_event(SGX_FAST);
+
+ RequestSGXFreq(gpsSysData, IMG_TRUE);
+ }
+ sgx_idle_log_event(SGX_BUSY);
+ sgx_idle_last_busy = ktime_get();
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "SysSGXIdleTransition switch to %u", bSGXIdle));
+}
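To make the sgx_idle_mode == 2 branch above concrete: if SGX last went busy 0.5 ms before this idle transition, diff is 0.5 ms, which is inside the 2 ms window, so the slow-down timer is armed for 3 ms - 0.5 ms = 2.5 ms instead of the plain sgx_idle_timeout; rapid busy/idle toggling therefore keeps deferring the frequency drop rather than thrashing the OPP. With sgx_idle_mode == 0 the timer path is skipped entirely and RequestSGXFreq() is driven directly from EnableSGXClocks()/DisableSGXClocks().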
+
+static void sgx_idle_init(void)
+{
+ sgx_idle_log_init();
+	hrtimer_init(&sgx_idle_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ sgx_idle_timer.function = sgx_idle_timer_callback;
+ sgx_idle_wq = alloc_ordered_workqueue("sgx_idle", WQ_HIGHPRI);
+ INIT_WORK(&sgx_idle_work, sgx_idle_work_func);
+
+ /* XXX: need a sgx_idle_deinit() */
+}
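A minimal sketch of the teardown that the XXX above asks for, on the assumption that it only runs once no further idle transitions can occur; the dentry returned by debugfs_create_file() would also need to be kept in a static so it can be passed to debugfs_remove(), which the current code does not do.

static void sgx_idle_deinit(void)
{
	/* Cancel the timer first so no new slow-down work can be queued */
	hrtimer_cancel(&sgx_idle_timer);

	/* Wait for any slow-down request that already made it onto the queue */
	cancel_work_sync(&sgx_idle_work);

	/* Release the ordered workqueue allocated in sgx_idle_init() */
	destroy_workqueue(sgx_idle_wq);
}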
+
+PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
+ IMG_VOID *pvIn,
+ IMG_UINT32 ulInSize,
+ IMG_VOID *pvOut,
+ IMG_UINT32 ulOutSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ID);
+ PVR_UNREFERENCED_PARAMETER(pvIn);
+ PVR_UNREFERENCED_PARAMETER(ulInSize);
+ PVR_UNREFERENCED_PARAMETER(pvOut);
+ PVR_UNREFERENCED_PARAMETER(ulOutSize);
+
+ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
+ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
+ {
+
+ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
+ psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
+ return PVRSRV_OK;
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
diff --git a/drivers/gpu/pvr/omap4/sysconfig.h b/drivers/gpu/pvr/omap4/sysconfig.h
new file mode 100644
index 0000000..e31ff8f
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysconfig.h
@@ -0,0 +1,84 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SOCCONFIG_H__)
+#define __SOCCONFIG_H__
+
+#define VS_PRODUCT_NAME "OMAP4"
+
+#if defined(SGX540) && (SGX_CORE_REV == 120)
+#define SYS_SGX_CLOCK_SPEED 307200000
+#else
+#define SYS_SGX_CLOCK_SPEED 304742400
+#endif
+
+#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
+#define SYS_SGX_PDS_TIMER_FREQ (1000)
+
+#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS)
+#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2)
+#endif
+
+
+#define SYS_OMAP4430_SGX_REGS_SYS_PHYS_BASE 0x56000000
+#define SYS_OMAP4430_SGX_REGS_SIZE 0xFFFF
+
+#define SYS_OMAP4430_SGX_IRQ 53
+
+#define SYS_OMAP4430_DSS_REGS_SYS_PHYS_BASE 0x58000000
+#define SYS_OMAP4430_DSS_REGS_SIZE 0x7000
+
+#define SYS_OMAP4430_DSS_HDMI_INTERRUPT_STATUS_REG 0x6028
+#define SYS_OMAP4430_DSS_HDMI_INTERRUPT_ENABLE_REG 0x602c
+
+#define SYS_OMAP4430_DSS_HDMI_INTERRUPT_VSYNC_ENABLE_MASK 0x10000
+#define SYS_OMAP4430_DSS_HDMI_INTERRUPT_VSYNC_STATUS_MASK 0x10000
+
+#define SYS_OMAP4430_DSS_LCD_INTERRUPT_STATUS_REG 0x1018
+#define SYS_OMAP4430_DSS_LCD_INTERRUPT_ENABLE_REG 0x101c
+
+#define SYS_OMAP4430_DSS_LCD_INTERRUPT_VSYNC_ENABLE_MASK 0x40002
+#define SYS_OMAP4430_DSS_LCD_INTERRUPT_VSYNC_STATUS_MASK 0x40002
+
+
+#define SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE 0x48088038
+#define SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE 0x4808803C
+#define SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE 0x48088054
+
+#define DEVICE_SGX_INTERRUPT (1<<0)
+#define DEVICE_MSVDX_INTERRUPT (1<<1)
+#define DEVICE_DISP_INTERRUPT (1<<2)
+
+#if defined(__linux__)
+#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED_DEV)
+#define SYS_SGX_DEV_NAME PVR_LDM_PLATFORM_PRE_REGISTERED_DEV
+#else
+#define SYS_SGX_DEV_NAME "omap_gpu"
+#endif
+#endif
+
+
+#endif
diff --git a/drivers/gpu/pvr/omap4/sysinfo.h b/drivers/gpu/pvr/omap4/sysinfo.h
new file mode 100644
index 0000000..b6d3f5a
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysinfo.h
@@ -0,0 +1,41 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US (1000000)
+#define WAIT_TRY_COUNT (20000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define WAIT_TRY_COUNT (10000)
+#endif
+
+
+#define SYS_DEVICE_COUNT 15
+
+#endif
diff --git a/drivers/gpu/pvr/omap4/syslocal.h b/drivers/gpu/pvr/omap4/syslocal.h
new file mode 100644
index 0000000..9221886
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/syslocal.h
@@ -0,0 +1,230 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SYSLOCAL_H__)
+#define __SYSLOCAL_H__
+
+#if defined(__linux__)
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/mutex.h>
+#else
+#include <linux/spinlock.h>
+#endif
+#include <asm/atomic.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+#include <linux/semaphore.h>
+#include <linux/resource.h>
+#else
+#include <asm/semaphore.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+#include <asm/arch/resource.h>
+#endif
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if !defined(LDM_PLATFORM)
+#error "LDM_PLATFORM must be set"
+#endif
+#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO
+#include <linux/platform_device.h>
+#endif
+
+#if ((defined(DEBUG) || defined(TIMING)) && \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,34))) && \
+ !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP4_TIMING_PRCM
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#include <plat/gpu.h>
+#if !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP_USE_DM_TIMER_API
+#include <plat/dmtimer.h>
+#endif
+#endif
+
+#if !defined(PVR_NO_OMAP_TIMER)
+#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA
+#endif
+#endif
+
+#if !defined(NO_HARDWARE) && \
+ defined(SYS_USING_INTERRUPTS)
+#define SGX_OCP_REGS_ENABLED
+#endif
+
+#if defined(__linux__)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) && defined(SGX_OCP_REGS_ENABLED)
+#if !defined(SGX544)
+#define SGX_OCP_NO_INT_BYPASS
+#endif
+#endif
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
+
+#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001
+#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002
+#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004
+#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008
+#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010
+#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020
+#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040
+#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080
+#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100
+
+#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200
+#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400
+#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800
+#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000
+#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000
+#define SYS_SPECIFIC_DATA_DVFS_INIT 0x00004000
+
+#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
+
+#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
+
+#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
+
+typedef struct _SYS_SPECIFIC_DATA_TAG_
+{
+ IMG_UINT32 ui32SysSpecificData;
+ PVRSRV_DEVICE_NODE *psSGXDevNode;
+ IMG_BOOL bSGXInitComplete;
+#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+#endif
+#if !defined(__linux__)
+ IMG_BOOL bSGXClocksEnabled;
+#endif
+ IMG_UINT32 ui32SrcClockDiv;
+#if defined(__linux__)
+ IMG_BOOL bSysClocksOneTimeInit;
+ atomic_t sSGXClocksEnabled;
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+ struct mutex sPowerLock;
+#else
+ IMG_BOOL bConstraintNotificationsEnabled;
+ spinlock_t sPowerLock;
+ atomic_t sPowerLockCPU;
+ spinlock_t sNotifyLock;
+ atomic_t sNotifyLockCPU;
+ IMG_BOOL bCallVDD2PostFunc;
+#endif
+#if defined(DEBUG) || defined(TIMING)
+ struct clk *psGPT11_FCK;
+ struct clk *psGPT11_ICK;
+#endif
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+ struct omap_dm_timer *psGPTimer;
+#endif
+ IMG_UINT32 ui32SGXFreqListSize;
+ IMG_UINT32 *pui32SGXFreqList;
+ IMG_UINT32 ui32SGXFreqListIndex;
+#endif
+} SYS_SPECIFIC_DATA;
+
+extern SYS_SPECIFIC_DATA *gpsSysSpecificData;
+
+#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS)
+IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData);
+IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData);
+#else
+#define SysEnableSGXInterrupts(psSysData)
+#define SysDisableSGXInterrupts(psSysData)
+#endif
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData);
+#endif
+
+#if defined(__linux__)
+
+PVRSRV_ERROR SysPMRuntimeRegister(void);
+PVRSRV_ERROR SysPMRuntimeUnregister(void);
+
+PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData);
+PVRSRV_ERROR SysDvfsDeinitialize(SYS_SPECIFIC_DATA *psSysSpecificData);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeRegister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysPMRuntimeUnregister)
+#endif
+static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysDvfsInitialize)
+#endif
+static INLINE PVRSRV_ERROR SysDvfsInitialize(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysDvfsDeinitialize)
+#endif
+static INLINE PVRSRV_ERROR SysDvfsDeinitialize(void)
+{
+ return PVRSRV_OK;
+}
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
+
diff --git a/drivers/gpu/pvr/omap4/sysutils.c b/drivers/gpu/pvr/omap4/sysutils.c
new file mode 100644
index 0000000..f785cb1
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysutils.c
@@ -0,0 +1,31 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(__linux__)
+#include "sysutils_linux.c"
+#endif
+
+
diff --git a/drivers/gpu/pvr/omap4/sysutils_linux.c b/drivers/gpu/pvr/omap4/sysutils_linux.c
new file mode 100644
index 0000000..97c058a
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysutils_linux.c
@@ -0,0 +1,688 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "sysinfo.h"
+#include "sgxapi_km.h"
+#include "sysconfig.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/opp.h>
+
+#if defined(SUPPORT_DRI_DRM_PLUGIN)
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include <linux/omap_gpu.h>
+
+#include "pvr_drm.h"
+#endif
+
+#define ONE_MHZ 1000000
+#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
+
+#if defined(SUPPORT_OMAP3430_SGXFCLK_96M)
+#define SGX_PARENT_CLOCK "cm_96m_fck"
+#else
+#define SGX_PARENT_CLOCK "core_ck"
+#endif
+
+extern bool sgx_idle_logging;
+extern uint sgx_idle_mode;
+extern uint sgx_idle_timeout;
+extern uint sgx_apm_latency;
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+extern struct platform_device *gpsPVRLDMDev;
+#endif
+
+static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock)
+{
+ if (!in_interrupt())
+ {
+ if (bTryLock)
+ {
+ int locked = mutex_trylock(&psSysSpecData->sPowerLock);
+ if (locked == 0)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+ else
+ {
+ mutex_lock(&psSysSpecData->sPowerLock);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (!in_interrupt())
+ {
+ mutex_unlock(&psSysSpecData->sPowerLock);
+ }
+}
+
+PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock);
+}
+
+IMG_VOID SysPowerLockUnwrap(IMG_VOID)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ PowerLockUnwrap(psSysData->pvSysSpecificData);
+}
+
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ return IMG_TRUE;
+}
+
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+}
+
+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
+{
+#if !defined(NO_HARDWARE)
+ PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
+#endif
+ psTimingInfo->ui32CoreClockSpeed =
+ gpsSysSpecificData->pui32SGXFreqList[gpsSysSpecificData->ui32SGXFreqListIndex];
+ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
+ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = sgx_apm_latency;
+}
+
+void RequestSGXFreq(SYS_DATA *psSysData, IMG_BOOL bMaxFreq)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ struct gpu_platform_data *pdata;
+ IMG_UINT32 freq_index;
+ int res;
+
+ pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;
+ freq_index = bMaxFreq ? psSysSpecData->ui32SGXFreqListSize - 2 : 0;
+
+ if (psSysSpecData->ui32SGXFreqListIndex != freq_index)
+ {
+ PVR_ASSERT(pdata->device_scale != IMG_NULL);
+ res = pdata->device_scale(&gpsPVRLDMDev->dev,
+ &gpsPVRLDMDev->dev,
+ psSysSpecData->pui32SGXFreqList[freq_index]);
+
+ if (res == 0)
+ psSysSpecData->ui32SGXFreqListIndex = freq_index;
+ else if (res == -EBUSY)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
+ psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
+ }
+ else if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Unable to scale SGX frequency (%d)", res));
+ psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
+ }
+ }
+
+}
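RequestSGXFreq() above only ever deals in three positions of the frequency table, which is presumably populated by SysDvfsInitialize() (not part of this hunk):

/*
 * pui32SGXFreqList indexing as used by RequestSGXFreq():
 *
 *   index 0        - lowest OPP, requested on the idle/slow path
 *   index size - 2 - highest OPP, requested when SGX goes busy
 *   index size - 1 - parking slot for ui32SGXFreqListIndex after a failed
 *                    device_scale() call; the entry itself is presumably
 *                    filled with a safe value by SysDvfsInitialize(), since
 *                    SysGetSGXTimingInformation() reports list[index] as
 *                    the core clock speed
 */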
+
+void sgx_idle_log_on(void);
+void sgx_idle_log_off(void);
+
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ {
+ int res;
+
+ if (sgx_idle_mode == 0)
+ RequestSGXFreq(psSysData, IMG_TRUE);
+
+ res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
+ }
+ }
+#endif
+ SysEnableSGXInterrupts(psSysData);
+
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif
+
+ sgx_idle_log_on();
+
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
+ {
+ return;
+ }
+
+ sgx_idle_log_off();
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
+ SysDisableSGXInterrupts(psSysData);
+
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ {
+ int res;
+
+ res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
+ }
+
+ if (sgx_idle_mode == 0)
+ RequestSGXFreq(psSysData, IMG_FALSE);
+ }
+#endif
+
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif
+}
+
+#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER)
+#if defined(PVR_OMAP_USE_DM_TIMER_API)
+#define GPTIMER_TO_USE 11
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_ASSERT(psSysSpecData->psGPTimer == NULL);
+
+
+ psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE);
+ if (psSysSpecData->psGPTimer == NULL)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__));
+ return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+ }
+
+
+ omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK);
+ omap_dm_timer_enable(psSysSpecData->psGPTimer);
+
+
+ omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0);
+
+ omap_dm_timer_start(psSysSpecData->psGPTimer);
+
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_REGS_SYS_PHYS_BASE;
+
+ return PVRSRV_OK;
+}
+
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ if (psSysSpecData->psGPTimer != NULL)
+ {
+
+ (void) omap_dm_timer_stop(psSysSpecData->psGPTimer);
+
+ omap_dm_timer_disable(psSysSpecData->psGPTimer);
+
+ omap_dm_timer_free(psSysSpecData->psGPTimer);
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+
+ psSysSpecData->psGPTimer = NULL;
+ }
+
+}
+#else
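+/*
+ * Fallback variant: drive GPTIMER11 directly through its memory-mapped
+ * registers (optionally taking the functional/interface clocks via the
+ * PRCM), switch the timer to posted mode and start it with auto-reload.
+ */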
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+#if defined(PVR_OMAP4_TIMING_PRCM)
+ struct clk *psCLK;
+ IMG_INT res;
+ struct clk *sys_ck;
+ IMG_INT rate;
+#endif
+ PVRSRV_ERROR eError;
+
+ IMG_CPU_PHYADDR sTimerRegPhysBase;
+ IMG_HANDLE hTimerEnable;
+ IMG_UINT32 *pui32TimerEnable;
+
+ PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
+
+#if defined(PVR_OMAP4_TIMING_PRCM)
+
+ psCLK = clk_get(NULL, "gpt11_fck");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psGPT11_FCK = psCLK;
+
+ psCLK = clk_get(NULL, "gpt11_ick");
+ if (IS_ERR(psCLK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
+ goto ExitError;
+ }
+ psSysSpecData->psGPT11_ICK = psCLK;
+
+ sys_ck = clk_get(NULL, "sys_clkin_ck");
+ if (IS_ERR(sys_ck))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
+ goto ExitError;
+ }
+
+ if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
+ {
+ PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
+ res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
+ goto ExitError;
+ }
+ }
+
+ rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
+ PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));
+
+ res = clk_enable(psSysSpecData->psGPT11_FCK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
+ goto ExitError;
+ }
+
+ res = clk_enable(psSysSpecData->psGPT11_ICK);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
+ goto ExitDisableGPT11FCK;
+ }
+#endif
+
+
+ sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerEnable);
+
+ if (pui32TimerEnable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
+ goto ExitDisableGPT11ICK;
+ }
+
+ if(!(*pui32TimerEnable & 4))
+ {
+ PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
+
+
+ *pui32TimerEnable |= 4;
+ }
+
+ OSUnMapPhysToLin(pui32TimerEnable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerEnable);
+
+
+ sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
+ pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerEnable);
+
+ if (pui32TimerEnable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
+ goto ExitDisableGPT11ICK;
+ }
+
+
+ *pui32TimerEnable = 3;
+
+ OSUnMapPhysToLin(pui32TimerEnable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerEnable);
+
+ psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
+
+ eError = PVRSRV_OK;
+
+ goto Exit;
+
+ExitDisableGPT11ICK:
+#if defined(PVR_OMAP4_TIMING_PRCM)
+ clk_disable(psSysSpecData->psGPT11_ICK);
+ExitDisableGPT11FCK:
+ clk_disable(psSysSpecData->psGPT11_FCK);
+ExitError:
+#endif
+ eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
+Exit:
+ return eError;
+}
+
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ IMG_HANDLE hTimerDisable;
+ IMG_UINT32 *pui32TimerDisable;
+
+ if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0)
+ {
+ return;
+ }
+
+
+ pui32TimerDisable = OSMapPhysToLin(psSysSpecData->sTimerRegPhysBase,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ &hTimerDisable);
+
+ if (pui32TimerDisable == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
+ }
+ else
+ {
+ *pui32TimerDisable = 0;
+
+ OSUnMapPhysToLin(pui32TimerDisable,
+ 4,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ hTimerDisable);
+ }
+
+ psSysSpecData->sTimerRegPhysBase.uiAddr = 0;
+
+#if defined(PVR_OMAP4_TIMING_PRCM)
+ clk_disable(psSysSpecData->psGPT11_ICK);
+
+ clk_disable(psSysSpecData->psGPT11_FCK);
+#endif
+}
+#endif
+#else
+static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+
+ return PVRSRV_OK;
+}
+static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSysSpecData);
+}
+#endif
+
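+/*
+ * One-time initialisation of the power lock and the SGX clock-enabled flag,
+ * followed by acquisition of the GP timer (a no-op in builds without
+ * DEBUG/TIMING).
+ */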
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
+ {
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
+ }
+
+ return AcquireGPTimer(psSysSpecData);
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+
+ DisableSGXClocks(psSysData);
+
+ ReleaseGPTimer(psSysSpecData);
+}
+
+PVRSRV_ERROR SysPMRuntimeRegister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ pm_runtime_enable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysPMRuntimeUnregister(void)
+{
+#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
+ pm_runtime_disable(&gpsPVRLDMDev->dev);
+#endif
+ return PVRSRV_OK;
+}
+
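+/*
+ * Query the SGX OPPs once at initialisation time and cache them as the
+ * frequency list used by RequestSGXFreq(); see the comments below for the
+ * list layout.
+ */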
+PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
+{
+ IMG_INT32 opp_count;
+ IMG_UINT32 i, *freq_list;
+ struct opp *opp;
+ unsigned long freq;
+
+ /**
+ * We query and store the list of SGX frequencies just this once under the
+ * assumption that they are unchanging, e.g. no disabling of high frequency
+ * option for thermal management. This is currently valid for 4430 and 4460.
+ */
+ rcu_read_lock();
+ opp_count = opp_get_opp_count(&gpsPVRLDMDev->dev);
+ if (opp_count < 1)
+ {
+ rcu_read_unlock();
+ PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ /**
+ * Allocate the frequency list with a slot for each available frequency plus
+ * one additional slot to hold a designated frequency value to assume when in
+ * an unknown frequency state.
+ */
+ freq_list = kmalloc((opp_count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
+ if (!freq_list)
+ {
+ rcu_read_unlock();
+ PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /**
+ * Fill in frequency list from lowest to highest then finally the "unknown"
+ * frequency value. We use the highest available frequency as our assumed value
+ * when in an unknown state, because it is safer for APM and hardware recovery
+ * timers to be longer than intended rather than shorter.
+ */
+ freq = 0;
+ for (i = 0; i < opp_count; i++)
+ {
+ opp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &freq);
+ if (IS_ERR_OR_NULL(opp))
+ {
+ rcu_read_unlock();
+ PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", i));
+ kfree(freq_list);
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+ freq_list[i] = (IMG_UINT32)freq;
+ freq++;
+ }
+ rcu_read_unlock();
+ freq_list[opp_count] = freq_list[opp_count - 1];
+
+ psSysSpecificData->ui32SGXFreqListSize = opp_count + 1;
+ psSysSpecificData->pui32SGXFreqList = freq_list;
+ /* Start in unknown state - no frequency request to DVFS yet made */
+ psSysSpecificData->ui32SGXFreqListIndex = opp_count;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysDvfsDeinitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
+{
+ /**
+ * We assume this function is only called if SysDvfsInitialize() was
+ * completed successfully before.
+ *
+ * The DVFS interface does not allow us to actually unregister as a
+ * user of SGX, so we do the next best thing which is to lower our
+ * required frequency to the minimum if not already set. DVFS may
+ * report busy if early in initialization, but all other errors are
+ * considered serious.
+ */
+ if (psSysSpecificData->ui32SGXFreqListIndex != 0)
+ {
+ IMG_INT32 res;
+ struct gpu_platform_data *pdata;
+
+ pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;
+
+ PVR_ASSERT(pdata->device_scale != IMG_NULL);
+ res = pdata->device_scale(&gpsPVRLDMDev->dev,
+ &gpsPVRLDMDev->dev,
+ psSysSpecificData->pui32SGXFreqList[0]);
+
+ if (res == -EBUSY)
+ PVR_DPF((PVR_DBG_WARNING, "SysDvfsDeinitialize: Unable to scale SGX frequency (EBUSY)"));
+ else if (res < 0)
+ PVR_DPF((PVR_DBG_ERROR, "SysDvfsDeinitialize: Unable to scale SGX frequency (%d)", res));
+
+ psSysSpecificData->ui32SGXFreqListIndex = 0;
+ }
+
+ kfree(psSysSpecificData->pui32SGXFreqList);
+ psSysSpecificData->pui32SGXFreqList = 0;
+ psSysSpecificData->ui32SGXFreqListSize = 0;
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_DRI_DRM_PLUGIN)
+static struct omap_gpu_plugin sOMAPGPUPlugin;
+
+#define SYS_DRM_SET_PLUGIN_FIELD(d, s, f) (d)->f = (s)->f
+int
+SysDRMRegisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin)
+{
+ int iRes;
+
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, name);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, open);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, load);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, unload);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, release);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, mmap);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, ioctls);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, num_ioctls);
+ SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, ioctl_start);
+
+ iRes = omap_gpu_register_plugin(&sOMAPGPUPlugin);
+ if (iRes != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: omap_gpu_register_plugin failed (%d)", __FUNCTION__, iRes));
+ }
+
+ return iRes;
+}
+
+void
+SysDRMUnregisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin)
+{
+ int iRes = omap_gpu_unregister_plugin(&sOMAPGPUPlugin);
+ if (iRes != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: omap_gpu_unregister_plugin failed (%d)", __FUNCTION__, iRes));
+ }
+}
+#endif
diff --git a/drivers/gpu/pvr/omap4/sysutils_linux_wqueue_compat.c b/drivers/gpu/pvr/omap4/sysutils_linux_wqueue_compat.c
new file mode 100644
index 0000000..5aa875d
--- /dev/null
+++ b/drivers/gpu/pvr/omap4/sysutils_linux_wqueue_compat.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+
+#include <plat/gpu.h>
+#include <plat/omap-pm.h>
+#include <linux/pm_runtime.h>
+#include <plat/omap_device.h>
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "sysinfo.h"
+#include "sgxapi_km.h"
+#include "sysconfig.h"
+#include "sgxinfokm.h"
+#include "syslocal.h"
+
+#if !defined(PVR_LINUX_USING_WORKQUEUES)
+#error "PVR_LINUX_USING_WORKQUEUES must be defined"
+#endif
+
+#define ONE_MHZ 1000000
+#define HZ_TO_MHZ(m) ((m) / ONE_MHZ)
+
+#define LDM_DEV struct platform_device
+extern LDM_DEV *gpsPVRLDMDev;
+extern struct gpu_platform_data *gpsSgxPlatformData;
+
+
+#if !defined(NO_HARDWARE)
+
+static struct pm_qos_request_list *qos_request;
+
+#endif
+
+PVRSRV_ERROR SysPowerLockWrap(SYS_DATA unref__ *psSysData)
+{
+ return PVRSRV_OK;
+}
+
+IMG_VOID SysPowerLockUnwrap(SYS_DATA unref__ *psSysData)
+{
+}
+
+IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+ return IMG_TRUE;
+}
+
+IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData)
+{
+}
+
+static inline IMG_UINT32 scale_by_rate(IMG_UINT32 val, IMG_UINT32 rate1, IMG_UINT32 rate2)
+{
+ if (rate1 >= rate2)
+ {
+ return val * (rate1 / rate2);
+ }
+
+ return val / (rate2 / rate1);
+}
+
+static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
+{
+ return scale_by_rate(val, rate, SYS_SGX_CLOCK_SPEED);
+}
+
+static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
+{
+ return scale_by_rate(val, SYS_SGX_CLOCK_SPEED, rate);
+}
+
+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
+{
+ IMG_UINT32 rate;
+
+#if defined(NO_HARDWARE)
+ rate = SYS_SGX_CLOCK_SPEED;
+#else
+ PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);
+
+ rate = SYS_SGX_CLOCK_SPEED;
+ PVR_ASSERT(rate != 0);
+#endif
+ psTimingInfo->ui32CoreClockSpeed = rate;
+ psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
+ psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+ psTimingInfo->bEnableActivePM = IMG_TRUE;
+#else
+ psTimingInfo->bEnableActivePM = IMG_FALSE;
+#endif
+ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
+}
+
+PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
+ {
+ return PVRSRV_OK;
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));
+
+ pm_runtime_get_sync(&gpsPVRLDMDev->dev);
+ gpsSgxPlatformData->set_max_mpu_wakeup_lat(&qos_request, 0);
+ omap_device_set_rate(&gpsPVRLDMDev->dev,
+ &gpsPVRLDMDev->dev, SYS_SGX_CLOCK_SPEED);
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
+{
+#if !defined(NO_HARDWARE)
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+
+
+ if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
+ {
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));
+
+ pm_runtime_put_sync(&gpsPVRLDMDev->dev);
+ gpsSgxPlatformData->set_max_mpu_wakeup_lat(&qos_request, -1);
+ omap_device_set_rate(&gpsPVRLDMDev->dev, &gpsPVRLDMDev->dev, 0);
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+#endif
+}
+
+PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
+{
+ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
+ PVRSRV_ERROR eError;
+
+ PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
+
+ if (!psSysSpecData->bSysClocksOneTimeInit)
+ {
+ mutex_init(&psSysSpecData->sPowerLock);
+
+ atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);
+ psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
+ }
+
+ eError = PVRSRV_OK;
+
+ return eError;
+}
+
+IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
+{
+
+ PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
+
+ DisableSGXClocks(psSysData);
+
+}
diff --git a/drivers/gpu/pvr/omaplfb/omaplfb-sysfs.c b/drivers/gpu/pvr/omaplfb/omaplfb-sysfs.c
new file mode 100644
index 0000000..2e04583
--- /dev/null
+++ b/drivers/gpu/pvr/omaplfb/omaplfb-sysfs.c
@@ -0,0 +1,130 @@
+/*
+ * omaplfb-sysfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Author: Gustavo Diaz (gusdp@ti.com)
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omaplfb.h"
+
+static ssize_t show_ignore_sync(OMAPLFB_DEVINFO *display_info, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", display_info->ignore_sync);
+}
+
+static ssize_t store_ignore_sync(OMAPLFB_DEVINFO *display_info,
+ const char *buf, size_t count)
+{
+ unsigned long new_value;
+
+ if (strict_strtoul(buf, 10, &new_value))
+ return -EINVAL;
+
+ if (new_value == 0 || new_value == 1) {
+ display_info->ignore_sync = new_value;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
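+/*
+ * Per-display attribute wrapper: the kobject embedded in OMAPLFB_DEVINFO is
+ * recovered with container_of() so each show/store callback operates on the
+ * display it belongs to.
+ */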
+struct omaplfb_attribute {
+ struct attribute attr;
+ ssize_t (*show)(OMAPLFB_DEVINFO *, char *);
+ ssize_t (*store)(OMAPLFB_DEVINFO *, const char *, size_t);
+};
+
+static ssize_t omaplfb_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ OMAPLFB_DEVINFO *display_info;
+ struct omaplfb_attribute *omaplfb_attr;
+
+ display_info = container_of(kobj, OMAPLFB_DEVINFO, kobj);
+ omaplfb_attr = container_of(attr, struct omaplfb_attribute, attr);
+
+ if (!omaplfb_attr->show)
+ return -ENOENT;
+
+ return omaplfb_attr->show(display_info, buf);
+}
+
+static ssize_t omaplfb_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ OMAPLFB_DEVINFO *display_info;
+ struct omaplfb_attribute *omaplfb_attr;
+
+ display_info = container_of(kobj, OMAPLFB_DEVINFO, kobj);
+ omaplfb_attr = container_of(attr, struct omaplfb_attribute, attr);
+
+ if (!omaplfb_attr->store)
+ return -ENOENT;
+
+ return omaplfb_attr->store(display_info, buf, size);
+}
+
+#define OMAPLFB_ATTR(_name, _mode, _show, _store) \
+ struct omaplfb_attribute omaplfb_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static OMAPLFB_ATTR(ignore_sync, S_IRUGO|S_IWUSR, show_ignore_sync,
+ store_ignore_sync);
+
+#undef OMAPLFB_ATTR
+
+static struct attribute *omaplfb_sysfs_attrs[] = {
+ &omaplfb_attr_ignore_sync.attr,
+ NULL
+};
+
+static const struct sysfs_ops omaplfb_sysfs_ops = {
+ .show = omaplfb_attr_show,
+ .store = omaplfb_attr_store,
+};
+
+static struct kobj_type omaplfb_ktype = {
+ .sysfs_ops = &omaplfb_sysfs_ops,
+ .default_attrs = omaplfb_sysfs_attrs,
+};
+
+void omaplfb_create_sysfs(struct omaplfb_device *odev)
+{
+ int i, r;
+
+ /* Create a sysfs entry for every display */
+ for (i = 0; i < odev->display_count; i++) {
+ OMAPLFB_DEVINFO *display_info = &odev->display_info_list[i];
+ r = kobject_init_and_add(&display_info->kobj, &omaplfb_ktype,
+ &odev->dev->kobj, "display%d",
+ display_info->uDeviceID);
+ if (r)
+ ERROR_PRINTK("failed to create sysfs file\n");
+ }
+}
+
+void omaplfb_remove_sysfs(struct omaplfb_device *odev)
+{
+ int i;
+ for (i = 0; i < odev->display_count; i++) {
+ OMAPLFB_DEVINFO *display_info = &odev->display_info_list[i];
+ kobject_del(&display_info->kobj);
+ kobject_put(&display_info->kobj);
+ }
+}
diff --git a/drivers/gpu/pvr/omaplfb/omaplfb.h b/drivers/gpu/pvr/omaplfb/omaplfb.h
new file mode 100644
index 0000000..a416b15
--- /dev/null
+++ b/drivers/gpu/pvr/omaplfb/omaplfb.h
@@ -0,0 +1,290 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __OMAPLFB_H__
+#define __OMAPLFB_H__
+
+#include <linux/version.h>
+
+#include <asm/atomic.h>
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+#define OMAPLFB_CONSOLE_LOCK() console_lock()
+#define OMAPLFB_CONSOLE_UNLOCK() console_unlock()
+#else
+#define OMAPLFB_CONSOLE_LOCK() acquire_console_sem()
+#define OMAPLFB_CONSOLE_UNLOCK() release_console_sem()
+#endif
+
+#define unref__ __attribute__ ((unused))
+
+typedef void * OMAPLFB_HANDLE;
+
+typedef bool OMAPLFB_BOOL, *OMAPLFB_PBOOL;
+#define OMAPLFB_FALSE false
+#define OMAPLFB_TRUE true
+
+typedef atomic_t OMAPLFB_ATOMIC_BOOL;
+
+typedef atomic_t OMAPLFB_ATOMIC_INT;
+
+typedef struct OMAPLFB_BUFFER_TAG
+{
+ struct OMAPLFB_BUFFER_TAG *psNext;
+ struct OMAPLFB_DEVINFO_TAG *psDevInfo;
+
+ struct work_struct sWork;
+
+
+ unsigned long ulYOffset;
+
+
+
+ IMG_SYS_PHYADDR sSysAddr;
+ IMG_CPU_VIRTADDR sCPUVAddr;
+ PVRSRV_SYNC_DATA *psSyncData;
+
+ OMAPLFB_HANDLE hCmdComplete;
+ unsigned long ulSwapInterval;
+} OMAPLFB_BUFFER;
+
+typedef struct OMAPLFB_SWAPCHAIN_TAG
+{
+
+ unsigned int uiSwapChainID;
+
+
+ unsigned long ulBufferCount;
+
+
+ OMAPLFB_BUFFER *psBuffer;
+
+
+ struct workqueue_struct *psWorkQueue;
+
+
+ OMAPLFB_BOOL bNotVSynced;
+
+
+ int iBlankEvents;
+
+
+ unsigned int uiFBDevID;
+} OMAPLFB_SWAPCHAIN;
+
+typedef struct OMAPLFB_FBINFO_TAG
+{
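+/*
+ * Request either the highest real OPP (the second-to-last list entry; the
+ * final slot is the "unknown frequency" placeholder) or the lowest one via
+ * the platform device_scale() hook. On failure the index is parked on the
+ * "unknown" slot, so timing calculations assume the highest frequency.
+ */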
+ unsigned long ulFBSize;
+ unsigned long ulBufferSize;
+ unsigned long ulRoundedBufferSize;
+ unsigned long ulWidth;
+ unsigned long ulHeight;
+ unsigned long ulByteStride;
+ unsigned long ulPhysicalWidthmm;
+ unsigned long ulPhysicalHeightmm;
+
+
+
+ IMG_SYS_PHYADDR sSysAddr;
+ IMG_CPU_VIRTADDR sCPUVAddr;
+
+
+ PVRSRV_PIXEL_FORMAT ePixelFormat;
+
+ OMAPLFB_BOOL bIs2D;
+ IMG_SYS_PHYADDR *psPageList;
+#if defined(CONFIG_ION_OMAP)
+ struct ion_handle *psIONHandle;
+#endif
+ IMG_UINT32 uiBytesPerPixel;
+}OMAPLFB_FBINFO;
+
+typedef struct OMAPLFB_DEVINFO_TAG
+{
+
+ unsigned int uiFBDevID;
+
+
+ unsigned int uiPVRDevID;
+
+
+ struct mutex sCreateSwapChainMutex;
+
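+/*
+ * Power up SGX: a no-op if the clocks are already enabled, otherwise raise
+ * the SGX frequency (only when sgx_idle_mode is 0), take a runtime PM
+ * reference on the SGX device and re-enable its interrupts.
+ */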
+
+ OMAPLFB_BUFFER sSystemBuffer;
+
+
+ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
+
+
+ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
+
+
+ OMAPLFB_FBINFO sFBInfo;
+
+
+ OMAPLFB_SWAPCHAIN *psSwapChain;
+
+
+ unsigned int uiSwapChainID;
+
+
+ OMAPLFB_ATOMIC_BOOL sFlushCommands;
+
+
+ struct fb_info *psLINFBInfo;
+
+
+ struct notifier_block sLINNotifBlock;
+
+
+
+
+
+ IMG_DEV_VIRTADDR sDisplayDevVAddr;
+
+ DISPLAY_INFO sDisplayInfo;
+
+
+ DISPLAY_FORMAT sDisplayFormat;
+
+
+ DISPLAY_DIMS sDisplayDim;
+
+
+ OMAPLFB_ATOMIC_BOOL sBlanked;
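+/*
+ * Power down SGX: disable its interrupts, drop the runtime PM reference and,
+ * when sgx_idle_mode is 0, drop the frequency request back to the minimum.
+ */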
+
+
+ OMAPLFB_ATOMIC_INT sBlankEvents;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+ OMAPLFB_ATOMIC_BOOL sEarlySuspendFlag;
+
+ struct early_suspend sEarlySuspend;
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+ OMAPLFB_ATOMIC_BOOL sLeaveVT;
+#endif
+
+} OMAPLFB_DEVINFO;
+
+#define OMAPLFB_PAGE_SIZE 4096
+
+#ifdef DEBUG
+#define DEBUG_PRINTK(x) printk x
+#else
+#define DEBUG_PRINTK(x)
+#endif
+
+#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
+#define DRVNAME "omaplfb"
+#define DEVNAME DRVNAME
+#define DRIVER_PREFIX DRVNAME
+
+typedef enum _OMAPLFB_ERROR_
+{
+ OMAPLFB_OK = 0,
+ OMAPLFB_ERROR_GENERIC = 1,
+ OMAPLFB_ERROR_OUT_OF_MEMORY = 2,
+ OMAPLFB_ERROR_TOO_FEW_BUFFERS = 3,
+ OMAPLFB_ERROR_INVALID_PARAMS = 4,
+ OMAPLFB_ERROR_INIT_FAILURE = 5,
+ OMAPLFB_ERROR_CANT_REGISTER_CALLBACK = 6,
+ OMAPLFB_ERROR_INVALID_DEVICE = 7,
+ OMAPLFB_ERROR_DEVICE_REGISTER_FAILED = 8,
+ OMAPLFB_ERROR_SET_UPDATE_MODE_FAILED = 9
+} OMAPLFB_ERROR;
+
+typedef enum _OMAPLFB_UPDATE_MODE_
+{
+ OMAPLFB_UPDATE_MODE_UNDEFINED = 0,
+ OMAPLFB_UPDATE_MODE_MANUAL = 1,
+ OMAPLFB_UPDATE_MODE_AUTO = 2,
+ OMAPLFB_UPDATE_MODE_DISABLED = 3
+} OMAPLFB_UPDATE_MODE;
+
+#ifndef UNREFERENCED_PARAMETER
+#define UNREFERENCED_PARAMETER(param) (param) = (param)
+#endif
+
+OMAPLFB_ERROR OMAPLFBInit(void);
+OMAPLFB_ERROR OMAPLFBDeInit(void);
+
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID);
+unsigned OMAPLFBMaxFBDevIDPlusOne(void);
+void *OMAPLFBAllocKernelMem(unsigned long ulSize);
+void OMAPLFBFreeKernelMem(void *pvMem);
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue (OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain);
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer);
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer);
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode);
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo);
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal);
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic);
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal);
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic);
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic);
+
+#if defined(DEBUG)
+void OMAPLFBPrintInfo(OMAPLFB_DEVINFO *psDevInfo);
+#else
+#define OMAPLFBPrintInfo(psDevInfo)
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/omaplfb/omaplfb_displayclass.c b/drivers/gpu/pvr/omaplfb/omaplfb_displayclass.c
new file mode 100644
index 0000000..a70b454
--- /dev/null
+++ b/drivers/gpu/pvr/omaplfb/omaplfb_displayclass.c
@@ -0,0 +1,1545 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omaplfb.h"
+
+#if defined(CONFIG_ION_OMAP)
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+extern struct ion_client *gpsIONClient;
+#endif
+#if defined(CONFIG_TI_TILER)
+#include <mach/tiler.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+
+#endif
+
+#define OMAPLFB_COMMAND_COUNT 1
+
+#define OMAPLFB_VSYNC_SETTLE_COUNT 5
+
+#define OMAPLFB_MAX_NUM_DEVICES FB_MAX
+#if (OMAPLFB_MAX_NUM_DEVICES > FB_MAX)
+#error "OMAPLFB_MAX_NUM_DEVICES must not be greater than FB_MAX"
+#endif
+
+static OMAPLFB_DEVINFO *gapsDevInfo[OMAPLFB_MAX_NUM_DEVICES];
+
+static PFN_DC_GET_PVRJTABLE gpfnGetPVRJTable = NULL;
+
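+/*
+ * Arithmetic helpers: GCD uses Euclid's algorithm and LCM is derived from it.
+ * The LCM of the framebuffer line length and the page size gives the
+ * granularity to which per-buffer sizes are rounded.
+ */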
+static inline unsigned long RoundUpToMultiple(unsigned long x, unsigned long y)
+{
+ unsigned long div = x / y;
+ unsigned long rem = x % y;
+
+ return (div + ((rem == 0) ? 0 : 1)) * y;
+}
+
+static unsigned long GCD(unsigned long x, unsigned long y)
+{
+ while (y != 0)
+ {
+ unsigned long r = x % y;
+ x = y;
+ y = r;
+ }
+
+ return x;
+}
+
+static unsigned long LCM(unsigned long x, unsigned long y)
+{
+ unsigned long gcd = GCD(x, y);
+
+ return (gcd == 0) ? 0 : ((x / gcd) * y);
+}
+
+unsigned OMAPLFBMaxFBDevIDPlusOne(void)
+{
+ return OMAPLFB_MAX_NUM_DEVICES;
+}
+
+OMAPLFB_DEVINFO *OMAPLFBGetDevInfoPtr(unsigned uiFBDevID)
+{
+ WARN_ON(uiFBDevID >= OMAPLFBMaxFBDevIDPlusOne());
+
+ if (uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES)
+ {
+ return NULL;
+ }
+
+ return gapsDevInfo[uiFBDevID];
+}
+
+static inline void OMAPLFBSetDevInfoPtr(unsigned uiFBDevID, OMAPLFB_DEVINFO *psDevInfo)
+{
+ WARN_ON(uiFBDevID >= OMAPLFB_MAX_NUM_DEVICES);
+
+ if (uiFBDevID < OMAPLFB_MAX_NUM_DEVICES)
+ {
+ gapsDevInfo[uiFBDevID] = psDevInfo;
+ }
+}
+
+static inline OMAPLFB_BOOL SwapChainHasChanged(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+ return (psDevInfo->psSwapChain != psSwapChain) ||
+ (psDevInfo->uiSwapChainID != psSwapChain->uiSwapChainID);
+}
+
+static inline OMAPLFB_BOOL DontWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+ OMAPLFB_BOOL bDontWait;
+
+ bDontWait = OMAPLFBAtomicBoolRead(&psDevInfo->sBlanked) ||
+ OMAPLFBAtomicBoolRead(&psDevInfo->sFlushCommands);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+ bDontWait = bDontWait || OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT);
+#endif
+ return bDontWait;
+}
+
+static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
+{
+ OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
+
+ switch (ui32State)
+ {
+ case DC_STATE_FLUSH_COMMANDS:
+ OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_TRUE);
+ break;
+ case DC_STATE_NO_FLUSH_COMMANDS:
+ OMAPLFBAtomicBoolSet(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
+ break;
+ case DC_STATE_FORCE_SWAP_TO_SYSTEM:
+ OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+ break;
+ default:
+ break;
+ }
+}
+
+static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 uiPVRDevID,
+ IMG_HANDLE *phDevice,
+ PVRSRV_SYNC_DATA* psSystemBufferSyncData)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ OMAPLFB_ERROR eError;
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+
+ for (i = 0; i < uiMaxFBDevIDPlusOne; i++)
+ {
+ psDevInfo = OMAPLFBGetDevInfoPtr(i);
+ if (psDevInfo != NULL && psDevInfo->uiPVRDevID == uiPVRDevID)
+ {
+ break;
+ }
+ }
+ if (i == uiMaxFBDevIDPlusOne)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+ ": %s: PVR Device %u not found\n", __FUNCTION__, uiPVRDevID));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+
+ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
+
+ eError = OMAPLFBUnblankDisplay(psDevInfo);
+ if (eError != OMAPLFB_OK)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+ ": %s: Device %u: OMAPLFBUnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError));
+ return PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED;
+ }
+
+
+ *phDevice = (IMG_HANDLE)psDevInfo;
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
+{
+#if defined(SUPPORT_DRI_DRM)
+ OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevice;
+
+ OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+ (void) OMAPLFBUnblankDisplay(psDevInfo);
+#else
+ UNREFERENCED_PARAMETER(hDevice);
+#endif
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
+ IMG_UINT32 *pui32NumFormats,
+ DISPLAY_FORMAT *psFormat)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if(!hDevice || !pui32NumFormats)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+ *pui32NumFormats = 1;
+
+ if(psFormat)
+ {
+ psFormat[0] = psDevInfo->sDisplayFormat;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
+ DISPLAY_FORMAT *psFormat,
+ IMG_UINT32 *pui32NumDims,
+ DISPLAY_DIMS *psDim)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if(!hDevice || !psFormat || !pui32NumDims)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+ *pui32NumDims = 1;
+
+
+ if(psDim)
+ {
+ psDim[0] = psDevInfo->sDisplayDim;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if(!hDevice || !phBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+ *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if(!hDevice || !psDCInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+ *psDCInfo = psDevInfo->sDisplayInfo;
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice,
+ IMG_HANDLE hBuffer,
+ IMG_SYS_PHYADDR **ppsSysAddr,
+ IMG_UINT32 *pui32ByteSize,
+ IMG_VOID **ppvCpuVAddr,
+ IMG_HANDLE *phOSMapInfo,
+ IMG_BOOL *pbIsContiguous,
+ IMG_UINT32 *pui32TilingStride)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ OMAPLFB_BUFFER *psSystemBuffer;
+
+ UNREFERENCED_PARAMETER(pui32TilingStride);
+
+ if(!hDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(!hBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!ppsSysAddr)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!pui32ByteSize)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+ psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
+
+ *ppsSysAddr = &psSystemBuffer->sSysAddr;
+
+ *pui32ByteSize = (IMG_UINT32)psDevInfo->sFBInfo.ulBufferSize;
+
+ if (ppvCpuVAddr)
+ {
+ *ppvCpuVAddr = psDevInfo->sFBInfo.bIs2D ? NULL : psSystemBuffer->sCPUVAddr;
+ }
+
+ if (phOSMapInfo)
+ {
+ *phOSMapInfo = (IMG_HANDLE)0;
+ }
+
+ if (pbIsContiguous)
+ {
+ *pbIsContiguous = !psDevInfo->sFBInfo.bIs2D;
+ }
+
+#if defined(CONFIG_TI_TILER)
+ if (psDevInfo->sFBInfo.bIs2D) {
+ int i = (psSystemBuffer->sSysAddr.uiAddr - psDevInfo->sFBInfo.psPageList->uiAddr) >> PAGE_SHIFT;
+ *ppsSysAddr = psDevInfo->sFBInfo.psPageList + psDevInfo->sFBInfo.ulHeight * i;
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
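+/*
+ * Create a swap chain: the surface attributes must match the current display
+ * mode, the requested buffers are carved out of the framebuffer memory as a
+ * circular list, and a swap work queue plus framebuffer event notification
+ * are set up before the chain is published on the device.
+ */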
+static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
+ IMG_UINT32 ui32Flags,
+ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+ IMG_UINT32 ui32BufferCount,
+ PVRSRV_SYNC_DATA **ppsSyncData,
+ IMG_UINT32 ui32OEMFlags,
+ IMG_HANDLE *phSwapChain,
+ IMG_UINT32 *pui32SwapChainID)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ OMAPLFB_SWAPCHAIN *psSwapChain;
+ OMAPLFB_BUFFER *psBuffer;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BuffersToSkip;
+
+ UNREFERENCED_PARAMETER(ui32OEMFlags);
+
+
+ if(!hDevice
+ || !psDstSurfAttrib
+ || !psSrcSurfAttrib
+ || !ppsSyncData
+ || !phSwapChain)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+
+
+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChains == 0)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+
+ if(psDevInfo->psSwapChain != NULL)
+ {
+ eError = PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
+ goto ExitUnLock;
+ }
+
+
+ if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
+ {
+ eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+ goto ExitUnLock;
+ }
+
+ if ((psDevInfo->sFBInfo.ulRoundedBufferSize * (unsigned long)ui32BufferCount) > psDevInfo->sFBInfo.ulFBSize)
+ {
+ eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+ goto ExitUnLock;
+ }
+
+
+ ui32BuffersToSkip = psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers - ui32BufferCount;
+
+
+ if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
+ || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
+ || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
+ || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
+ {
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnLock;
+ }
+
+ if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
+ || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
+ || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
+ || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
+ {
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnLock;
+ }
+
+ UNREFERENCED_PARAMETER(ui32Flags);
+
+#if defined(PVR_OMAPFB3_UPDATE_MODE)
+ if (!OMAPLFBSetUpdateMode(psDevInfo, PVR_OMAPFB3_UPDATE_MODE))
+ {
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't set frame buffer update mode %d\n", __FUNCTION__, psDevInfo->uiFBDevID, PVR_OMAPFB3_UPDATE_MODE);
+ }
+#endif
+
+ psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
+ if(!psSwapChain)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ExitUnLock;
+ }
+
+ psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
+ if(!psBuffer)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorFreeSwapChain;
+ }
+
+ psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
+ psSwapChain->psBuffer = psBuffer;
+ psSwapChain->bNotVSynced = OMAPLFB_TRUE;
+ psSwapChain->uiFBDevID = psDevInfo->uiFBDevID;
+
+
+ for(i=0; i<ui32BufferCount-1; i++)
+ {
+ psBuffer[i].psNext = &psBuffer[i+1];
+ }
+
+ psBuffer[i].psNext = &psBuffer[0];
+
+ for(i=0; i<ui32BufferCount; i++)
+ {
+ IMG_UINT32 ui32SwapBuffer = i + ui32BuffersToSkip;
+ IMG_UINT32 ui32BufferOffset = ui32SwapBuffer * (IMG_UINT32)psDevInfo->sFBInfo.ulRoundedBufferSize;
+ if (psDevInfo->sFBInfo.bIs2D)
+ {
+ ui32BufferOffset = 0;
+ }
+
+ psBuffer[i].psSyncData = ppsSyncData[i];
+ psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + ui32BufferOffset;
+ psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + ui32BufferOffset;
+ psBuffer[i].ulYOffset = ui32BufferOffset / psDevInfo->sFBInfo.ulByteStride;
+ if (psDevInfo->sFBInfo.bIs2D)
+ {
+ psBuffer[i].sSysAddr.uiAddr += ui32SwapBuffer *
+ ALIGN((IMG_UINT32)psDevInfo->sFBInfo.ulWidth * psDevInfo->sFBInfo.uiBytesPerPixel, PAGE_SIZE);
+ }
+ psBuffer[i].psDevInfo = psDevInfo;
+ OMAPLFBInitBufferForSwap(&psBuffer[i]);
+ }
+
+ if (OMAPLFBCreateSwapQueue(psSwapChain) != OMAPLFB_OK)
+ {
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Failed to create workqueue\n", __FUNCTION__, psDevInfo->uiFBDevID);
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto ErrorFreeBuffers;
+ }
+
+ if (OMAPLFBEnableLFBEventNotification(psDevInfo)!= OMAPLFB_OK)
+ {
+ eError = PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT;
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't enable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
+ goto ErrorDestroySwapQueue;
+ }
+
+ psDevInfo->uiSwapChainID++;
+ if (psDevInfo->uiSwapChainID == 0)
+ {
+ psDevInfo->uiSwapChainID++;
+ }
+
+ psSwapChain->uiSwapChainID = psDevInfo->uiSwapChainID;
+
+ psDevInfo->psSwapChain = psSwapChain;
+
+ *pui32SwapChainID = psDevInfo->uiSwapChainID;
+
+ *phSwapChain = (IMG_HANDLE)psSwapChain;
+
+ eError = PVRSRV_OK;
+ goto ExitUnLock;
+
+ErrorDestroySwapQueue:
+ OMAPLFBDestroySwapQueue(psSwapChain);
+ErrorFreeBuffers:
+ OMAPLFBFreeKernelMem(psBuffer);
+ErrorFreeSwapChain:
+ OMAPLFBFreeKernelMem(psSwapChain);
+ExitUnLock:
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+ return eError;
+}
+
+static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ OMAPLFB_SWAPCHAIN *psSwapChain;
+ OMAPLFB_ERROR eError;
+
+
+ if(!hDevice || !hSwapChain)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+ psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
+
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+ if (SwapChainHasChanged(psDevInfo, psSwapChain))
+ {
+ printk(KERN_WARNING DRIVER_PREFIX
+ ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnLock;
+ }
+
+
+ OMAPLFBDestroySwapQueue(psSwapChain);
+
+ eError = OMAPLFBDisableLFBEventNotification(psDevInfo);
+ if (eError != OMAPLFB_OK)
+ {
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't disable framebuffer event notification\n", __FUNCTION__, psDevInfo->uiFBDevID);
+ }
+
+
+ OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
+ OMAPLFBFreeKernelMem(psSwapChain);
+
+ psDevInfo->psSwapChain = NULL;
+
+ OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+ (void) OMAPLFBCheckModeAndSync(psDevInfo);
+
+ eError = PVRSRV_OK;
+
+ExitUnLock:
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+ return eError;
+}
+
+static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hSwapChain);
+ UNREFERENCED_PARAMETER(psRect);
+
+
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hSwapChain);
+ UNREFERENCED_PARAMETER(psRect);
+
+
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hSwapChain);
+ UNREFERENCED_PARAMETER(ui32CKColour);
+
+
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hSwapChain);
+ UNREFERENCED_PARAMETER(ui32CKColour);
+
+
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 *pui32BufferCount,
+ IMG_HANDLE *phBuffer)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ OMAPLFB_SWAPCHAIN *psSwapChain;
+ PVRSRV_ERROR eError;
+ unsigned i;
+
+
+ if(!hDevice
+ || !hSwapChain
+ || !pui32BufferCount
+ || !phBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
+ psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
+
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+ if (SwapChainHasChanged(psDevInfo, psSwapChain))
+ {
+ printk(KERN_WARNING DRIVER_PREFIX
+ ": %s: Device %u: Swap chain mismatch\n", __FUNCTION__, psDevInfo->uiFBDevID);
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+
+ *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
+
+
+ for(i=0; i<psSwapChain->ulBufferCount; i++)
+ {
+ phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
+ }
+
+ eError = PVRSRV_OK;
+
+Exit:
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+ return eError;
+}
+
+static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
+ IMG_HANDLE hBuffer,
+ IMG_UINT32 ui32SwapInterval,
+ IMG_HANDLE hPrivateTag,
+ IMG_UINT32 ui32ClipRectCount,
+ IMG_RECT *psClipRect)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hBuffer);
+ UNREFERENCED_PARAMETER(ui32SwapInterval);
+ UNREFERENCED_PARAMETER(hPrivateTag);
+ UNREFERENCED_PARAMETER(ui32ClipRectCount);
+ UNREFERENCED_PARAMETER(psClipRect);
+
+
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
+ IMG_HANDLE hSwapChain)
+{
+ UNREFERENCED_PARAMETER(hDevice);
+ UNREFERENCED_PARAMETER(hSwapChain);
+
+
+ return PVRSRV_OK;
+}
+
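+/*
+ * Wait for several consecutive VSyncs so the display has settled (e.g. after
+ * a blank/unblank) before relying on VSync timing again.
+ */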
+static OMAPLFB_BOOL WaitForVSyncSettle(OMAPLFB_DEVINFO *psDevInfo)
+{
+ unsigned i;
+ for(i = 0; i < OMAPLFB_VSYNC_SETTLE_COUNT; i++)
+ {
+ if (DontWaitForVSync(psDevInfo) || !OMAPLFBWaitForVSync(psDevInfo))
+ {
+ return OMAPLFB_FALSE;
+ }
+ }
+
+ return OMAPLFB_TRUE;
+}
+
+void OMAPLFBSwapHandler(OMAPLFB_BUFFER *psBuffer)
+{
+ OMAPLFB_DEVINFO *psDevInfo = psBuffer->psDevInfo;
+ OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
+ OMAPLFB_BOOL bPreviouslyNotVSynced;
+
+#if defined(SUPPORT_DRI_DRM)
+ if (!OMAPLFBAtomicBoolRead(&psDevInfo->sLeaveVT))
+#endif
+ {
+ OMAPLFBFlip(psDevInfo, psBuffer);
+ }
+
+ bPreviouslyNotVSynced = psSwapChain->bNotVSynced;
+ psSwapChain->bNotVSynced = OMAPLFB_TRUE;
+
+
+ if (!DontWaitForVSync(psDevInfo))
+ {
+ OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+ int iBlankEvents = OMAPLFBAtomicIntRead(&psDevInfo->sBlankEvents);
+
+ switch(eMode)
+ {
+ case OMAPLFB_UPDATE_MODE_AUTO:
+ psSwapChain->bNotVSynced = OMAPLFB_FALSE;
+
+ if (bPreviouslyNotVSynced || psSwapChain->iBlankEvents != iBlankEvents)
+ {
+ psSwapChain->iBlankEvents = iBlankEvents;
+ psSwapChain->bNotVSynced = !WaitForVSyncSettle(psDevInfo);
+ } else if (psBuffer->ulSwapInterval != 0)
+ {
+ psSwapChain->bNotVSynced = !OMAPLFBWaitForVSync(psDevInfo);
+ }
+ break;
+#if defined(PVR_OMAPFB3_MANUAL_UPDATE_SYNC_IN_SWAP)
+ case OMAPLFB_UPDATE_MODE_MANUAL:
+ if (psBuffer->ulSwapInterval != 0)
+ {
+ (void) OMAPLFBManualSync(psDevInfo);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
+ psDevInfo->sPVRJTable.pfnPVRSRVCmdComplete((IMG_HANDLE)psBuffer->hCmdComplete, IMG_TRUE);
+}
+
+#if defined(CONFIG_DSSCOMP)
+
+#include <mach/tiler.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+
+void sgx_idle_log_flip(void);
+
+static void dsscomp_proxy_cmdcomplete(void * cookie, int i)
+{
+ sgx_idle_log_flip();
+ /* XXX: assumes that there is only one display */
+ gapsDevInfo[0]->sPVRJTable.pfnPVRSRVCmdComplete(cookie, i);
+}
+
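+/*
+ * Single-buffer flip path: TILER-addressed buffers are submitted to dsscomp
+ * as a one-overlay composition and complete via dsscomp_proxy_cmdcomplete();
+ * all other buffers are queued on the swap-chain work queue.
+ */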
+static IMG_BOOL ProcessFlipV1(IMG_HANDLE hCmdCookie,
+ OMAPLFB_DEVINFO *psDevInfo,
+ OMAPLFB_SWAPCHAIN *psSwapChain,
+ OMAPLFB_BUFFER *psBuffer,
+ unsigned long ulSwapInterval)
+{
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+
+ if (SwapChainHasChanged(psDevInfo, psSwapChain))
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
+ ": %s: Device %u (PVR Device ID %u): The swap chain has been destroyed\n",
+ __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
+ }
+ else
+ {
+ psBuffer->hCmdComplete = (OMAPLFB_HANDLE)hCmdCookie;
+ psBuffer->ulSwapInterval = ulSwapInterval;
+ if (is_tiler_addr(psBuffer->sSysAddr.uiAddr)) {
+ IMG_UINT32 w = psBuffer->psDevInfo->sDisplayDim.ui32Width;
+ IMG_UINT32 h = psBuffer->psDevInfo->sDisplayDim.ui32Height;
+ struct dsscomp_setup_dispc_data comp = {
+ .num_mgrs = 1,
+ .mgrs[0].alpha_blending = 1,
+ .num_ovls = 1,
+ .ovls[0].cfg = {
+ .width = w,
+ .win.w = w,
+ .crop.w = w,
+ .height = h,
+ .win.h = h,
+ .crop.h = h,
+ .stride = psBuffer->psDevInfo->sDisplayDim.ui32ByteStride,
+ .color_mode = OMAP_DSS_COLOR_ARGB32,
+ .enabled = 1,
+ .global_alpha = 255,
+ },
+ .mode = DSSCOMP_SETUP_DISPLAY,
+ };
+ struct tiler_pa_info *pas[1] = { NULL };
+ comp.ovls[0].ba = (u32) psBuffer->sSysAddr.uiAddr;
+ dsscomp_gralloc_queue(&comp, pas, true,
+ dsscomp_proxy_cmdcomplete,
+ (void *) psBuffer->hCmdComplete);
+ } else {
+ OMAPLFBQueueBufferForSwap(psSwapChain, psBuffer);
+ }
+ }
+
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+ return IMG_TRUE;
+}
+
+#include "servicesint.h"
+#include "services.h"
+#include "mm.h"
+
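+/*
+ * Multi-overlay (Post2) flip path: each meminfo is resolved either to a TILER
+ * address (NV12 consumes two meminfos) or to a page list for dsscomp, the
+ * overlay base addresses in the DSS composition data are patched accordingly,
+ * and the composition is queued before the temporary page lists are freed.
+ */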
+static IMG_BOOL ProcessFlipV2(IMG_HANDLE hCmdCookie,
+ OMAPLFB_DEVINFO *psDevInfo,
+ PDC_MEM_INFO *ppsMemInfos,
+ IMG_UINT32 ui32NumMemInfos,
+ struct dsscomp_setup_dispc_data *psDssData,
+ IMG_UINT32 uiDssDataLength)
+{
+ struct tiler_pa_info *apsTilerPAs[5];
+ IMG_UINT32 i, k;
+ struct {
+ IMG_UINTPTR_T uiAddr;
+ IMG_UINTPTR_T uiUVAddr;
+ struct tiler_pa_info *psTilerInfo;
+ } asMemInfo[5];
+
+ memset(asMemInfo, 0, sizeof(asMemInfo));
+
+ if(uiDssDataLength != sizeof(*psDssData))
+ {
+		WARN(1, "invalid size of private data (%u vs %u)",
+				uiDssDataLength, (IMG_UINT32)sizeof(*psDssData));
+ return IMG_FALSE;
+ }
+
+ if(psDssData->num_ovls == 0 || ui32NumMemInfos == 0)
+ {
+ WARN(1, "must have at least one layer");
+ return IMG_FALSE;
+ }
+
+ for(i = k = 0; i < ui32NumMemInfos && k < ARRAY_SIZE(apsTilerPAs); i++, k++)
+ {
+ struct tiler_pa_info *psTilerInfo;
+ IMG_CPU_VIRTADDR virtAddr;
+ IMG_CPU_PHYADDR phyAddr;
+ IMG_UINT32 ui32NumPages;
+ IMG_SIZE_T uByteSize;
+ int j;
+
+ psDevInfo->sPVRJTable.pfnPVRSRVDCMemInfoGetByteSize(ppsMemInfos[i], &uByteSize);
+ ui32NumPages = (uByteSize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ psDevInfo->sPVRJTable.pfnPVRSRVDCMemInfoGetCpuPAddr(ppsMemInfos[i], 0, &phyAddr);
+
+ /* TILER buffers do not need meminfos */
+ if(is_tiler_addr((u32)phyAddr.uiAddr))
+ {
+ asMemInfo[k].uiAddr = phyAddr.uiAddr;
+ if (tiler_fmt((u32)phyAddr.uiAddr) == TILFMT_8BIT) {
+ /* NV12 buffers have 2 meminfos */
+ BUG_ON(i + 1 >= ui32NumMemInfos);
+ i++;
+ psDevInfo->sPVRJTable.pfnPVRSRVDCMemInfoGetCpuPAddr(ppsMemInfos[i], 0, &phyAddr);
+ asMemInfo[k].uiUVAddr = phyAddr.uiAddr;
+ }
+ continue;
+ }
+
+ /* normal gralloc layer */
+ psTilerInfo = kzalloc(sizeof(*psTilerInfo), GFP_KERNEL);
+ if(!psTilerInfo)
+ {
+ continue;
+ }
+
+ psTilerInfo->mem = kzalloc(sizeof(*psTilerInfo->mem) * ui32NumPages, GFP_KERNEL);
+ if(!psTilerInfo->mem)
+ {
+ kfree(psTilerInfo);
+ continue;
+ }
+
+ psTilerInfo->num_pg = ui32NumPages;
+ psTilerInfo->memtype = TILER_MEM_USING;
+ for(j = 0; j < ui32NumPages; j++)
+ {
+ psDevInfo->sPVRJTable.pfnPVRSRVDCMemInfoGetCpuPAddr(ppsMemInfos[i], j << PAGE_SHIFT, &phyAddr);
+ psTilerInfo->mem[j] = (u32)phyAddr.uiAddr;
+ }
+
+ /* need base address for in-page offset */
+ psDevInfo->sPVRJTable.pfnPVRSRVDCMemInfoGetCpuVAddr(ppsMemInfos[i], &virtAddr);
+ asMemInfo[k].uiAddr = (IMG_UINTPTR_T) virtAddr;
+ asMemInfo[k].psTilerInfo = psTilerInfo;
+ }
+
+ for(i = 0; i < psDssData->num_ovls; i++)
+ {
+ unsigned int ix;
+ apsTilerPAs[i] = NULL;
+
+ /* only supporting Post2, cloned and fbmem layers */
+ if (psDssData->ovls[i].addressing != OMAP_DSS_BUFADDR_LAYER_IX &&
+ psDssData->ovls[i].addressing != OMAP_DSS_BUFADDR_OVL_IX &&
+ psDssData->ovls[i].addressing != OMAP_DSS_BUFADDR_FB)
+ psDssData->ovls[i].cfg.enabled = false;
+
+ if (psDssData->ovls[i].addressing != OMAP_DSS_BUFADDR_LAYER_IX)
+ continue;
+
+ /* Post2 layers */
+ ix = psDssData->ovls[i].ba;
+ if (ix >= k)
+ {
+ WARN(1, "Invalid Post2 layer (%u)", ix);
+ psDssData->ovls[i].cfg.enabled = false;
+ continue;
+ }
+
+ psDssData->ovls[i].addressing = OMAP_DSS_BUFADDR_DIRECT;
+ psDssData->ovls[i].ba = (u32) asMemInfo[ix].uiAddr;
+ psDssData->ovls[i].uv = (u32) asMemInfo[ix].uiUVAddr;
+ apsTilerPAs[i] = asMemInfo[ix].psTilerInfo;
+ }
+
+ dsscomp_gralloc_queue(psDssData, apsTilerPAs, false,
+ dsscomp_proxy_cmdcomplete,
+ (void *)hCmdCookie);
+
+ for(i = 0; i < k; i++)
+ {
+ tiler_pa_free(apsTilerPAs[i]);
+ }
+
+ return IMG_TRUE;
+}
+
+#endif
+
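+/*
+ * Flip command dispatcher: V1 commands carry an explicit buffer handle and go
+ * through the swap chain; V2 commands carry meminfos plus private DSS
+ * composition data (CONFIG_DSSCOMP only).
+ */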
+static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie,
+ IMG_UINT32 ui32DataSize,
+ IMG_VOID *pvData)
+{
+ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if(!hCmdCookie || !pvData)
+ {
+ return IMG_FALSE;
+ }
+
+ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
+
+ if (psFlipCmd == IMG_NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;
+
+ if(psFlipCmd->hExtBuffer)
+ {
+ return ProcessFlipV1(hCmdCookie,
+ psDevInfo,
+ psFlipCmd->hExtSwapChain,
+ psFlipCmd->hExtBuffer,
+ psFlipCmd->ui32SwapInterval);
+ }
+ else
+ {
+#if defined(CONFIG_DSSCOMP)
+ DISPLAYCLASS_FLIP_COMMAND2 *psFlipCmd2;
+ psFlipCmd2 = (DISPLAYCLASS_FLIP_COMMAND2 *)pvData;
+ return ProcessFlipV2(hCmdCookie,
+ psDevInfo,
+ psFlipCmd2->ppsMemInfos,
+ psFlipCmd2->ui32NumMemInfos,
+ psFlipCmd2->pvPrivData,
+ psFlipCmd2->ui32PrivDataLength);
+#else
+		BUG();
+		return IMG_FALSE;
+#endif
+ }
+}
+
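+/*
+ * Bind the display device to its Linux framebuffer: take a reference on the
+ * fbdev module, open the framebuffer and, when ION/TILER support is built in,
+ * replace the linear framebuffer memory with a TILER 2D allocation whose page
+ * list is recorded so buffers can be mapped; otherwise the fbdev's own memory
+ * is used directly.
+ */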
+static OMAPLFB_ERROR OMAPLFBInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+ struct fb_info *psLINFBInfo;
+ struct module *psLINFBOwner;
+ OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
+ OMAPLFB_ERROR eError = OMAPLFB_ERROR_GENERIC;
+ unsigned long FBSize;
+ unsigned long ulLCM;
+ unsigned uiFBDevID = psDevInfo->uiFBDevID;
+
+ OMAPLFB_CONSOLE_LOCK();
+
+ psLINFBInfo = registered_fb[uiFBDevID];
+ if (psLINFBInfo == NULL)
+ {
+ eError = OMAPLFB_ERROR_INVALID_DEVICE;
+ goto ErrorRelSem;
+ }
+
+ FBSize = (psLINFBInfo->screen_size) != 0 ?
+ psLINFBInfo->screen_size :
+ psLINFBInfo->fix.smem_len;
+
+
+ if (FBSize == 0 || psLINFBInfo->fix.line_length == 0)
+ {
+ eError = OMAPLFB_ERROR_INVALID_DEVICE;
+ goto ErrorRelSem;
+ }
+
+ psLINFBOwner = psLINFBInfo->fbops->owner;
+ if (!try_module_get(psLINFBOwner))
+ {
+ printk(KERN_INFO DRIVER_PREFIX
+ ": %s: Device %u: Couldn't get framebuffer module\n", __FUNCTION__, uiFBDevID);
+
+ goto ErrorRelSem;
+ }
+
+ if (psLINFBInfo->fbops->fb_open != NULL)
+ {
+ int res;
+
+ res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
+ if (res != 0)
+ {
+ printk(KERN_INFO DRIVER_PREFIX
+ " %s: Device %u: Couldn't open framebuffer(%d)\n", __FUNCTION__, uiFBDevID, res);
+
+ goto ErrorModPut;
+ }
+ }
+
+ psDevInfo->psLINFBInfo = psLINFBInfo;
+
+ ulLCM = LCM(psLINFBInfo->fix.line_length, OMAPLFB_PAGE_SIZE);
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer physical address: 0x%lx\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->fix.smem_start));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer virtual address: 0x%lx\n",
+ psDevInfo->uiFBDevID, (unsigned long)psLINFBInfo->screen_base));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer size: %lu\n",
+ psDevInfo->uiFBDevID, FBSize));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer virtual width: %u\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->var.xres_virtual));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer virtual height: %u\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->var.yres_virtual));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer width: %u\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->var.xres));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer height: %u\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->var.yres));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Framebuffer stride: %u\n",
+ psDevInfo->uiFBDevID, psLINFBInfo->fix.line_length));
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: LCM of stride and page size: %lu\n",
+ psDevInfo->uiFBDevID, ulLCM));
+
+
+ OMAPLFBPrintInfo(psDevInfo);
+
+ /* hijack LINFB */
+#if defined(CONFIG_ION_OMAP)
+ if(1)
+ {
+ /* for some reason we need at least 3 buffers in the swap chain */
+ int n = FBSize / RoundUpToMultiple(psLINFBInfo->fix.line_length * psLINFBInfo->var.yres, ulLCM);
+ int res;
+ int i, x, y, w;
+ ion_phys_addr_t phys;
+ size_t size;
+ struct tiler_view_t view;
+
+ struct omap_ion_tiler_alloc_data sAllocData = {
+ /* TILER will align width to 128-bytes */
+ /* however, SGX must have full page width */
+ .w = ALIGN(psLINFBInfo->var.xres, PAGE_SIZE / (psLINFBInfo->var.bits_per_pixel / 8)),
+ .h = psLINFBInfo->var.yres,
+ .fmt = psLINFBInfo->var.bits_per_pixel == 16 ? TILER_PIXEL_FMT_16BIT : TILER_PIXEL_FMT_32BIT,
+ .flags = 0,
+ };
+
+ printk(KERN_DEBUG DRIVER_PREFIX
+ " %s: Device %u: Requesting %d TILER 2D framebuffers\n", __FUNCTION__, uiFBDevID, n);
+
+ /* HACK: limit to MAX 3 FBs to save TILER container space */
+ if (n > 3)
+ n = 3;
+ sAllocData.w *= n;
+
+ psPVRFBInfo->uiBytesPerPixel = psLINFBInfo->var.bits_per_pixel >> 3;
+ psPVRFBInfo->bIs2D = OMAPLFB_TRUE;
+
+ res = omap_ion_nonsecure_tiler_alloc(gpsIONClient, &sAllocData);
+ if (res < 0)
+ {
+ res = omap_ion_tiler_alloc(gpsIONClient, &sAllocData);
+ }
+ psPVRFBInfo->psIONHandle = sAllocData.handle;
+ if (res < 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ " %s: Device %u: Could not allocate 2D framebuffer(%d)\n", __FUNCTION__, uiFBDevID, res);
+ goto ErrorModPut;
+ }
+
+ ion_phys(gpsIONClient, sAllocData.handle, &phys, &size);
+
+ psPVRFBInfo->sSysAddr.uiAddr = phys;
+ psPVRFBInfo->sCPUVAddr = 0;
+
+ psPVRFBInfo->ulWidth = psLINFBInfo->var.xres;
+ psPVRFBInfo->ulHeight = psLINFBInfo->var.yres;
+ psPVRFBInfo->ulByteStride = PAGE_ALIGN(psPVRFBInfo->ulWidth * psPVRFBInfo->uiBytesPerPixel);
+ w = psPVRFBInfo->ulByteStride >> PAGE_SHIFT;
+
+ /* this is an "effective" FB size to get correct number of buffers */
+ psPVRFBInfo->ulFBSize = sAllocData.h * n * psPVRFBInfo->ulByteStride;
+ psPVRFBInfo->psPageList = kzalloc(w * n * psPVRFBInfo->ulHeight * sizeof(*psPVRFBInfo->psPageList), GFP_KERNEL);
+ if (!psPVRFBInfo->psPageList)
+ {
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Could not allocate page list\n", __FUNCTION__, psDevInfo->uiFBDevID);
+ ion_free(gpsIONClient, sAllocData.handle);
+ goto ErrorModPut;
+ }
+
+ tilview_create(&view, phys, psDevInfo->sFBInfo.ulWidth, psDevInfo->sFBInfo.ulHeight);
+ for(i=0; i<n; i++)
+ {
+ for(y=0; y<psDevInfo->sFBInfo.ulHeight; y++)
+ {
+ for(x=0; x<w; x++)
+ {
+ psPVRFBInfo->psPageList[i * psDevInfo->sFBInfo.ulHeight * w + y * w + x].uiAddr =
+ phys + view.v_inc * y + ((x + i * w) << PAGE_SHIFT);
+ }
+ }
+ }
+ }
+ else
+#endif
+ {
+ psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
+ psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
+
+ psPVRFBInfo->ulWidth = psLINFBInfo->var.xres;
+ psPVRFBInfo->ulHeight = psLINFBInfo->var.yres;
+ psPVRFBInfo->ulByteStride = psLINFBInfo->fix.line_length;
+ psPVRFBInfo->ulFBSize = FBSize;
+ psPVRFBInfo->bIs2D = OMAPLFB_FALSE;
+ psPVRFBInfo->psPageList = IMG_NULL;
+ psPVRFBInfo->psIONHandle = IMG_NULL;
+ }
+ psPVRFBInfo->ulBufferSize = psPVRFBInfo->ulHeight * psPVRFBInfo->ulByteStride;
+
+ psPVRFBInfo->ulRoundedBufferSize = RoundUpToMultiple(psPVRFBInfo->ulBufferSize, ulLCM);
+
+ if(psLINFBInfo->var.bits_per_pixel == 16)
+ {
+ if((psLINFBInfo->var.red.length == 5) &&
+ (psLINFBInfo->var.green.length == 6) &&
+ (psLINFBInfo->var.blue.length == 5) &&
+ (psLINFBInfo->var.red.offset == 11) &&
+ (psLINFBInfo->var.green.offset == 5) &&
+ (psLINFBInfo->var.blue.offset == 0) &&
+ (psLINFBInfo->var.red.msb_right == 0))
+ {
+ psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
+ }
+ else
+ {
+ printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+ }
+ }
+ else if(psLINFBInfo->var.bits_per_pixel == 32)
+ {
+ if((psLINFBInfo->var.red.length == 8) &&
+ (psLINFBInfo->var.green.length == 8) &&
+ (psLINFBInfo->var.blue.length == 8) &&
+ (psLINFBInfo->var.red.offset == 16) &&
+ (psLINFBInfo->var.green.offset == 8) &&
+ (psLINFBInfo->var.blue.offset == 0) &&
+ (psLINFBInfo->var.red.msb_right == 0))
+ {
+ psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_ARGB8888;
+ }
+ else
+ {
+ printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+ }
+ }
+ else
+ {
+ printk(KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unknown FB format\n", __FUNCTION__, uiFBDevID);
+ }
+
+ psDevInfo->sFBInfo.ulPhysicalWidthmm =
+ ((int)psLINFBInfo->var.width > 0) ? psLINFBInfo->var.width : 90;
+
+ psDevInfo->sFBInfo.ulPhysicalHeightmm =
+ ((int)psLINFBInfo->var.height > 0) ? psLINFBInfo->var.height : 54;
+
+
+ psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
+ psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
+
+ eError = OMAPLFB_OK;
+ goto ErrorRelSem;
+
+ErrorModPut:
+ module_put(psLINFBOwner);
+ErrorRelSem:
+ OMAPLFB_CONSOLE_UNLOCK();
+
+ return eError;
+}
+
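+/*
+ * OMAPLFBDeInitFBDev: undo OMAPLFBInitFBDev.  Frees the TILER page list and
+ * ION handle (if any), calls fb_release and drops the framebuffer module
+ * reference, all under the console lock.
+ */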
+static void OMAPLFBDeInitFBDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+ struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
+ OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
+ struct module *psLINFBOwner;
+
+ OMAPLFB_CONSOLE_LOCK();
+
+ psLINFBOwner = psLINFBInfo->fbops->owner;
+
+ kfree(psPVRFBInfo->psPageList);
+ if (psPVRFBInfo->psIONHandle)
+ {
+ ion_free(gpsIONClient, psPVRFBInfo->psIONHandle);
+ }
+
+ if (psLINFBInfo->fbops->fb_release != NULL)
+ {
+ (void) psLINFBInfo->fbops->fb_release(psLINFBInfo, 0);
+ }
+
+ module_put(psLINFBOwner);
+
+ OMAPLFB_CONSOLE_UNLOCK();
+}
+
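+/*
+ * OMAPLFBInitDev: create and register the display class device for one
+ * framebuffer.  Allocates the device info structure, fetches the PVR
+ * services jump table, initialises the framebuffer, fills in the display
+ * info and format, registers the device and its flip command handler with
+ * PVR services, and initialises the swap chain lock and state flags.
+ */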
+static OMAPLFB_DEVINFO *OMAPLFBInitDev(unsigned uiFBDevID)
+{
+ PFN_CMD_PROC pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
+ IMG_UINT32 aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
+ OMAPLFB_DEVINFO *psDevInfo = NULL;
+
+
+ psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
+
+ if(psDevInfo == NULL)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: Couldn't allocate device information structure\n", __FUNCTION__, uiFBDevID);
+
+ goto ErrorExit;
+ }
+
+
+ memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
+
+ psDevInfo->uiFBDevID = uiFBDevID;
+
+
+ if(!(*gpfnGetPVRJTable)(&psDevInfo->sPVRJTable))
+ {
+ goto ErrorFreeDevInfo;
+ }
+
+
+ if(OMAPLFBInitFBDev(psDevInfo) != OMAPLFB_OK)
+ {
+
+ goto ErrorFreeDevInfo;
+ }
+
+ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = (IMG_UINT32)(psDevInfo->sFBInfo.ulFBSize / psDevInfo->sFBInfo.ulRoundedBufferSize);
+ if (psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers != 0)
+ {
+ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
+ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 1;
+ }
+
+ psDevInfo->sDisplayInfo.ui32PhysicalWidthmm = psDevInfo->sFBInfo.ulPhysicalWidthmm;
+ psDevInfo->sDisplayInfo.ui32PhysicalHeightmm = psDevInfo->sFBInfo.ulPhysicalHeightmm;
+
+ strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
+
+ psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
+ psDevInfo->sDisplayDim.ui32Width = (IMG_UINT32)psDevInfo->sFBInfo.ulWidth;
+ psDevInfo->sDisplayDim.ui32Height = (IMG_UINT32)psDevInfo->sFBInfo.ulHeight;
+ psDevInfo->sDisplayDim.ui32ByteStride = (IMG_UINT32)psDevInfo->sFBInfo.ulByteStride;
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: Maximum number of swap chain buffers: %u\n",
+ psDevInfo->uiFBDevID, psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
+
+
+ psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
+ psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
+ psDevInfo->sSystemBuffer.psDevInfo = psDevInfo;
+
+ OMAPLFBInitBufferForSwap(&psDevInfo->sSystemBuffer);
+
+
+
+ psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
+ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
+ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
+ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
+ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
+ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
+ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
+ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
+ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
+ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
+ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
+ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
+ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
+ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
+ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
+ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
+ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
+ psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
+
+
+ if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice(
+ &psDevInfo->sDCJTable,
+ &psDevInfo->uiPVRDevID) != PVRSRV_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: PVR Services device registration failed\n", __FUNCTION__, uiFBDevID);
+
+ goto ErrorDeInitFBDev;
+ }
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
+ ": Device %u: PVR Device ID: %u\n",
+ psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID));
+
+
+ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
+
+
+ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
+ aui32SyncCountList[DC_FLIP_COMMAND][1] = 10;
+
+
+
+
+
+ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList(psDevInfo->uiPVRDevID,
+ &pfnCmdProcList[0],
+ aui32SyncCountList,
+ OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: Couldn't register command processing functions with PVR Services\n", __FUNCTION__, uiFBDevID);
+ goto ErrorUnregisterDevice;
+ }
+
+ OMAPLFBCreateSwapChainLockInit(psDevInfo);
+
+ OMAPLFBAtomicBoolInit(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+ OMAPLFBAtomicIntInit(&psDevInfo->sBlankEvents, 0);
+ OMAPLFBAtomicBoolInit(&psDevInfo->sFlushCommands, OMAPLFB_FALSE);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ OMAPLFBAtomicBoolInit(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+ OMAPLFBAtomicBoolInit(&psDevInfo->sLeaveVT, OMAPLFB_FALSE);
+#endif
+ return psDevInfo;
+
+ErrorUnregisterDevice:
+ (void)psDevInfo->sPVRJTable.pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID);
+ErrorDeInitFBDev:
+ OMAPLFBDeInitFBDev(psDevInfo);
+ErrorFreeDevInfo:
+ OMAPLFBFreeKernelMem(psDevInfo);
+ErrorExit:
+ return NULL;
+}
+
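+/*
+ * OMAPLFBInit: module entry point for the display class driver.  Looks up
+ * the PVR jump table entry point and probes every registered framebuffer,
+ * highest device ID first.  Succeeds if at least one device is found.
+ */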
+OMAPLFB_ERROR OMAPLFBInit(void)
+{
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+ unsigned uiDevicesFound = 0;
+
+ if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &gpfnGetPVRJTable) != OMAPLFB_OK)
+ {
+ return OMAPLFB_ERROR_INIT_FAILURE;
+ }
+
+
+ for(i = uiMaxFBDevIDPlusOne; i-- != 0;)
+ {
+ OMAPLFB_DEVINFO *psDevInfo = OMAPLFBInitDev(i);
+
+ if (psDevInfo != NULL)
+ {
+
+ OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, psDevInfo);
+ uiDevicesFound++;
+ }
+ }
+
+ return (uiDevicesFound != 0) ? OMAPLFB_OK : OMAPLFB_ERROR_INIT_FAILURE;
+}
+
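+/*
+ * OMAPLFBDeInitDev: unregister one display device.  Tears down the swap
+ * chain lock and atomic state, removes the command handlers and the device
+ * from PVR services, releases the framebuffer and frees the device info.
+ */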
+static OMAPLFB_BOOL OMAPLFBDeInitDev(OMAPLFB_DEVINFO *psDevInfo)
+{
+ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable = &psDevInfo->sPVRJTable;
+
+ OMAPLFBCreateSwapChainLockDeInit(psDevInfo);
+
+ OMAPLFBAtomicBoolDeInit(&psDevInfo->sBlanked);
+ OMAPLFBAtomicIntDeInit(&psDevInfo->sBlankEvents);
+ OMAPLFBAtomicBoolDeInit(&psDevInfo->sFlushCommands);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ OMAPLFBAtomicBoolDeInit(&psDevInfo->sEarlySuspendFlag);
+#endif
+#if defined(SUPPORT_DRI_DRM)
+ OMAPLFBAtomicBoolDeInit(&psDevInfo->sLeaveVT);
+#endif
+ psPVRJTable = &psDevInfo->sPVRJTable;
+
+ if (psPVRJTable->pfnPVRSRVRemoveCmdProcList (psDevInfo->uiPVRDevID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: PVR Device %u: Couldn't unregister command processing functions\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+ return OMAPLFB_FALSE;
+ }
+
+
+ if (psPVRJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->uiPVRDevID) != PVRSRV_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: PVR Device %u: Couldn't remove device from PVR Services\n", __FUNCTION__, psDevInfo->uiFBDevID, psDevInfo->uiPVRDevID);
+ return OMAPLFB_FALSE;
+ }
+
+ OMAPLFBDeInitFBDev(psDevInfo);
+
+ OMAPLFBSetDevInfoPtr(psDevInfo->uiFBDevID, NULL);
+
+
+ OMAPLFBFreeKernelMem(psDevInfo);
+
+ return OMAPLFB_TRUE;
+}
+
+OMAPLFB_ERROR OMAPLFBDeInit(void)
+{
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+ OMAPLFB_BOOL bError = OMAPLFB_FALSE;
+
+ for(i = 0; i < uiMaxFBDevIDPlusOne; i++)
+ {
+ OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+ if (psDevInfo != NULL)
+ {
+ bError |= !OMAPLFBDeInitDev(psDevInfo);
+ }
+ }
+
+ return (bError) ? OMAPLFB_ERROR_INIT_FAILURE : OMAPLFB_OK;
+}
+
diff --git a/drivers/gpu/pvr/omaplfb/omaplfb_linux.c b/drivers/gpu/pvr/omaplfb/omaplfb_linux.c
new file mode 100644
index 0000000..03951f5
--- /dev/null
+++ b/drivers/gpu/pvr/omaplfb/omaplfb_linux.c
@@ -0,0 +1,1050 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/atomic.h>
+
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#else
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/omapfb.h>
+#include <linux/mutex.h>
+
+#if defined(PVR_OMAPLFB_DRM_FB)
+#include <plat/display.h>
+#include <linux/omap_gpu.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define PVR_OMAPFB3_NEEDS_PLAT_VRFB_H
+#endif
+
+#if defined(PVR_OMAPFB3_NEEDS_PLAT_VRFB_H)
+#include <plat/vrfb.h>
+#else
+#if defined(PVR_OMAPFB3_NEEDS_MACH_VRFB_H)
+#include <mach/vrfb.h>
+#endif
+#endif
+
+#if defined(DEBUG)
+#define PVR_DEBUG DEBUG
+#undef DEBUG
+#endif
+#include <omapfb/omapfb.h>
+#if defined(DEBUG)
+#undef DEBUG
+#endif
+#if defined(PVR_DEBUG)
+#define DEBUG PVR_DEBUG
+#undef PVR_DEBUG
+#endif
+#endif
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kerneldisplay.h"
+#include "omaplfb.h"
+#include "pvrmodule.h"
+#if defined(SUPPORT_DRI_DRM)
+#include "pvr_drm.h"
+#include "3rdparty_dc_drm_shared.h"
+#endif
+
+#if !defined(PVR_LINUX_USING_WORKQUEUES)
+#error "PVR_LINUX_USING_WORKQUEUES must be defined"
+#endif
+
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+
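+/*
+ * The DSS interface changed in 2.6.34: the update mode and sync hooks moved
+ * from the omap_dss_device to its driver and overlay manager.  These macros
+ * hide that difference for the non-DRM framebuffer path.
+ */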
+#if !defined(PVR_OMAPLFB_DRM_FB)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define OMAP_DSS_DRIVER(drv, dev) struct omap_dss_driver *drv = (dev) != NULL ? (dev)->driver : NULL
+#define OMAP_DSS_MANAGER(man, dev) struct omap_overlay_manager *man = (dev) != NULL ? (dev)->manager : NULL
+#define WAIT_FOR_VSYNC(man) ((man)->wait_for_vsync)
+#else
+#define OMAP_DSS_DRIVER(drv, dev) struct omap_dss_device *drv = (dev)
+#define OMAP_DSS_MANAGER(man, dev) struct omap_dss_device *man = (dev)
+#define WAIT_FOR_VSYNC(man) ((man)->wait_vsync)
+#endif
+#endif
+
+void *OMAPLFBAllocKernelMem(unsigned long ulSize)
+{
+ return kmalloc(ulSize, GFP_KERNEL);
+}
+
+void OMAPLFBFreeKernelMem(void *pvMem)
+{
+ kfree(pvMem);
+}
+
+void OMAPLFBCreateSwapChainLockInit(OMAPLFB_DEVINFO *psDevInfo)
+{
+ mutex_init(&psDevInfo->sCreateSwapChainMutex);
+}
+
+void OMAPLFBCreateSwapChainLockDeInit(OMAPLFB_DEVINFO *psDevInfo)
+{
+ mutex_destroy(&psDevInfo->sCreateSwapChainMutex);
+}
+
+void OMAPLFBCreateSwapChainLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+ mutex_lock(&psDevInfo->sCreateSwapChainMutex);
+}
+
+void OMAPLFBCreateSwapChainUnLock(OMAPLFB_DEVINFO *psDevInfo)
+{
+ mutex_unlock(&psDevInfo->sCreateSwapChainMutex);
+}
+
+void OMAPLFBAtomicBoolInit(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
+{
+ atomic_set(psAtomic, (int)bVal);
+}
+
+void OMAPLFBAtomicBoolDeInit(OMAPLFB_ATOMIC_BOOL *psAtomic)
+{
+}
+
+void OMAPLFBAtomicBoolSet(OMAPLFB_ATOMIC_BOOL *psAtomic, OMAPLFB_BOOL bVal)
+{
+ atomic_set(psAtomic, (int)bVal);
+}
+
+OMAPLFB_BOOL OMAPLFBAtomicBoolRead(OMAPLFB_ATOMIC_BOOL *psAtomic)
+{
+ return (OMAPLFB_BOOL)atomic_read(psAtomic);
+}
+
+void OMAPLFBAtomicIntInit(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
+{
+ atomic_set(psAtomic, iVal);
+}
+
+void OMAPLFBAtomicIntDeInit(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+}
+
+void OMAPLFBAtomicIntSet(OMAPLFB_ATOMIC_INT *psAtomic, int iVal)
+{
+ atomic_set(psAtomic, iVal);
+}
+
+int OMAPLFBAtomicIntRead(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+ return atomic_read(psAtomic);
+}
+
+void OMAPLFBAtomicIntInc(OMAPLFB_ATOMIC_INT *psAtomic)
+{
+ atomic_inc(psAtomic);
+}
+
+OMAPLFB_ERROR OMAPLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
+{
+ if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
+ {
+ return (OMAPLFB_ERROR_INVALID_PARAMS);
+ }
+
+
+ *ppfnFuncTable = PVRGetDisplayClassJTable;
+
+ return (OMAPLFB_OK);
+}
+
+void OMAPLFBQueueBufferForSwap(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_BUFFER *psBuffer)
+{
+ int res = queue_work(psSwapChain->psWorkQueue, &psBuffer->sWork);
+
+ if (res == 0)
+ {
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Buffer already on work queue\n", __FUNCTION__, psSwapChain->uiFBDevID);
+ }
+}
+
+static void WorkQueueHandler(struct work_struct *psWork)
+{
+ OMAPLFB_BUFFER *psBuffer = container_of(psWork, OMAPLFB_BUFFER, sWork);
+
+ OMAPLFBSwapHandler(psBuffer);
+}
+
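+/*
+ * OMAPLFBCreateSwapQueue: create the per swap chain work queue used to
+ * process queued flips.  A single-threaded, freezable queue is requested,
+ * with the exact API depending on the kernel version.
+ */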
+OMAPLFB_ERROR OMAPLFBCreateSwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+
+ psSwapChain->psWorkQueue = alloc_ordered_workqueue(DEVNAME, WQ_FREEZABLE | WQ_MEM_RECLAIM);
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+ psSwapChain->psWorkQueue = create_freezable_workqueue(DEVNAME);
+#else
+
+ psSwapChain->psWorkQueue = __create_workqueue(DEVNAME, 1, 1, 1);
+#endif
+#endif
+ if (psSwapChain->psWorkQueue == NULL)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: Couldn't create workqueue\n", __FUNCTION__, psSwapChain->uiFBDevID);
+
+ return (OMAPLFB_ERROR_INIT_FAILURE);
+ }
+
+ return (OMAPLFB_OK);
+}
+
+void OMAPLFBInitBufferForSwap(OMAPLFB_BUFFER *psBuffer)
+{
+ INIT_WORK(&psBuffer->sWork, WorkQueueHandler);
+}
+
+void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
+{
+ destroy_workqueue(psSwapChain->psWorkQueue);
+}
+
+#if defined(CONFIG_DSSCOMP)
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+#include <linux/omapfb.h>
+#endif
+
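+/*
+ * OMAPLFBFlip: make the given buffer the visible one.  When DSSCOMP is
+ * enabled the flip is queued through dsscomp_gralloc_queue as a single
+ * full-screen overlay, so that overlays used by a previous composition are
+ * disabled and the manager is updated.  Otherwise the buffer is selected by
+ * adjusting the framebuffer Y offset with fb_set_var or fb_pan_display.
+ */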
+void OMAPLFBFlip(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_BUFFER *psBuffer)
+{
+ struct fb_var_screeninfo sFBVar;
+ int res;
+ unsigned long ulYResVirtual;
+
+ OMAPLFB_CONSOLE_LOCK();
+
+ sFBVar = psDevInfo->psLINFBInfo->var;
+
+ sFBVar.xoffset = 0;
+ sFBVar.yoffset = psBuffer->ulYOffset;
+
+ ulYResVirtual = psBuffer->ulYOffset + sFBVar.yres;
+
+
+#if defined(CONFIG_DSSCOMP)
+ {
+ /*
+ * If using DSSCOMP, we need to use dsscomp queuing for normal
+ * framebuffer updates, so that previously used overlays get
+ * automatically disabled, and manager gets dirtied. We can
+ * do that because DSSCOMP takes ownership of all pipelines on
+ * a manager.
+ */
+ struct fb_fix_screeninfo sFBFix = psDevInfo->psLINFBInfo->fix;
+ struct dsscomp_setup_dispc_data d = {
+ .num_ovls = 1,
+ .num_mgrs = 1,
+ .mgrs[0].alpha_blending = 1,
+ .ovls[0] = {
+ .cfg = {
+ .win.w = sFBVar.xres,
+ .win.h = sFBVar.yres,
+ .crop.x = sFBVar.xoffset,
+ .crop.y = sFBVar.yoffset,
+ .crop.w = sFBVar.xres,
+ .crop.h = sFBVar.yres,
+ .width = sFBVar.xres_virtual,
+ .height = sFBVar.yres_virtual,
+ .stride = sFBFix.line_length,
+ .enabled = 1,
+ .global_alpha = 255,
+ },
+ },
+ };
+ /* do not map buffer into TILER1D as it is contiguous */
+ struct tiler_pa_info *pas[] = { NULL };
+
+ d.ovls[0].ba = sFBFix.smem_start;
+ omapfb_mode_to_dss_mode(&sFBVar, &d.ovls[0].cfg.color_mode);
+
+ res = dsscomp_gralloc_queue(&d, pas, true, NULL, NULL);
+ }
+#else
+#if !defined(PVR_OMAPLFB_DONT_USE_FB_PAN_DISPLAY)
+
+ if (sFBVar.xres_virtual != sFBVar.xres || sFBVar.yres_virtual < ulYResVirtual)
+#endif
+ {
+ sFBVar.xres_virtual = sFBVar.xres;
+ sFBVar.yres_virtual = ulYResVirtual;
+
+ sFBVar.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+
+ res = fb_set_var(psDevInfo->psLINFBInfo, &sFBVar);
+ if (res != 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: fb_set_var failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+ }
+ }
+#if !defined(PVR_OMAPLFB_DONT_USE_FB_PAN_DISPLAY)
+ else
+ {
+ res = fb_pan_display(psDevInfo->psLINFBInfo, &sFBVar);
+ if (res != 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: fb_pan_display failed (Y Offset: %lu, Error: %d)\n", __FUNCTION__, psDevInfo->uiFBDevID, psBuffer->ulYOffset, res);
+ }
+ }
+#endif
+#endif
+ OMAPLFB_CONSOLE_UNLOCK();
+}
+
+#if !defined(PVR_OMAPLFB_DRM_FB) || defined(DEBUG)
+static OMAPLFB_BOOL OMAPLFBValidateDSSUpdateMode(enum omap_dss_update_mode eMode)
+{
+ switch (eMode)
+ {
+ case OMAP_DSS_UPDATE_AUTO:
+ case OMAP_DSS_UPDATE_MANUAL:
+ case OMAP_DSS_UPDATE_DISABLED:
+ return OMAPLFB_TRUE;
+ default:
+ break;
+ }
+
+ return OMAPLFB_FALSE;
+}
+
+static OMAPLFB_UPDATE_MODE OMAPLFBFromDSSUpdateMode(enum omap_dss_update_mode eMode)
+{
+ switch (eMode)
+ {
+ case OMAP_DSS_UPDATE_AUTO:
+ return OMAPLFB_UPDATE_MODE_AUTO;
+ case OMAP_DSS_UPDATE_MANUAL:
+ return OMAPLFB_UPDATE_MODE_MANUAL;
+ case OMAP_DSS_UPDATE_DISABLED:
+ return OMAPLFB_UPDATE_MODE_DISABLED;
+ default:
+ break;
+ }
+
+ return OMAPLFB_UPDATE_MODE_UNDEFINED;
+}
+#endif
+
+static OMAPLFB_BOOL OMAPLFBValidateUpdateMode(OMAPLFB_UPDATE_MODE eMode)
+{
+ switch(eMode)
+ {
+ case OMAPLFB_UPDATE_MODE_AUTO:
+ case OMAPLFB_UPDATE_MODE_MANUAL:
+ case OMAPLFB_UPDATE_MODE_DISABLED:
+ return OMAPLFB_TRUE;
+ default:
+ break;
+ }
+
+ return OMAPLFB_FALSE;
+}
+
+static enum omap_dss_update_mode OMAPLFBToDSSUpdateMode(OMAPLFB_UPDATE_MODE eMode)
+{
+ switch(eMode)
+ {
+ case OMAPLFB_UPDATE_MODE_AUTO:
+ return OMAP_DSS_UPDATE_AUTO;
+ case OMAPLFB_UPDATE_MODE_MANUAL:
+ return OMAP_DSS_UPDATE_MANUAL;
+ case OMAPLFB_UPDATE_MODE_DISABLED:
+ return OMAP_DSS_UPDATE_DISABLED;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+#if defined(DEBUG)
+static const char *OMAPLFBUpdateModeToString(OMAPLFB_UPDATE_MODE eMode)
+{
+ switch(eMode)
+ {
+ case OMAPLFB_UPDATE_MODE_AUTO:
+ return "Auto Update Mode";
+ case OMAPLFB_UPDATE_MODE_MANUAL:
+ return "Manual Update Mode";
+ case OMAPLFB_UPDATE_MODE_DISABLED:
+ return "Update Mode Disabled";
+ case OMAPLFB_UPDATE_MODE_UNDEFINED:
+ return "Update Mode Undefined";
+ default:
+ break;
+ }
+
+ return "Unknown Update Mode";
+}
+
+static const char *OMAPLFBDSSUpdateModeToString(enum omap_dss_update_mode eMode)
+{
+ if (!OMAPLFBValidateDSSUpdateMode(eMode))
+ {
+ return "Unknown Update Mode";
+ }
+
+ return OMAPLFBUpdateModeToString(OMAPLFBFromDSSUpdateMode(eMode));
+}
+
+void OMAPLFBPrintInfo(OMAPLFB_DEVINFO *psDevInfo)
+{
+#if defined(PVR_OMAPLFB_DRM_FB)
+ struct drm_connector *psConnector;
+ unsigned uConnectors;
+ unsigned uConnector;
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": Device %u: DRM framebuffer\n", psDevInfo->uiFBDevID));
+
+ for (psConnector = NULL, uConnectors = 0;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL;)
+ {
+ uConnectors++;
+ }
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": Device %u: Number of screens (DRM connectors): %u\n", psDevInfo->uiFBDevID, uConnectors));
+
+ if (uConnectors == 0)
+ {
+ return;
+ }
+
+ for (psConnector = NULL, uConnector = 0;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL; uConnector++)
+ {
+ enum omap_dss_update_mode eMode = omap_connector_get_update_mode(psConnector);
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": Device %u: Screen %u: %s (%d)\n", psDevInfo->uiFBDevID, uConnector, OMAPLFBDSSUpdateModeToString(eMode), (int)eMode));
+
+ }
+#else
+ OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": Device %u: non-DRM framebuffer\n", psDevInfo->uiFBDevID));
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": Device %u: %s\n", psDevInfo->uiFBDevID, OMAPLFBUpdateModeToString(eMode)));
+#endif
+}
+#endif
+
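+/*
+ * OMAPLFBGetUpdateMode: report the current display update mode.  For DRM
+ * framebuffers the modes of all connectors are combined (manual wins over
+ * auto, which wins over disabled).  Otherwise the mode is read from the DSS
+ * driver; HDMI devices without a get_update_mode hook are treated as auto.
+ */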
+OMAPLFB_UPDATE_MODE OMAPLFBGetUpdateMode(OMAPLFB_DEVINFO *psDevInfo)
+{
+#if defined(PVR_OMAPLFB_DRM_FB)
+ struct drm_connector *psConnector;
+ OMAPLFB_UPDATE_MODE eMode = OMAPLFB_UPDATE_MODE_UNDEFINED;
+
+
+ for (psConnector = NULL;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL;)
+ {
+ switch(omap_connector_get_update_mode(psConnector))
+ {
+ case OMAP_DSS_UPDATE_MANUAL:
+ eMode = OMAPLFB_UPDATE_MODE_MANUAL;
+ break;
+ case OMAP_DSS_UPDATE_DISABLED:
+ if (eMode == OMAPLFB_UPDATE_MODE_UNDEFINED)
+ {
+ eMode = OMAPLFB_UPDATE_MODE_DISABLED;
+ }
+ break;
+ case OMAP_DSS_UPDATE_AUTO:
+
+ default:
+
+ if (eMode != OMAPLFB_UPDATE_MODE_MANUAL)
+ {
+ eMode = OMAPLFB_UPDATE_MODE_AUTO;
+ }
+ break;
+ }
+ }
+
+ return eMode;
+#else
+ struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+ OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+
+ enum omap_dss_update_mode eMode;
+
+ if (psDSSDrv == NULL)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: No DSS device\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ return OMAPLFB_UPDATE_MODE_UNDEFINED;
+ }
+
+ if (psDSSDrv->get_update_mode == NULL)
+ {
+ if (strcmp(psDSSDev->name, "hdmi") == 0)
+ {
+ return OMAPLFB_UPDATE_MODE_AUTO;
+ }
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: No get_update_mode function\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ return OMAPLFB_UPDATE_MODE_UNDEFINED;
+ }
+
+ eMode = psDSSDrv->get_update_mode(psDSSDev);
+ if (!OMAPLFBValidateDSSUpdateMode(eMode))
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, (int)eMode));
+ }
+
+ return OMAPLFBFromDSSUpdateMode(eMode);
+#endif
+}
+
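+/*
+ * OMAPLFBSetUpdateMode: set the display update mode.  For DRM framebuffers
+ * the mode is applied to every connector; partial success is reported as
+ * failure only when auto mode could not be set everywhere.  Otherwise the
+ * mode is passed to the DSS driver's set_update_mode hook.
+ */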
+OMAPLFB_BOOL OMAPLFBSetUpdateMode(OMAPLFB_DEVINFO *psDevInfo, OMAPLFB_UPDATE_MODE eMode)
+{
+#if defined(PVR_OMAPLFB_DRM_FB)
+ struct drm_connector *psConnector;
+ enum omap_dss_update_mode eDSSMode;
+ OMAPLFB_BOOL bSuccess = OMAPLFB_FALSE;
+ OMAPLFB_BOOL bFailure = OMAPLFB_FALSE;
+
+ if (!OMAPLFBValidateUpdateMode(eMode))
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, (int)eMode));
+ return OMAPLFB_FALSE;
+ }
+ eDSSMode = OMAPLFBToDSSUpdateMode(eMode);
+
+ for (psConnector = NULL;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL;)
+ {
+ int iRes = omap_connector_set_update_mode(psConnector, eDSSMode);
+ OMAPLFB_BOOL bRes = (iRes == 0);
+
+
+ bSuccess |= bRes;
+ bFailure |= !bRes;
+ }
+
+ if (!bFailure)
+ {
+ if (!bSuccess)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: No screens\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ }
+
+ return OMAPLFB_TRUE;
+ }
+
+ if (!bSuccess)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't set %s for any screen\n", __FUNCTION__, psDevInfo->uiFBDevID, OMAPLFBUpdateModeToString(eMode)));
+ return OMAPLFB_FALSE;
+ }
+
+ if (eMode == OMAPLFB_UPDATE_MODE_AUTO)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Couldn't set %s for all screens\n", __FUNCTION__, psDevInfo->uiFBDevID, OMAPLFBUpdateModeToString(eMode)));
+ return OMAPLFB_FALSE;
+ }
+
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: %s set for some screens\n", __FUNCTION__, psDevInfo->uiFBDevID, OMAPLFBUpdateModeToString(eMode)));
+
+ return OMAPLFB_TRUE;
+#else
+ struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+ OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+ enum omap_dss_update_mode eDSSMode;
+ int res;
+
+ if (psDSSDrv == NULL || psDSSDrv->set_update_mode == NULL)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Can't set update mode\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ return OMAPLFB_FALSE;
+ }
+
+ if (!OMAPLFBValidateUpdateMode(eMode))
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Unknown update mode (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, (int)eMode));
+ return OMAPLFB_FALSE;
+ }
+ eDSSMode = OMAPLFBToDSSUpdateMode(eMode);
+
+ res = psDSSDrv->set_update_mode(psDSSDev, eDSSMode);
+ if (res != 0)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: set_update_mode (%s) failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, OMAPLFBDSSUpdateModeToString(eDSSMode), res));
+ }
+
+ return (res == 0);
+#endif
+}
+
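+/*
+ * OMAPLFBWaitForVSync: wait for the next vertical sync.  For DRM
+ * framebuffers each connector's encoder is waited on in turn; otherwise the
+ * overlay manager's wait-for-vsync hook is used if present.
+ */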
+OMAPLFB_BOOL OMAPLFBWaitForVSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+#if defined(PVR_OMAPLFB_DRM_FB)
+ struct drm_connector *psConnector;
+
+ for (psConnector = NULL;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL;)
+ {
+ (void) omap_encoder_wait_for_vsync(psConnector->encoder);
+ }
+
+ return OMAPLFB_TRUE;
+#else
+ struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+ OMAP_DSS_MANAGER(psDSSMan, psDSSDev);
+
+ if (psDSSMan != NULL && WAIT_FOR_VSYNC(psDSSMan) != NULL)
+ {
+ int res = WAIT_FOR_VSYNC(psDSSMan)(psDSSMan);
+ if (res != 0)
+ {
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: Device %u: Wait for vsync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res));
+ return OMAPLFB_FALSE;
+ }
+ }
+
+ return OMAPLFB_TRUE;
+#endif
+}
+
+OMAPLFB_BOOL OMAPLFBManualSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+#if defined(PVR_OMAPLFB_DRM_FB)
+ struct drm_connector *psConnector;
+
+ for (psConnector = NULL;
+ (psConnector = omap_fbdev_get_next_connector(psDevInfo->psLINFBInfo, psConnector)) != NULL; )
+ {
+
+ if (omap_connector_sync(psConnector) != 0)
+ {
+ (void) omap_encoder_wait_for_vsync(psConnector->encoder);
+ }
+ }
+
+ return OMAPLFB_TRUE;
+#else
+ struct omap_dss_device *psDSSDev = fb2display(psDevInfo->psLINFBInfo);
+ OMAP_DSS_DRIVER(psDSSDrv, psDSSDev);
+
+ if (psDSSDrv != NULL && psDSSDrv->sync != NULL)
+ {
+ int res = psDSSDrv->sync(psDSSDev);
+ if (res != 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: Sync failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+ return OMAPLFB_FALSE;
+ }
+ }
+
+ return OMAPLFB_TRUE;
+#endif
+}
+
+OMAPLFB_BOOL OMAPLFBCheckModeAndSync(OMAPLFB_DEVINFO *psDevInfo)
+{
+ OMAPLFB_UPDATE_MODE eMode = OMAPLFBGetUpdateMode(psDevInfo);
+
+ switch(eMode)
+ {
+ case OMAPLFB_UPDATE_MODE_AUTO:
+ case OMAPLFB_UPDATE_MODE_MANUAL:
+ return OMAPLFBManualSync(psDevInfo);
+ default:
+ break;
+ }
+
+ return OMAPLFB_TRUE;
+}
+
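+/*
+ * OMAPLFBFrameBufferEvents: framebuffer notifier callback.  Only
+ * FB_EVENT_BLANK is of interest: the blanked state and a blank event count
+ * are recorded in the device info for the affected framebuffer.
+ */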
+static int OMAPLFBFrameBufferEvents(struct notifier_block *psNotif,
+ unsigned long event, void *data)
+{
+ OMAPLFB_DEVINFO *psDevInfo;
+ struct fb_event *psFBEvent = (struct fb_event *)data;
+ struct fb_info *psFBInfo = psFBEvent->info;
+ OMAPLFB_BOOL bBlanked;
+
+
+ if (event != FB_EVENT_BLANK)
+ {
+ return 0;
+ }
+
+	bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? OMAPLFB_TRUE : OMAPLFB_FALSE;
+
+ psDevInfo = OMAPLFBGetDevInfoPtr(psFBInfo->node);
+
+#if 0
+ if (psDevInfo != NULL)
+ {
+ if (bBlanked)
+ {
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ }
+ else
+ {
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Unblank event received\n", __FUNCTION__, psDevInfo->uiFBDevID));
+ }
+ }
+ else
+ {
+ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": %s: Device %u: Blank/Unblank event for unknown framebuffer\n", __FUNCTION__, psFBInfo->node));
+ }
+#endif
+
+ if (psDevInfo != NULL)
+ {
+ OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, bBlanked);
+ OMAPLFBAtomicIntInc(&psDevInfo->sBlankEvents);
+ }
+
+ return 0;
+}
+
+OMAPLFB_ERROR OMAPLFBUnblankDisplay(OMAPLFB_DEVINFO *psDevInfo)
+{
+ int res;
+
+ OMAPLFB_CONSOLE_LOCK();
+ res = fb_blank(psDevInfo->psLINFBInfo, 0);
+ OMAPLFB_CONSOLE_UNLOCK();
+ if (res != 0 && res != -EINVAL)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: fb_blank failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+ return (OMAPLFB_ERROR_GENERIC);
+ }
+
+ return (OMAPLFB_OK);
+}
+
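+/*
+ * With CONFIG_HAS_EARLYSUSPEND, all display devices are blanked on early
+ * suspend and unblanked on late resume, with sEarlySuspendFlag tracking the
+ * suspended state of each device.
+ */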
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+static void OMAPLFBBlankDisplay(OMAPLFB_DEVINFO *psDevInfo)
+{
+ OMAPLFB_CONSOLE_LOCK();
+ fb_blank(psDevInfo->psLINFBInfo, 1);
+ OMAPLFB_CONSOLE_UNLOCK();
+}
+
+static void OMAPLFBEarlySuspendHandler(struct early_suspend *h)
+{
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+
+ for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+ {
+ OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+ if (psDevInfo != NULL)
+ {
+ OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_TRUE);
+ OMAPLFBBlankDisplay(psDevInfo);
+ }
+ }
+}
+
+static void OMAPLFBEarlyResumeHandler(struct early_suspend *h)
+{
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+
+ for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+ {
+ OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+ if (psDevInfo != NULL)
+ {
+ OMAPLFBUnblankDisplay(psDevInfo);
+ OMAPLFBAtomicBoolSet(&psDevInfo->sEarlySuspendFlag, OMAPLFB_FALSE);
+ }
+ }
+}
+
+#endif
+
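+/*
+ * OMAPLFBEnableLFBEventNotification: register for framebuffer blank events,
+ * unblank the display and (with CONFIG_HAS_EARLYSUSPEND) register the early
+ * suspend handlers just above the framebuffer level.
+ */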
+OMAPLFB_ERROR OMAPLFBEnableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
+{
+ int res;
+ OMAPLFB_ERROR eError;
+
+
+ memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
+
+ psDevInfo->sLINNotifBlock.notifier_call = OMAPLFBFrameBufferEvents;
+
+ OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+ OMAPLFBAtomicIntSet(&psDevInfo->sBlankEvents, 0);
+
+ res = fb_register_client(&psDevInfo->sLINNotifBlock);
+ if (res != 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: fb_register_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+
+ return (OMAPLFB_ERROR_GENERIC);
+ }
+
+ eError = OMAPLFBUnblankDisplay(psDevInfo);
+ if (eError != OMAPLFB_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: UnblankDisplay failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, eError);
+ return eError;
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ psDevInfo->sEarlySuspend.suspend = OMAPLFBEarlySuspendHandler;
+ psDevInfo->sEarlySuspend.resume = OMAPLFBEarlyResumeHandler;
+ psDevInfo->sEarlySuspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1;
+ register_early_suspend(&psDevInfo->sEarlySuspend);
+#endif
+
+ return (OMAPLFB_OK);
+}
+
+OMAPLFB_ERROR OMAPLFBDisableLFBEventNotification(OMAPLFB_DEVINFO *psDevInfo)
+{
+ int res;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&psDevInfo->sEarlySuspend);
+#endif
+
+
+ res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
+ if (res != 0)
+ {
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: Device %u: fb_unregister_client failed (%d)\n", __FUNCTION__, psDevInfo->uiFBDevID, res);
+ return (OMAPLFB_ERROR_GENERIC);
+ }
+
+ OMAPLFBAtomicBoolSet(&psDevInfo->sBlanked, OMAPLFB_FALSE);
+
+ return (OMAPLFB_OK);
+}
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL)
+static OMAPLFB_DEVINFO *OMAPLFBPVRDevIDToDevInfo(unsigned uiPVRDevID)
+{
+ unsigned uiMaxFBDevIDPlusOne = OMAPLFBMaxFBDevIDPlusOne();
+ unsigned i;
+
+ for (i=0; i < uiMaxFBDevIDPlusOne; i++)
+ {
+ OMAPLFB_DEVINFO *psDevInfo = OMAPLFBGetDevInfoPtr(i);
+
+ if (psDevInfo->uiPVRDevID == uiPVRDevID)
+ {
+ return psDevInfo;
+ }
+ }
+
+ printk(KERN_ERR DRIVER_PREFIX
+ ": %s: PVR Device %u: Couldn't find device\n", __FUNCTION__, uiPVRDevID);
+
+ return NULL;
+}
+
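+/*
+ * Display controller DRM ioctl: decodes the command and PVR device ID from
+ * the argument block.  VT enter/leave flushes the swap chain work queue
+ * and, on leave, flips back to the system buffer; the ON/STANDBY/SUSPEND/OFF
+ * commands map onto the corresponding fb_blank modes.
+ */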
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(struct drm_device unref__ *dev, void *arg, struct drm_file unref__ *pFile)
+{
+ uint32_t *puiArgs;
+ uint32_t uiCmd;
+ unsigned uiPVRDevID;
+ int ret = 0;
+ OMAPLFB_DEVINFO *psDevInfo;
+
+ if (arg == NULL)
+ {
+ return -EFAULT;
+ }
+
+ puiArgs = (uint32_t *)arg;
+ uiCmd = puiArgs[PVR_DRM_DISP_ARG_CMD];
+ uiPVRDevID = puiArgs[PVR_DRM_DISP_ARG_DEV];
+
+ psDevInfo = OMAPLFBPVRDevIDToDevInfo(uiPVRDevID);
+ if (psDevInfo == NULL)
+ {
+ return -EINVAL;
+ }
+
+
+ switch (uiCmd)
+ {
+ case PVR_DRM_DISP_CMD_LEAVE_VT:
+ case PVR_DRM_DISP_CMD_ENTER_VT:
+ {
+ OMAPLFB_BOOL bLeaveVT = (uiCmd == PVR_DRM_DISP_CMD_LEAVE_VT);
+ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: %s\n",
+ __FUNCTION__, uiPVRDevID,
+ bLeaveVT ? "Leave VT" : "Enter VT"));
+
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+ OMAPLFBAtomicBoolSet(&psDevInfo->sLeaveVT, bLeaveVT);
+ if (psDevInfo->psSwapChain != NULL)
+ {
+ flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+
+ if (bLeaveVT)
+ {
+ OMAPLFBFlip(psDevInfo, &psDevInfo->sSystemBuffer);
+ (void) OMAPLFBCheckModeAndSync(psDevInfo);
+ }
+ }
+
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+ (void) OMAPLFBUnblankDisplay(psDevInfo);
+ break;
+ }
+ case PVR_DRM_DISP_CMD_ON:
+ case PVR_DRM_DISP_CMD_STANDBY:
+ case PVR_DRM_DISP_CMD_SUSPEND:
+ case PVR_DRM_DISP_CMD_OFF:
+ {
+ int iFBMode;
+#if defined(DEBUG)
+ {
+ const char *pszMode;
+ switch(uiCmd)
+ {
+ case PVR_DRM_DISP_CMD_ON:
+ pszMode = "On";
+ break;
+ case PVR_DRM_DISP_CMD_STANDBY:
+ pszMode = "Standby";
+ break;
+ case PVR_DRM_DISP_CMD_SUSPEND:
+ pszMode = "Suspend";
+ break;
+ case PVR_DRM_DISP_CMD_OFF:
+ pszMode = "Off";
+ break;
+ default:
+ pszMode = "(Unknown Mode)";
+ break;
+ }
+ printk(KERN_WARNING DRIVER_PREFIX ": %s: PVR Device %u: Display %s\n",
+ __FUNCTION__, uiPVRDevID, pszMode);
+ }
+#endif
+ switch(uiCmd)
+ {
+ case PVR_DRM_DISP_CMD_ON:
+ iFBMode = FB_BLANK_UNBLANK;
+ break;
+ case PVR_DRM_DISP_CMD_STANDBY:
+ iFBMode = FB_BLANK_HSYNC_SUSPEND;
+ break;
+ case PVR_DRM_DISP_CMD_SUSPEND:
+ iFBMode = FB_BLANK_VSYNC_SUSPEND;
+ break;
+ case PVR_DRM_DISP_CMD_OFF:
+ iFBMode = FB_BLANK_POWERDOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ OMAPLFBCreateSwapChainLock(psDevInfo);
+
+ if (psDevInfo->psSwapChain != NULL)
+ {
+ flush_workqueue(psDevInfo->psSwapChain->psWorkQueue);
+ }
+
+ OMAPLFB_CONSOLE_LOCK();
+ ret = fb_blank(psDevInfo->psLINFBInfo, iFBMode);
+ OMAPLFB_CONSOLE_UNLOCK();
+
+ OMAPLFBCreateSwapChainUnLock(psDevInfo);
+
+ break;
+ }
+ default:
+ {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
+#if defined(SUPPORT_DRI_DRM)
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device unref__ *dev)
+#else
+static int __init OMAPLFB_Init(void)
+#endif
+{
+
+ if(OMAPLFBInit() != OMAPLFB_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: OMAPLFBInit failed\n", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ return 0;
+
+}
+
+#if defined(SUPPORT_DRI_DRM)
+void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device unref__ *dev)
+#else
+static void __exit OMAPLFB_Cleanup(void)
+#endif
+{
+ if(OMAPLFBDeInit() != OMAPLFB_OK)
+ {
+ printk(KERN_ERR DRIVER_PREFIX ": %s: OMAPLFBDeInit failed\n", __FUNCTION__);
+ }
+}
+
+#if !defined(SUPPORT_DRI_DRM)
+late_initcall(OMAPLFB_Init);
+module_exit(OMAPLFB_Cleanup);
+#endif
diff --git a/drivers/gpu/pvr/osfunc.c b/drivers/gpu/pvr/osfunc.c
new file mode 100644
index 0000000..28e2b00
--- /dev/null
+++ b/drivers/gpu/pvr/osfunc.c
@@ -0,0 +1,3310 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/io.h>
+#include <asm/page.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+#include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <linux/spinlock.h>
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
+ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
+ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
+ defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \
+ defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/workqueue.h>
+#endif
+
+#include "img_types.h"
+#include "services_headers.h"
+#include "mm.h"
+#include "pvrmmap.h"
+#include "mmap.h"
+#include "env_data.h"
+#include "proc.h"
+#include "mutex.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "lock.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, wait)
+#else
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait)
+#endif
+
+#if defined(PVR_LINUX_USING_WORKQUEUES) && !defined(CONFIG_PREEMPT)
+#error "A preemptible Linux kernel is required when using workqueues"
+#endif
+
+#if defined(EMULATOR)
+#define EVENT_OBJECT_TIMEOUT_MS (2000)
+#else
+#define EVENT_OBJECT_TIMEOUT_MS (100)
+#endif
+
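+/*
+ * OSAllocMem_Impl: kernel memory allocation for PVR services.  Requests
+ * larger than a page are tried with vmalloc first; everything else (and any
+ * vmalloc failure) falls back to kmalloc.
+ */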
+#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
+#else
+PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
+#endif
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(phBlockAlloc);
+
+ if (ui32Size > PAGE_SIZE)
+ {
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename, ui32Line);
+#else
+ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
+#endif
+ if (*ppvCpuVAddr)
+ {
+ return PVRSRV_OK;
+ }
+ }
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ *ppvCpuVAddr = _KMallocWrapper(ui32Size, GFP_KERNEL | __GFP_NOWARN, pszFilename, ui32Line);
+#else
+ *ppvCpuVAddr = KMallocWrapper(ui32Size, GFP_KERNEL | __GFP_NOWARN);
+#endif
+ if (!*ppvCpuVAddr)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
+
+static inline int is_vmalloc_addr(const void *pvCpuVAddr)
+{
+ unsigned long lAddr = (unsigned long)pvCpuVAddr;
+ return lAddr >= VMALLOC_START && lAddr < VMALLOC_END;
+}
+
+#endif
+
+#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
+#else
+PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
+#endif
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(hBlockAlloc);
+
+ if (is_vmalloc_addr(pvCpuVAddr))
+ {
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
+#else
+ VFreeWrapper(pvCpuVAddr);
+#endif
+ }
+ else
+ {
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
+#else
+ KFreeWrapper(pvCpuVAddr);
+#endif
+ }
+
+ return PVRSRV_OK;
+}
+
+
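+/*
+ * OSAllocPages_Impl: allocate backing memory for a device allocation.  ION
+ * allocations get their own LinuxMemArea; otherwise the mapping type selects
+ * between a vmalloc-backed area (kernel only and multi process) and an
+ * alloc_pages-backed area (single process).  Areas that can be mapped into
+ * user space are registered with the mmap code.
+ */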
+PVRSRV_ERROR
+OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PageSize,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_VOID **ppvCpuVAddr,
+ IMG_HANDLE *phOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
+
+#if 0
+
+ if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
+ {
+ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
+ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
+ }
+#endif
+
+ if(ui32AllocFlags & PVRSRV_MEM_ION)
+ {
+
+ BUG_ON((ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) != PVRSRV_HAP_SINGLE_PROCESS);
+
+ psLinuxMemArea = NewIONLinuxMemArea(ui32Size, ui32AllocFlags,
+ pvPrivData, ui32PrivDataLength);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ PVRMMapRegisterArea(psLinuxMemArea);
+ goto ExitSkipSwitch;
+ }
+
+ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ {
+ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ break;
+ }
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ {
+
+ psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+
+ case PVRSRV_HAP_MULTI_PROCESS:
+ {
+
+#if defined(VIVT_CACHE) || defined(__sh__)
+
+ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
+#endif
+ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
+ *ppvCpuVAddr = NULL;
+ *phOSMemHandle = (IMG_HANDLE)0;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ExitSkipSwitch:
+ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
+ *phOSMemHandle = psLinuxMemArea;
+
+ LinuxMemAreaRegister(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ break;
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ case PVRSRV_HAP_MULTI_PROCESS:
+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%d, "
+ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
+ ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
+ return eError;
+ }
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
+ __FUNCTION__, ui32AllocFlags));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32ByteOffset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandleRet)
+{
+ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
+ PVRSRV_ERROR eError;
+
+ psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
+ if(!psLinuxMemArea)
+ {
+ *phOSMemHandleRet = NULL;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ *phOSMemHandleRet = psLinuxMemArea;
+
+
+ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
+ {
+ return PVRSRV_OK;
+ }
+
+ eError = PVRMMapRegisterArea(psLinuxMemArea);
+ if(eError != PVRSRV_OK)
+ {
+ goto failed_register_area;
+ }
+
+ return PVRSRV_OK;
+
+failed_register_area:
+ *phOSMemHandleRet = NULL;
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+ return eError;
+}
+
+PVRSRV_ERROR
+OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
+{
+ LinuxMemArea *psLinuxMemArea;
+ PVRSRV_ERROR eError;
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
+
+ if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0)
+ {
+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+
+IMG_CPU_PHYADDR
+OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
+{
+ PVR_ASSERT(hOSMemHandle);
+
+ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
+}
+
+
+IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ PVR_ASSERT(psLinuxMemArea);
+
+ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV)
+ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
+
+ return IMG_FALSE;
+}
+
+
+IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+ IMG_UINT8 *Src,*Dst;
+ IMG_INT i;
+
+ Src=(IMG_UINT8 *)pvSrc;
+ Dst=(IMG_UINT8 *)pvDst;
+ for(i=0;i<ui32Size;i++)
+ {
+ Dst[i]=Src[i];
+ }
+#else
+ memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+
+IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMSET)
+ IMG_UINT8 *Buff;
+ IMG_INT i;
+
+ Buff=(IMG_UINT8 *)pvDest;
+ for(i=0;i<ui32Size;i++)
+ {
+ Buff[i]=ui8Value;
+ }
+#else
+ memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size);
+#endif
+}
+
+
+IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+ return (strcpy(pszDest, pszSrc));
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+ va_list argList;
+ IMG_INT32 iCount;
+
+ va_start(argList, pszFormat);
+ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+ va_end(argList);
+
+ return iCount;
+}
+
+IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
+{
+ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
+
+ if(*pui32Access)
+ {
+ if(psResource->ui32ID == ui32ID)
+ {
+ psResource->ui32ID = 0;
+ *pui32Access = 0;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process."));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
+ }
+}
+
+
+PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
+{
+ psResource->ui32ID = 0;
+ psResource->ui32Lock = 0;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
+{
+ OSBreakResourceLock (psResource, psResource->ui32ID);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
+{
+ ENV_DATA *psEnvData;
+ PVRSRV_ERROR eError;
+
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL,
+ "Environment Data");
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
+ &psEnvData->pvBridgeData, IMG_NULL,
+ "Bridge Data");
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
+
+ return eError;
+ }
+
+
+
+ psEnvData->bMISRInstalled = IMG_FALSE;
+ psEnvData->bLISRInstalled = IMG_FALSE;
+
+
+ *ppvEnvSpecificData = psEnvData;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
+{
+ ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData;
+
+ PVR_ASSERT(!psEnvData->bMISRInstalled);
+ PVR_ASSERT(!psEnvData->bLISRInstalled);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL);
+ psEnvData->pvBridgeData = IMG_NULL;
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+
+
+IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
+{
+ schedule();
+}
+
+
+
+IMG_UINT32 OSClockus(IMG_VOID)
+{
+ IMG_UINT32 time, j = jiffies;
+
+ time = j * (1000000 / HZ);
+
+ return time;
+}
+
+
+IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
+{
+ udelay(ui32Timeus);
+}
+
+
+IMG_VOID OSSleepms(IMG_UINT32 ui32Timems)
+{
+ msleep(ui32Timems);
+}
+
+
+
+IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID)
+{
+
+ return (IMG_HANDLE) 1;
+}
+
+
+IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer)
+{
+ return (IMG_UINT32) jiffies_to_usecs(jiffies);
+}
+
+
+IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer)
+{
+ PVR_UNREFERENCED_PARAMETER(hTimer);
+}
+
+IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
+{
+ if (in_interrupt())
+ {
+ return KERNEL_ID;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
+ return (IMG_UINT32)current->pgrp;
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+ return (IMG_UINT32)task_tgid_nr(current);
+#else
+ return (IMG_UINT32)current->tgid;
+#endif
+#endif
+}
+
+
+IMG_UINT32 OSGetPageSize(IMG_VOID)
+{
+#if defined(__sh__)
+ IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
+
+ return (ui32ReturnValue);
+#else
+ return PAGE_SIZE;
+#endif
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ , struct pt_regs *regs
+#endif
+ )
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ PVR_UNREFERENCED_PARAMETER(irq);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ PVR_UNREFERENCED_PARAMETER(regs);
+#endif
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
+ if(!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
+ goto out;
+ }
+
+ bStatus = PVRSRVDeviceLISR(psDeviceNode);
+
+ if (bStatus)
+ {
+ OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData);
+ }
+
+out:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
+ return bStatus ? IRQ_HANDLED : IRQ_NONE;
+#endif
+}
+
+
+
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ , struct pt_regs *regs
+#endif
+ )
+{
+ SYS_DATA *psSysData;
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ PVR_UNREFERENCED_PARAMETER(irq);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ PVR_UNREFERENCED_PARAMETER(regs);
+#endif
+ psSysData = (SYS_DATA *)dev_id;
+ if(!psSysData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
+ goto out;
+ }
+
+ bStatus = PVRSRVSystemLISR(psSysData);
+
+ if (bStatus)
+ {
+ OSScheduleMISR((IMG_VOID *)psSysData);
+ }
+
+out:
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
+ return bStatus ? IRQ_HANDLED : IRQ_NONE;
+#endif
+}
+PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
+ IMG_UINT32 ui32Irq,
+ IMG_CHAR *pszISRName,
+ IMG_VOID *pvDeviceNode)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bLISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
+ return PVRSRV_ERROR_ISR_ALREADY_INSTALLED;
+ }
+
+ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %p", pszISRName, ui32Irq, pvDeviceNode));
+
+ if(request_irq(ui32Irq, DeviceISRWrapper,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
+ SA_SHIRQ
+#else
+ IRQF_SHARED
+#endif
+ , pszISRName, pvDeviceNode))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
+
+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ }
+
+ psEnvData->ui32IRQ = ui32Irq;
+ psEnvData->pvISRCookie = pvDeviceNode;
+ psEnvData->bLISRInstalled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (!psEnvData->bLISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
+ return PVRSRV_ERROR_ISR_NOT_INSTALLED;
+ }
+
+ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
+
+ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
+
+ psEnvData->bLISRInstalled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bLISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
+ return PVRSRV_ERROR_ISR_ALREADY_INSTALLED;
+ }
+
+ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %p", ui32Irq, pvSysData));
+
+ if(request_irq(ui32Irq, SystemISRWrapper,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
+ SA_SHIRQ
+#else
+ IRQF_SHARED
+#endif
+ , PVRSRV_MODNAME, pvSysData))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
+
+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ }
+
+ psEnvData->ui32IRQ = ui32Irq;
+ psEnvData->pvISRCookie = pvSysData;
+ psEnvData->bLISRInstalled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (!psEnvData->bLISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
+ return PVRSRV_ERROR_ISR_NOT_INSTALLED;
+ }
+
+ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
+
+ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
+
+ psEnvData->bLISRInstalled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
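+/* MISR (mid-level ISR) support.  One of three implementations is selected
+ * at build time: a private single-threaded workqueue, the shared kernel
+ * workqueue, or a tasklet. */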
+#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+static void MISRWrapper(
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ void *data
+#else
+ struct work_struct *data
+#endif
+)
+{
+ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
+ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
+
+ PVRSRVMISR(psSysData);
+}
+
+
+PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
+ return PVRSRV_ERROR_ISR_ALREADY_INSTALLED;
+ }
+
+ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
+
+ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
+
+ if (psEnvData->psWorkQueue == IMG_NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethread_workqueue failed"));
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+ }
+
+ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ , (void *)&psEnvData->sMISRWork
+#endif
+ );
+
+ psEnvData->pvMISRData = pvSysData;
+ psEnvData->bMISRInstalled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (!psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
+ return PVRSRV_ERROR_ISR_NOT_INSTALLED;
+ }
+
+ PVR_TRACE(("Uninstalling MISR"));
+
+ destroy_workqueue(psEnvData->psWorkQueue);
+
+ psEnvData->bMISRInstalled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
+ }
+
+ return PVRSRV_OK;
+}
+#else
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
+static void MISRWrapper(
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ void *data
+#else
+ struct work_struct *data
+#endif
+)
+{
+ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
+ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
+
+ PVRSRVMISR(psSysData);
+}
+
+
+PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
+ return PVRSRV_ERROR_ISR_ALREADY_INSTALLED;
+ }
+
+ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
+
+ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ , (void *)&psEnvData->sMISRWork
+#endif
+ );
+
+ psEnvData->pvMISRData = pvSysData;
+ psEnvData->bMISRInstalled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (!psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
+ return PVRSRV_ERROR_ISR_NOT_INSTALLED;
+ }
+
+ PVR_TRACE(("Uninstalling MISR"));
+
+ flush_scheduled_work();
+
+ psEnvData->bMISRInstalled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ schedule_work(&psEnvData->sMISRWork);
+ }
+
+ return PVRSRV_OK;
+}
+
+#else
+
+
+static void MISRWrapper(unsigned long data)
+{
+ SYS_DATA *psSysData;
+
+ psSysData = (SYS_DATA *)data;
+
+ PVRSRVMISR(psSysData);
+}
+
+
+PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
+ return PVRSRV_ERROR_ISR_ALREADY_INSTALLED;
+ }
+
+ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
+
+ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
+
+ psEnvData->bMISRInstalled = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+
+ if (!psEnvData->bMISRInstalled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
+ return PVRSRV_ERROR_ISR_NOT_INSTALLED;
+ }
+
+ PVR_TRACE(("Uninstalling MISR"));
+
+ tasklet_kill(&psEnvData->sMISRTasklet);
+
+ psEnvData->bMISRInstalled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
+ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
+
+ if (psEnvData->bMISRInstalled)
+ {
+ tasklet_schedule(&psEnvData->sMISRTasklet);
+ }
+
+ return PVRSRV_OK;
+}
+
+#endif
+#endif
+
+#endif
+
+IMG_VOID OSPanic(IMG_VOID)
+{
+ BUG();
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+#define OS_TAS(p) xchg((p), 1)
+#else
+#define OS_TAS(p) tas(p)
+#endif
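+
+/* Simple test-and-set resource lock.  OS_TAS() atomically sets the lock
+ * word to 1 and yields its previous value, so a zero result means the lock
+ * was free and has now been taken; the caller's ID is recorded for the
+ * ownership checks in OSUnlockResource() and OSIsResourceLocked(). */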
+PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource,
+				IMG_UINT32 ui32ID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(!OS_TAS(&psResource->ui32Lock))
+ psResource->ui32ID = ui32ID;
+ else
+ eError = PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE;
+
+ return eError;
+}
+
+
+PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
+{
+ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(*pui32Access)
+ {
+ if(psResource->ui32ID == ui32ID)
+ {
+ psResource->ui32ID = 0;
+ smp_mb();
+ *pui32Access = 0;
+ }
+ else
+ {
+			PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
+			PVR_DPF((PVR_DBG_MESSAGE,"Should be %x, is actually %x", ui32ID, psResource->ui32ID));
+ eError = PVRSRV_ERROR_INVALID_LOCK_ID;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
+ eError = PVRSRV_ERROR_RESOURCE_NOT_LOCKED;
+ }
+
+ return eError;
+}
+
+
+IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
+{
+ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
+
+ return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
+ ? IMG_TRUE
+ : IMG_FALSE;
+}
+
+
+#if !defined(SYS_CUSTOM_POWERLOCK_WRAP)
+PVRSRV_ERROR OSPowerLockWrap(IMG_BOOL bTryLock)
+{
+ PVR_UNREFERENCED_PARAMETER(bTryLock);
+
+ return PVRSRV_OK;
+}
+
+IMG_VOID OSPowerLockUnwrap (IMG_VOID)
+{
+}
+#endif
+
+
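+/* Convert a kernel linear address within a registered memory area back to a
+ * CPU physical address, using its byte offset from the area's kernel
+ * virtual base address. */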
+IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvLinAddr)
+{
+ IMG_CPU_PHYADDR CpuPAddr;
+ LinuxMemArea *psLinuxMemArea;
+ IMG_UINTPTR_T uiByteOffset;
+ IMG_UINT32 ui32ByteOffset;
+
+ PVR_ASSERT(hOSMemHandle != IMG_NULL);
+
+
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ uiByteOffset = (IMG_UINTPTR_T)pvLinAddr - (IMG_UINTPTR_T)LinuxMemAreaToCpuVAddr(psLinuxMemArea);
+ ui32ByteOffset = (IMG_UINT32)uiByteOffset;
+
+ CpuPAddr = LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
+
+ return CpuPAddr;
+}
+
+
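+/* Map a physical range into kernel virtual space.  Only PVRSRV_HAP_KERNEL_ONLY
+ * mappings are handled here: the range is either ioremapped directly or, when
+ * a handle is requested, wrapped in a LinuxMemArea first. */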
+IMG_VOID *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_HANDLE *phOSMemHandle)
+{
+ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
+ {
+
+ if(phOSMemHandle == IMG_NULL)
+ {
+ IMG_VOID *pvIORemapCookie;
+ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
+ if(pvIORemapCookie == IMG_NULL)
+ {
+ return IMG_NULL;
+ }
+ return pvIORemapCookie;
+ }
+ else
+ {
+ LinuxMemArea *psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
+
+ if(psLinuxMemArea == IMG_NULL)
+ {
+ return IMG_NULL;
+ }
+
+ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
+ return LinuxMemAreaToCpuVAddr(psLinuxMemArea);
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,
+			"OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
+			"(Use OSReservePhys otherwise)"));
+
+ return IMG_NULL;
+}
+
+IMG_BOOL
+OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hOSMemHandle)
+{
+ PVR_TRACE(("%s: unmapping %d bytes from %p", __FUNCTION__, ui32Bytes, pvLinAddr));
+
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
+ {
+ if (hOSMemHandle == IMG_NULL)
+ {
+ IOUnmapWrapper(pvLinAddr);
+ }
+ else
+ {
+ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ PVR_ASSERT(LinuxMemAreaToCpuVAddr(psLinuxMemArea) == pvLinAddr);
+
+ FreeIORemapLinuxMemArea(psLinuxMemArea);
+ }
+
+ return IMG_TRUE;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,
+			"OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
+			"(Use OSUnReservePhys otherwise)"));
+ return IMG_FALSE;
+}
+
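+/* Wrap memory that was allocated (and kernel-mapped) elsewhere in a
+ * LinuxMemArea.  Areas destined for user mappings (single/multi process)
+ * are also registered with the mmap code; on VIVT-cached CPUs the cached
+ * attribute is dropped for multi-process mappings. */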
+static PVRSRV_ERROR
+RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr,
+ IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_BOOL bPhysContig,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_HANDLE *phOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ {
+ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
+
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ break;
+ }
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ {
+ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
+
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+ case PVRSRV_HAP_MULTI_PROCESS:
+ {
+
+#if defined(VIVT_CACHE) || defined(__sh__)
+
+ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
+#endif
+ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
+
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
+ *phOSMemHandle = (IMG_HANDLE)0;
+ return PVRSRV_ERROR_INVALID_FLAGS;
+ }
+
+ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
+
+ LinuxMemAreaRegister(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
+ IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_HANDLE *phOSMemHandle)
+{
+ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
+
+ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle);
+}
+
+
+PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle)
+{
+ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle);
+}
+
+
+PVRSRV_ERROR
+OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_HANDLE hOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ break;
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ case PVRSRV_HAP_MULTI_PROCESS:
+ {
+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
+ __FUNCTION__, pvCpuVAddr, ui32Bytes,
+ ui32MappingFlags, hOSMemHandle));
+ return eError;
+ }
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
+{
+ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
+}
+
+PVRSRV_ERROR
+OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_VOID **ppvCpuVAddr,
+ IMG_HANDLE *phOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea;
+
+#if 0
+
+ if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
+ {
+ ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
+ ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
+ }
+#endif
+
+ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ {
+
+ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ break;
+ }
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ {
+
+ psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+ case PVRSRV_HAP_MULTI_PROCESS:
+ {
+
+#if defined(VIVT_CACHE) || defined(__sh__)
+
+ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
+#endif
+ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
+ if(!psLinuxMemArea)
+ {
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ PVRMMapRegisterArea(psLinuxMemArea);
+ break;
+ }
+ default:
+			PVR_DPF((PVR_DBG_ERROR,"OSReservePhys: invalid flags 0x%x\n", ui32MappingFlags));
+ *ppvCpuVAddr = NULL;
+ *phOSMemHandle = (IMG_HANDLE)0;
+ return PVRSRV_ERROR_INVALID_FLAGS;
+ }
+
+ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
+ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
+
+ LinuxMemAreaRegister(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+OSUnReservePhys(IMG_VOID *pvCpuVAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32MappingFlags,
+ IMG_HANDLE hOSMemHandle)
+{
+ LinuxMemArea *psLinuxMemArea;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+
+ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
+ {
+ case PVRSRV_HAP_KERNEL_ONLY:
+ break;
+ case PVRSRV_HAP_SINGLE_PROCESS:
+ case PVRSRV_HAP_MULTI_PROCESS:
+ {
+ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
+ __FUNCTION__, pvCpuVAddr, ui32Bytes,
+ ui32MappingFlags, hOSMemHandle));
+ return eError;
+ }
+ break;
+ }
+ default:
+ {
+			PVR_DPF((PVR_DBG_ERROR, "OSUnReservePhys: invalid flags 0x%x", ui32MappingFlags));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ LinuxMemAreaDeepFree(psLinuxMemArea);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr)
+{
+#if !defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
+ PVR_UNREFERENCED_PARAMETER(psPhysAddr);
+ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+#else
+ IMG_VOID *pvKernLinAddr;
+
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ pvKernLinAddr = _KMallocWrapper(ui32Size, GFP_KERNEL, __FILE__, __LINE__);
+#else
+ pvKernLinAddr = KMallocWrapper(ui32Size, GFP_KERNEL);
+#endif
+ if (!pvKernLinAddr)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ *pvLinAddr = pvKernLinAddr;
+
+ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
+
+ return PVRSRV_OK;
+#endif
+}
+
+
+PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr)
+{
+#if !defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
+ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
+
+ KFreeWrapper(pvLinAddr);
+#endif
+ return PVRSRV_OK;
+}
+
+IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
+{
+#if !defined(NO_HARDWARE)
+ return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#else
+ return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#endif
+}
+
+IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+#if !defined(NO_HARDWARE)
+ writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#else
+ *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value;
+#endif
+}
+
+#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
+
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+ int err;
+ IMG_UINT32 i;
+ PVR_PCI_DEV *psPVRPCI;
+
+ PVR_TRACE(("OSPCISetDev"));
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL,
+ "PCI Device") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
+ return IMG_NULL;
+ }
+
+ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+ psPVRPCI->ePCIFlags = eFlags;
+
+ err = pci_enable_device(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
+ return IMG_NULL;
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
+ {
+ pci_set_master(psPVRPCI->psPCIDev);
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
+ {
+#if defined(CONFIG_PCI_MSI)
+ err = pci_enable_msi(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err));
+ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
+ }
+#else
+ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel"));
+#endif
+ }
+
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+ }
+
+ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
+{
+ struct pci_dev *psPCIDev;
+
+ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+ if (psPCIDev == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
+ return IMG_NULL;
+ }
+
+ return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
+}
+
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+ *pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+ return PVRSRV_OK;
+}
+
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+ HOST_PCI_ADDR_RANGE_FUNC_LEN,
+ HOST_PCI_ADDR_RANGE_FUNC_START,
+ HOST_PCI_ADDR_RANGE_FUNC_END,
+ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+ PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+ if (ui32Index >= DEVICE_COUNT_RESOURCE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
+		return 0;
+	}
+
+ switch (eFunc)
+ {
+ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+ case HOST_PCI_ADDR_RANGE_FUNC_START:
+ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+ case HOST_PCI_ADDR_RANGE_FUNC_END:
+ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+ {
+ int err;
+
+ err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+ if (err != 0)
+ {
+				PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region failed (%d)", err));
+ return 0;
+ }
+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+ return 1;
+ }
+ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+ if (psPVRPCI->abPCIResourceInUse[ui32Index])
+ {
+ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+ }
+ return 1;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
+ break;
+ }
+
+ return 0;
+}
+
+IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
+}
+
+IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
+}
+
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+ IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int i;
+
+ PVR_TRACE(("OSPCIReleaseDev"));
+
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
+ pci_release_region(psPVRPCI->psPCIDev, i);
+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+ }
+ }
+
+#if defined(CONFIG_PCI_MSI)
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
+ {
+ pci_disable_msi(psPVRPCI->psPCIDev);
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
+ {
+ pci_clear_master(psPVRPCI->psPCIDev);
+ }
+#endif
+ pci_disable_device(psPVRPCI->psPCIDev);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
+
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int i;
+ int err;
+
+ PVR_TRACE(("OSPCISuspendDev"));
+
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ pci_release_region(psPVRPCI->psPCIDev, i);
+ }
+ }
+
+ err = pci_save_state(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state failed (%d)", err));
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ pci_disable_device(psPVRPCI->psPCIDev);
+
+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+ switch(err)
+ {
+ case 0:
+ break;
+ case -EIO:
+ PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
+ break;
+ case -EINVAL:
+ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
+ break;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+ int err;
+ int i;
+
+ PVR_TRACE(("OSPCIResumeDev"));
+
+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+ switch(err)
+ {
+ case 0:
+ break;
+ case -EIO:
+ PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
+ break;
+ case -EINVAL:
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+ pci_restore_state(psPVRPCI->psPCIDev);
+#else
+ err = pci_restore_state(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+#endif
+
+ err = pci_enable_device(psPVRPCI->psPCIDev);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
+ return PVRSRV_ERROR_PCI_CALL_FAILED;
+ }
+
+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
+ pci_set_master(psPVRPCI->psPCIDev);
+
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ {
+ if (psPVRPCI->abPCIResourceInUse[i])
+ {
+ err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+ if (err != 0)
+ {
+				PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region failed (region %d, error %d)", i, err));
+ }
+ }
+
+ }
+
+ return PVRSRV_OK;
+}
+
+#endif
+
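+/* OS timer support: a small fixed pool of timers whose callbacks either run
+ * directly in timer context or, with the workqueue variants configured, are
+ * deferred to a workqueue.  Each callback re-arms its timer until the timer
+ * is disabled. */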
+#define OS_MAX_TIMERS 8
+
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+ IMG_BOOL bInUse;
+ PFN_TIMER_FUNC pfnTimerFunc;
+ IMG_VOID *pvData;
+ struct timer_list sTimer;
+ IMG_UINT32 ui32Delay;
+ IMG_BOOL bActive;
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ struct work_struct sWork;
+#endif
+}TIMER_CALLBACK_DATA;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+static struct workqueue_struct *psTimerWorkQueue;
+#endif
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+DEFINE_MUTEX(sTimerStructLock);
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+
+static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(sTimerStructLock);
+#endif
+#endif
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+ if (!psTimerCBData->bActive)
+ return;
+
+
+ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+
+ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
+}
+
+
+static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ int res;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+#else
+ res = schedule_work(&psTimerCBData->sWork);
+#endif
+ if (res == 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+ }
+#else
+ OSTimerCallbackBody(psTimerCBData);
+#endif
+}
+
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+ OSTimerCallbackBody(psTimerCBData);
+}
+#endif
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData;
+ IMG_UINT32 ui32i;
+#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE))
+ unsigned long ulLockFlags;
+#endif
+
+
+ if(!pfnTimerFunc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+ return IMG_NULL;
+ }
+
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ mutex_lock(&sTimerStructLock);
+#else
+ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
+#endif
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ psTimerCBData = &sTimers[ui32i];
+ if (!psTimerCBData->bInUse)
+ {
+ psTimerCBData->bInUse = IMG_TRUE;
+ break;
+ }
+ }
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ mutex_unlock(&sTimerStructLock);
+#else
+ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
+#endif
+ if (ui32i >= OS_MAX_TIMERS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+ return IMG_NULL;
+ }
+
+ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+ psTimerCBData->pvData = pvData;
+ psTimerCBData->bActive = IMG_FALSE;
+
+
+
+
+ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+ ? 1
+ : ((HZ * ui32MsTimeout) / 1000);
+
+ init_timer(&psTimerCBData->sTimer);
+
+
+
+ psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper;
+ psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
+
+ return (IMG_HANDLE)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+ IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1;
+
+ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+ return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+
+ psTimerCBData->bInUse = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+
+ psTimerCBData->bActive = IMG_TRUE;
+
+
+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+
+ add_timer(&psTimerCBData->sTimer);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(psTimerCBData->bActive);
+
+
+ psTimerCBData->bActive = IMG_FALSE;
+ smp_mb();
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ flush_scheduled_work();
+#endif
+
+
+ del_timer_sync(&psTimerCBData->sTimer);
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+
+ flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ flush_scheduled_work();
+#endif
+
+ return PVRSRV_OK;
+}
+
+
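+/* Event objects: thin wrappers around the LinuxEventObjectList primitives
+ * used to wait for and signal driver events.  A name is generated
+ * automatically when none is supplied. */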
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT_KM *psEventObject)
+#else
+PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
+#endif
+{
+
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(psEventObject)
+ {
+ if(pszName)
+ {
+
+ strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
+ }
+ else
+ {
+
+ static IMG_UINT16 ui16NameIndex = 0;
+#if defined (SUPPORT_SID_INTERFACE)
+ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_KM_%d", ui16NameIndex++);
+#else
+ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
+#endif
+ }
+
+ if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreateKM: psEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+ }
+
+ return eError;
+
+}
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT_KM *psEventObject)
+#else
+PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject)
+#endif
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(psEventObject)
+ {
+ if(psEventObject->hOSEventKM)
+ {
+ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: hOSEventKM is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: psEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM)
+{
+ PVRSRV_ERROR eError;
+
+ if(hOSEventKM)
+ {
+ eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKM: hOSEventKM is not a valid handle"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT_KM *psEventObject,
+#else
+PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject,
+#endif
+ IMG_HANDLE *phOSEvent)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(psEventObject)
+ {
+ if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectOpenKM: psEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT_KM *psEventObject,
+#else
+PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject,
+#endif
+ IMG_HANDLE hOSEventKM)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(psEventObject)
+ {
+ if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCloseKM: psEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+
+}
+
+PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM)
+{
+ PVRSRV_ERROR eError;
+
+ if(hOSEventKM)
+ {
+ eError = LinuxEventObjectSignal(hOSEventKM);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignalKM: hOSEventKM is not a valid handle"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
+{
+ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
+}
+
+PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess,
+ IMG_VOID *pvDest,
+ IMG_VOID *pvSrc,
+ IMG_UINT32 ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
+{
+ IMG_INT linuxType;
+
+ if (eVerification == PVR_VERIFY_READ)
+ {
+ linuxType = VERIFY_READ;
+ }
+ else
+ {
+ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
+ linuxType = VERIFY_WRITE;
+ }
+
+ return access_ok(linuxType, pvUserPtr, ui32Bytes);
+}
+
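+/* Wrapping (pinning) of user memory for device access.
+ * OSAcquirePhysPageAddr() first tries get_user_pages(); if that fails it
+ * falls back to find_vma() plus a page-table walk, which is only permitted
+ * for VM_IO | VM_RESERVED regions with read/write access.  The per-page
+ * system physical addresses are returned to the caller. */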
+typedef enum _eWrapMemType_
+{
+ WRAP_TYPE_NULL = 0,
+ WRAP_TYPE_GET_USER_PAGES,
+ WRAP_TYPE_FIND_VMA
+} eWrapMemType;
+
+typedef struct _sWrapMemInfo_
+{
+ eWrapMemType eType;
+ IMG_INT iNumPages;
+ IMG_INT iNumPagesMapped;
+ struct page **ppsPages;
+ IMG_SYS_PHYADDR *psPhysAddr;
+ IMG_INT iPageOffset;
+#if defined(DEBUG)
+ IMG_UINT32 ulStartAddr;
+ IMG_UINT32 ulBeyondEndAddr;
+ struct vm_area_struct *psVMArea;
+#endif
+} sWrapMemInfo;
+
+
+static IMG_BOOL CPUVAddrToPFN(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr, IMG_UINT32 *pulPFN, struct page **ppsPage)
+{
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))
+ pgd_t *psPGD;
+ pud_t *psPUD;
+ pmd_t *psPMD;
+ pte_t *psPTE;
+ struct mm_struct *psMM = psVMArea->vm_mm;
+ spinlock_t *psPTLock;
+ IMG_BOOL bRet = IMG_FALSE;
+
+ *pulPFN = 0;
+ *ppsPage = NULL;
+
+ psPGD = pgd_offset(psMM, ulCPUVAddr);
+ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
+ return bRet;
+
+ psPUD = pud_offset(psPGD, ulCPUVAddr);
+ if (pud_none(*psPUD) || pud_bad(*psPUD))
+ return bRet;
+
+ psPMD = pmd_offset(psPUD, ulCPUVAddr);
+ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
+ return bRet;
+
+ psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
+
+ if ((pte_none(*psPTE) == 0) && (pte_present(*psPTE) != 0) && (pte_write(*psPTE) != 0))
+ {
+ *pulPFN = pte_pfn(*psPTE);
+ bRet = IMG_TRUE;
+
+ if (pfn_valid(*pulPFN))
+ {
+ *ppsPage = pfn_to_page(*pulPFN);
+
+ get_page(*ppsPage);
+ }
+ }
+
+ pte_unmap_unlock(psPTE, psPTLock);
+
+ return bRet;
+#else
+ return IMG_FALSE;
+#endif
+}
+
+PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
+{
+ sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem;
+ IMG_INT i;
+
+ if (psInfo == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "OSReleasePhysPageAddr: called with null wrap handle"));
+ return PVRSRV_OK;
+ }
+
+ switch (psInfo->eType)
+ {
+ case WRAP_TYPE_NULL:
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "OSReleasePhysPageAddr: called with wrap type WRAP_TYPE_NULL"));
+ break;
+ }
+ case WRAP_TYPE_GET_USER_PAGES:
+ {
+ for (i = 0; i < psInfo->iNumPagesMapped; i++)
+ {
+ struct page *psPage = psInfo->ppsPages[i];
+
+ PVR_ASSERT(psPage != NULL);
+
+
+ if (psInfo->iNumPagesMapped == psInfo->iNumPages)
+ {
+ if (!PageReserved(psPage))
+ {
+ SetPageDirty(psPage);
+ }
+ }
+ page_cache_release(psPage);
+ }
+ break;
+ }
+ case WRAP_TYPE_FIND_VMA:
+ {
+ for (i = 0; i < psInfo->iNumPages; i++)
+ {
+ if (psInfo->ppsPages[i] != IMG_NULL)
+ {
+ put_page(psInfo->ppsPages[i]);
+ }
+ }
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType));
+ return PVRSRV_ERROR_INVALID_WRAP_TYPE;
+ }
+ }
+
+ if (psInfo->ppsPages != IMG_NULL)
+ {
+ kfree(psInfo->ppsPages);
+ }
+
+ if (psInfo->psPhysAddr != IMG_NULL)
+ {
+ kfree(psInfo->psPhysAddr);
+ }
+
+ kfree(psInfo);
+
+ return PVRSRV_OK;
+}
+
+#if defined(CONFIG_TI_TILER)
+
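+/* Resolve the physical address behind a user mapping of a TI TILER buffer
+ * by walking the current process's page tables; such mappings have no
+ * struct page, so the get_user_pages() path cannot be used.  Returns 0 on
+ * failure. */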
+static IMG_UINT32 CPUAddrToTilerPhy(IMG_UINT32 uiAddr)
+{
+ IMG_UINT32 ui32PhysAddr = 0;
+ pte_t *ptep, pte;
+ pgd_t *pgd;
+ pmd_t *pmd;
+
+ pgd = pgd_offset(current->mm, uiAddr);
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ goto err_out;
+
+ pmd = pmd_offset(pgd, uiAddr);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ goto err_out;
+
+ ptep = pte_offset_map(pmd, uiAddr);
+ if (!ptep)
+ goto err_out;
+
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto err_out;
+
+ ui32PhysAddr = (pte & PAGE_MASK) | (~PAGE_MASK & uiAddr);
+
+
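+	/* Only accept addresses inside what appears to be the TILER aperture
+	 * on this SoC (0x60000000..0x7fffffff). */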
+	if (ui32PhysAddr < 0x60000000 || ui32PhysAddr > 0x7fffffff)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CPUAddrToTilerPhy: Not in tiler range"));
+ ui32PhysAddr = 0;
+ goto err_out;
+ }
+
+err_out:
+ return ui32PhysAddr;
+}
+
+#endif
+
+PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_HANDLE *phOSWrapMem)
+{
+ IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr;
+ IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes;
+ IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
+ IMG_UINT32 ulStartAddr;
+ IMG_UINT32 ulAddrRange;
+ IMG_UINT32 ulBeyondEndAddr;
+ IMG_UINT32 ulAddr;
+ IMG_INT i;
+ struct vm_area_struct *psVMArea;
+ sWrapMemInfo *psInfo = NULL;
+ IMG_BOOL bHavePageStructs = IMG_FALSE;
+ IMG_BOOL bHaveNoPageStructs = IMG_FALSE;
+ IMG_BOOL bMMapSemHeld = IMG_FALSE;
+ PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+
+
+ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
+ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
+ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
+
+
+ if (ulBeyondEndAddr <= ulStartAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Invalid address range (start %x, length %x)",
+ ulStartAddrOrig, ulAddrRangeOrig));
+ goto error;
+ }
+
+
+ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
+ if (psInfo == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
+ goto error;
+ }
+ memset(psInfo, 0, sizeof(*psInfo));
+
+#if defined(DEBUG)
+ psInfo->ulStartAddr = ulStartAddrOrig;
+ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
+#endif
+
+ psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT);
+ psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK);
+
+
+ psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL);
+ if (psInfo->psPhysAddr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
+ goto error;
+ }
+ memset(psInfo->psPhysAddr, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr));
+
+
+ psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
+ if (psInfo->ppsPages == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
+ goto error;
+ }
+ memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages));
+
+
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+
+
+ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
+
+
+	down_read(&current->mm->mmap_sem);
+ bMMapSemHeld = IMG_TRUE;
+
+
+ psInfo->iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL);
+
+ if (psInfo->iNumPagesMapped >= 0)
+ {
+
+ if (psInfo->iNumPagesMapped != psInfo->iNumPages)
+ {
+ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, psInfo->iNumPagesMapped));
+
+ goto error;
+ }
+
+
+ for (i = 0; i < psInfo->iNumPages; i++)
+ {
+ IMG_CPU_PHYADDR CPUPhysAddr;
+ IMG_UINT32 ulPFN;
+
+ ulPFN = page_to_pfn(psInfo->ppsPages[i]);
+ CPUPhysAddr.uiAddr = ulPFN << PAGE_SHIFT;
+ if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ulPFN)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ulPFN));
+
+ goto error;
+ }
+ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
+ psSysPAddr[i] = psInfo->psPhysAddr[i];
+
+ }
+
+ goto exit;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), using CPU page table", psInfo->iNumPagesMapped));
+
+
+ psInfo->eType = WRAP_TYPE_NULL;
+ psInfo->iNumPagesMapped = 0;
+ memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages));
+
+
+
+ psInfo->eType = WRAP_TYPE_FIND_VMA;
+
+ psVMArea = find_vma(current->mm, ulStartAddrOrig);
+ if (psVMArea == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %x", ulStartAddrOrig));
+
+ goto error;
+ }
+#if defined(DEBUG)
+ psInfo->psVMArea = psVMArea;
+#endif
+
+
+ if (ulStartAddrOrig < psVMArea->vm_start)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Start address %x is outside of the region returned by find_vma", ulStartAddrOrig));
+ goto error;
+ }
+
+
+ if (ulBeyondEndAddrOrig > psVMArea->vm_end)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: End address %x is outside of the region returned by find_vma", ulBeyondEndAddrOrig));
+ goto error;
+ }
+
+
+ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags));
+ goto error;
+ }
+
+
+ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags));
+ goto error;
+ }
+
+ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
+ {
+ IMG_CPU_PHYADDR CPUPhysAddr;
+ IMG_UINT32 ulPFN = 0;
+
+ PVR_ASSERT(i < psInfo->iNumPages);
+
+ if (!CPUVAddrToPFN(psVMArea, ulAddr, &ulPFN, &psInfo->ppsPages[i]))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Invalid CPU virtual address"));
+
+ goto error;
+ }
+ if (psInfo->ppsPages[i] == NULL)
+ {
+#if defined(CONFIG_TI_TILER)
+
+ IMG_UINT32 ui32TilerAddr = CPUAddrToTilerPhy(ulAddr);
+ if (ui32TilerAddr)
+ {
+ bHavePageStructs = IMG_TRUE;
+ psInfo->iNumPagesMapped++;
+ psInfo->psPhysAddr[i].uiAddr = ui32TilerAddr;
+ psSysPAddr[i].uiAddr = ui32TilerAddr;
+ continue;
+ }
+#endif
+
+ bHaveNoPageStructs = IMG_TRUE;
+ }
+ else
+ {
+ bHavePageStructs = IMG_TRUE;
+
+ psInfo->iNumPagesMapped++;
+
+ PVR_ASSERT(ulPFN == page_to_pfn(psInfo->ppsPages[i]));
+ }
+
+ CPUPhysAddr.uiAddr = ulPFN << PAGE_SHIFT;
+ if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ulPFN)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ulPFN));
+
+ goto error;
+ }
+
+ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
+ psSysPAddr[i] = psInfo->psPhysAddr[i];
+ }
+ PVR_ASSERT(i == psInfo->iNumPages);
+
+#if defined(VM_MIXEDMAP)
+ if ((psVMArea->vm_flags & VM_MIXEDMAP) != 0)
+ {
+ goto exit;
+ }
+#endif
+
+ if (bHavePageStructs && bHaveNoPageStructs)
+ {
+		PVR_DPF((PVR_DBG_ERROR,
+			"OSAcquirePhysPageAddr: Region contains a mix of pages with and without page structures, but isn't marked VM_MIXEDMAP"));
+ goto error;
+ }
+
+ if (!bHaveNoPageStructs)
+ {
+
+ goto exit;
+ }
+
+#if defined(VM_PFNMAP)
+ if ((psVMArea->vm_flags & VM_PFNMAP) == 0)
+#endif
+ {
+		PVR_DPF((PVR_DBG_ERROR,
+			"OSAcquirePhysPageAddr: Region has pages without page structures, but isn't marked VM_PFNMAP"));
+ goto error;
+ }
+
+exit:
+ PVR_ASSERT(bMMapSemHeld);
+	up_read(&current->mm->mmap_sem);
+
+
+ *phOSWrapMem = (IMG_HANDLE)psInfo;
+
+ if (bHaveNoPageStructs)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "OSAcquirePhysPageAddr: Region contains pages which can't be locked down (no page structures)"));
+ }
+
+ PVR_ASSERT(psInfo->eType != 0);
+
+#if 0
+
+
+ OSCleanCPUCacheRangeKM(pvCPUVAddr, (IMG_VOID *)((IMG_CHAR *)pvCPUVAddr + ui32Bytes));
+#endif
+
+ return PVRSRV_OK;
+
+error:
+ if (bMMapSemHeld)
+ {
+		up_read(&current->mm->mmap_sem);
+ }
+ OSReleasePhysPageAddr((IMG_HANDLE)psInfo);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
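+/* CPU cache maintenance.  CheckExecuteCacheOp() checks that the virtual
+ * range belongs to the given memory area (its kernel vmalloc mapping or one
+ * of the calling process's mmaps), applies the inner (CPU) cache operation
+ * on the virtual range and, when an outer cache is configured, walks the
+ * range page by page applying the outer operation to the corresponding
+ * physical addresses. */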
+typedef void (*InnerCacheOp_t)(const void *pvStart, const void *pvEnd);
+
+#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+typedef void (*OuterCacheOp_t)(phys_addr_t uStart, phys_addr_t uEnd);
+#else
+typedef void (*OuterCacheOp_t)(unsigned long ulStart, unsigned long ulEnd);
+#endif
+
+#if defined(CONFIG_OUTER_CACHE)
+
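+/* Per-area-type helpers that translate a page index within a memory area to
+ * a physical address; the outer-cache operations below need physical
+ * addresses rather than virtual ones. */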
+typedef unsigned long (*MemAreaToPhys_t)(LinuxMemArea *psLinuxMemArea,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32PageNumOffset,
+ IMG_UINT32 ui32PageNum);
+
+static unsigned long VMallocAreaToPhys(LinuxMemArea *psLinuxMemArea,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32PageNumOffset,
+ IMG_UINT32 ui32PageNum)
+{
+ return vmalloc_to_pfn(pvRangeAddrStart + ui32PageNum * PAGE_SIZE) << PAGE_SHIFT;
+}
+
+static unsigned long ExternalKVAreaToPhys(LinuxMemArea *psLinuxMemArea,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32PageNumOffset,
+ IMG_UINT32 ui32PageNum)
+{
+ IMG_SYS_PHYADDR SysPAddr;
+ IMG_CPU_PHYADDR CpuPAddr;
+ SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageNumOffset + ui32PageNum];
+ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
+ return CpuPAddr.uiAddr;
+}
+
+static unsigned long AllocPagesAreaToPhys(LinuxMemArea *psLinuxMemArea,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32PageNumOffset,
+ IMG_UINT32 ui32PageNum)
+{
+ struct page *pPage;
+ pPage = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageNumOffset + ui32PageNum];
+ return page_to_pfn(pPage) << PAGE_SHIFT;
+}
+
+static unsigned long IONAreaToPhys(LinuxMemArea *psLinuxMemArea,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32PageNumOffset,
+ IMG_UINT32 ui32PageNum)
+{
+ IMG_CPU_PHYADDR CpuPAddr;
+ CpuPAddr = psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs[ui32PageNumOffset + ui32PageNum];
+ return CpuPAddr.uiAddr;
+}
+
+#endif
+
+static
+IMG_VOID *FindMMapBaseVAddr(struct list_head *psMMapOffsetStructList,
+ IMG_VOID *pvRangeAddrStart, IMG_UINT32 ui32Length)
+{
+ PKV_OFFSET_STRUCT psOffsetStruct;
+ IMG_VOID *pvMinVAddr;
+
+
+ list_for_each_entry(psOffsetStruct, psMMapOffsetStructList, sAreaItem)
+ {
+ if(OSGetCurrentProcessIDKM() != psOffsetStruct->ui32PID)
+ continue;
+
+ pvMinVAddr = (IMG_VOID *)psOffsetStruct->ui32UserVAddr;
+
+
+ if(pvRangeAddrStart >= pvMinVAddr &&
+ ui32Length <= psOffsetStruct->ui32RealByteSize)
+ return pvMinVAddr;
+ }
+
+ return IMG_NULL;
+}
+
+extern PVRSRV_LINUX_MUTEX g_sMMapMutex;
+
+static
+IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length,
+ InnerCacheOp_t pfnInnerCacheOp,
+ OuterCacheOp_t pfnOuterCacheOp)
+{
+ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
+ IMG_UINT32 ui32AreaLength, ui32AreaOffset = 0;
+ struct list_head *psMMapOffsetStructList;
+ IMG_VOID *pvMinVAddr;
+
+#if defined(CONFIG_OUTER_CACHE)
+ MemAreaToPhys_t pfnMemAreaToPhys = IMG_NULL;
+ IMG_UINT32 ui32PageNumOffset = 0;
+#endif
+
+ PVR_ASSERT(psLinuxMemArea != IMG_NULL);
+
+ LinuxLockMutex(&g_sMMapMutex);
+
+ psMMapOffsetStructList = &psLinuxMemArea->sMMapOffsetStructList;
+ ui32AreaLength = psLinuxMemArea->ui32ByteSize;
+
+ PVR_ASSERT(ui32Length <= ui32AreaLength);
+
+ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
+ {
+ ui32AreaOffset = psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
+ psLinuxMemArea = psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
+ }
+
+
+ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
+
+ switch(psLinuxMemArea->eAreaType)
+ {
+ case LINUX_MEM_AREA_VMALLOC:
+ {
+ if(is_vmalloc_addr(pvRangeAddrStart))
+ {
+ pvMinVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + ui32AreaOffset;
+
+
+ if(pvRangeAddrStart < pvMinVAddr)
+ goto err_blocked;
+
+ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+ }
+ else
+ {
+
+ pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList,
+ pvRangeAddrStart, ui32Length);
+ if(!pvMinVAddr)
+ goto err_blocked;
+
+ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+#if defined(CONFIG_OUTER_CACHE)
+
+ pvRangeAddrStart = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress +
+ (ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr);
+ }
+
+ pfnMemAreaToPhys = VMallocAreaToPhys;
+#else
+ }
+#endif
+ break;
+ }
+
+ case LINUX_MEM_AREA_EXTERNAL_KV:
+ {
+
+ if (psLinuxMemArea->uData.sExternalKV.bPhysContig == IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush contiguous external memory", __func__));
+ goto err_blocked;
+ }
+
+
+ if (psLinuxMemArea->uData.sExternalKV.pvExternalKV != IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush external memory with a kernel virtual address", __func__));
+ goto err_blocked;
+ }
+
+ pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList,
+ pvRangeAddrStart, ui32Length);
+ if(!pvMinVAddr)
+ goto err_blocked;
+
+ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+#if defined(CONFIG_OUTER_CACHE)
+ ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT;
+ pfnMemAreaToPhys = ExternalKVAreaToPhys;
+#endif
+ break;
+ }
+
+ case LINUX_MEM_AREA_ION:
+ {
+ pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList,
+ pvRangeAddrStart, ui32Length);
+ if(!pvMinVAddr)
+ goto err_blocked;
+
+ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+#if defined(CONFIG_OUTER_CACHE)
+ ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT;
+ pfnMemAreaToPhys = IONAreaToPhys;
+#endif
+ break;
+ }
+
+ case LINUX_MEM_AREA_ALLOC_PAGES:
+ {
+ pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList,
+ pvRangeAddrStart, ui32Length);
+ if(!pvMinVAddr)
+ goto err_blocked;
+
+ pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length);
+
+#if defined(CONFIG_OUTER_CACHE)
+ ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT;
+ pfnMemAreaToPhys = AllocPagesAreaToPhys;
+#endif
+ break;
+ }
+
+ default:
+ PVR_DBG_BREAK;
+ }
+
+ LinuxUnLockMutex(&g_sMMapMutex);
+
+#if defined(CONFIG_OUTER_CACHE)
+ PVR_ASSERT(pfnMemAreaToPhys != IMG_NULL);
+
+
+ {
+ unsigned long ulStart, ulEnd, ulLength, ulStartOffset, ulEndOffset;
+ IMG_UINT32 i, ui32NumPages;
+
+
+ ulLength = (unsigned long)ui32Length;
+ ulStartOffset = ((unsigned long)pvRangeAddrStart) & (PAGE_SIZE - 1);
+ ulEndOffset = ((unsigned long)pvRangeAddrStart + ulLength) & (PAGE_SIZE - 1);
+
+
+ ui32NumPages = (ulStartOffset + ulLength + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ for(i = 0; i < ui32NumPages; i++)
+ {
+ ulStart = pfnMemAreaToPhys(psLinuxMemArea, pvRangeAddrStart,
+ ui32PageNumOffset, i);
+ ulEnd = ulStart + PAGE_SIZE;
+
+ if(i == ui32NumPages - 1 && ulEndOffset != 0)
+ ulEnd = ulStart + ulEndOffset;
+
+ if(i == 0)
+ ulStart += ulStartOffset;
+
+ pfnOuterCacheOp(ulStart, ulEnd);
+ }
+ }
+#endif
+
+ return IMG_TRUE;
+
+err_blocked:
+ PVR_DPF((PVR_DBG_WARNING, "%s: Blocked cache op on virtual range "
+ "%p-%p (type %d)", __func__,
+ pvRangeAddrStart, pvRangeAddrStart + ui32Length,
+ psLinuxMemArea->eAreaType));
+ LinuxUnLockMutex(&g_sMMapMutex);
+ return IMG_FALSE;
+}
+
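+/* Architecture-specific cache primitives: x86 uses a clflush loop and
+ * wbinvd on every CPU, ARM uses the dmac_*()/outer_*() helpers, and MIPS
+ * falls back to dma_cache_wback/_inv over a fixed range for the whole-cache
+ * operations.  Any other architecture fails the build below. */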
+#if defined(__i386__)
+
+#define ROUND_UP(x,a) (((x) + (a) - 1) & ~((a) - 1))
+
+static void per_cpu_cache_flush(void *arg)
+{
+ PVR_UNREFERENCED_PARAMETER(arg);
+ wbinvd();
+}
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+ IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+ IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+ IMG_BYTE *pbBase;
+
+ pbEnd = (IMG_BYTE *)ROUND_UP((IMG_UINTPTR_T)pbEnd,
+ boot_cpu_data.x86_clflush_size);
+
+ mb();
+ for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+ clflush(pbBase);
+ mb();
+}
+
+IMG_VOID OSCleanCPUCacheKM(IMG_VOID)
+{
+
+ ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+}
+
+IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
+{
+ ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+}
+
+IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ x86_flush_cache_range, IMG_NULL);
+}
+
+IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ x86_flush_cache_range, IMG_NULL);
+}
+
+IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ x86_flush_cache_range, IMG_NULL);
+}
+
+#else
+
+#if defined(__arm__)
+
+static void per_cpu_cache_flush(void *arg)
+{
+ PVR_UNREFERENCED_PARAMETER(arg);
+ flush_cache_all();
+}
+
+IMG_VOID OSCleanCPUCacheKM(IMG_VOID)
+{
+
+ ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+#if defined(CONFIG_OUTER_CACHE) && !defined(PVR_NO_FULL_CACHE_OPS)
+ outer_clean_range(0, ULONG_MAX);
+#endif
+}
+
+IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
+{
+ ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+#if defined(CONFIG_OUTER_CACHE) && !defined(PVR_NO_FULL_CACHE_OPS)
+ outer_flush_all();
+#endif
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+ return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+#endif
+
+static void pvr_dmac_inv_range(const void *pvStart, const void *pvEnd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+ dmac_inv_range(pvStart, pvEnd);
+#else
+ dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_FROM_DEVICE);
+#endif
+}
+
+static void pvr_dmac_clean_range(const void *pvStart, const void *pvEnd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+ dmac_clean_range(pvStart, pvEnd);
+#else
+ dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_TO_DEVICE);
+#endif
+}
+
+IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ dmac_flush_range, outer_flush_range);
+}
+
+IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ pvr_dmac_clean_range, outer_clean_range);
+}
+
+IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ return CheckExecuteCacheOp(hOSMemHandle, pvRangeAddrStart, ui32Length,
+ pvr_dmac_inv_range, outer_inv_range);
+}
+
+#else
+
+#if defined(__mips__)
+IMG_VOID OSCleanCPUCacheKM(IMG_VOID)
+{
+
+ dma_cache_wback(0, 0x100000);
+}
+
+IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
+{
+
+ dma_cache_wback_inv(0, 0x100000);
+}
+
+IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ if (ui32Length)
+ dma_cache_wback_inv((IMG_UINTPTR_T)pvRangeAddrStart, ui32Length);
+ return IMG_TRUE;
+}
+
+IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ if (ui32Length)
+ dma_cache_wback((IMG_UINTPTR_T)pvRangeAddrStart, ui32Length);
+ return IMG_TRUE;
+}
+
+IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ if (ui32Length)
+ dma_cache_inv((IMG_UINTPTR_T)pvRangeAddrStart, ui32Length);
+ return IMG_TRUE;
+}
+
+#else
+
+#error "Implement CPU cache flush/clean/invalidate primitives for this CPU!"
+
+#endif
+
+#endif
+
+#endif
+
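+/* Minimal reference-count helpers built on the kernel's atomic_t. */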
+typedef struct _AtomicStruct
+{
+ atomic_t RefCount;
+} AtomicStruct;
+
+PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount)
+{
+ AtomicStruct *psRefCount;
+
+ psRefCount = kmalloc(sizeof(AtomicStruct), GFP_KERNEL);
+ if (psRefCount == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ atomic_set(&psRefCount->RefCount, 0);
+
+ *ppvRefCount = psRefCount;
+ return PVRSRV_OK;
+}
+
+IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount)
+{
+ AtomicStruct *psRefCount = pvRefCount;
+
+ PVR_ASSERT(atomic_read(&psRefCount->RefCount) == 0);
+ kfree(psRefCount);
+}
+
+IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount)
+{
+ AtomicStruct *psRefCount = pvRefCount;
+
+ atomic_inc(&psRefCount->RefCount);
+}
+
+IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount)
+{
+ AtomicStruct *psRefCount = pvRefCount;
+
+ return atomic_dec_and_test(&psRefCount->RefCount) ? IMG_TRUE:IMG_FALSE;
+}
+
+IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount)
+{
+ AtomicStruct *psRefCount = pvRefCount;
+
+ return (IMG_UINT32) atomic_read(&psRefCount->RefCount);
+}
+
+IMG_VOID OSReleaseBridgeLock(IMG_VOID)
+{
+ LinuxUnLockMutex(&gPVRSRVLock);
+}
+
+IMG_VOID OSReacquireBridgeLock(IMG_VOID)
+{
+ LinuxLockMutex(&gPVRSRVLock);
+}
+
+typedef struct _OSTime
+{
+ unsigned long ulTime;
+} OSTime;
+
+PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32USOffset)
+{
+ OSTime *psOSTime;
+
+ psOSTime = kmalloc(sizeof(OSTime), GFP_KERNEL);
+ if (psOSTime == IMG_NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psOSTime->ulTime = usecs_to_jiffies(jiffies_to_usecs(jiffies) + ui32USOffset);
+ *pvRet = psOSTime;
+ return PVRSRV_OK;
+}
+
+
+IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData)
+{
+ OSTime *psOSTime = pvData;
+
+ if (time_is_before_jiffies(psOSTime->ulTime))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_VOID OSTimeDestroy(IMG_PVOID pvData)
+{
+ kfree(pvData);
+}
+
+PVRSRV_ERROR PVROSFuncInit(IMG_VOID)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ {
+ psTimerWorkQueue = create_workqueue("pvr_timer");
+ if (psTimerWorkQueue == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+
+ }
+ }
+#endif
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+ {
+ IMG_UINT32 ui32i;
+
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+ }
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+IMG_VOID PVROSFuncDeInit(IMG_VOID)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+ if (psTimerWorkQueue != NULL)
+ {
+ destroy_workqueue(psTimerWorkQueue);
+ }
+#endif
+}
diff --git a/drivers/gpu/pvr/osfunc.h b/drivers/gpu/pvr/osfunc.h
new file mode 100644
index 0000000..70caf57
--- /dev/null
+++ b/drivers/gpu/pvr/osfunc.h
@@ -0,0 +1,648 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG 1
+#endif
+
+#ifndef __OSFUNC_H__
+#define __OSFUNC_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#if defined(__arm__)
+#include <asm/memory.h>
+#endif
+#endif
+
+
+
+ #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
+
+#define KERNEL_ID 0xffffffffL
+#define POWER_MANAGER_ID 0xfffffffeL
+#define ISR_ID 0xfffffffdL
+#define TIMER_ID 0xfffffffcL
+
+
+#define HOST_PAGESIZE OSGetPageSize
+#define HOST_PAGEMASK (HOST_PAGESIZE()-1)
+#define HOST_PAGEALIGN(addr) (((addr) + HOST_PAGEMASK) & ~HOST_PAGEMASK)
+
+#define PVRSRV_OS_HEAP_MASK 0xf
+#define PVRSRV_OS_PAGEABLE_HEAP 0x1
+#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
+
+
+IMG_UINT32 OSClockus(IMG_VOID);
+IMG_SIZE_T OSGetPageSize(IMG_VOID);
+PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
+ IMG_UINT32 ui32Irq,
+ IMG_CHAR *pszISRName,
+ IMG_VOID *pvDeviceNode);
+PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
+PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
+PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
+PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
+IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE, IMG_VOID* pvLinAddr);
+IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
+IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
+IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
+
+PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
+PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
+
+#if (defined(__linux__) && defined(__KERNEL__)) || (UNDER_CE >= 600)
+
+IMG_VOID OSFlushCPUCacheKM(IMG_VOID);
+
+IMG_VOID OSCleanCPUCacheKM(IMG_VOID);
+
+IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length);
+IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length);
+IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSFlushCPUCacheKM)
+#endif
+static INLINE IMG_VOID OSFlushCPUCacheKM(IMG_VOID) {}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSCleanCPUCacheKM)
+#endif
+static INLINE IMG_VOID OSCleanCPUCacheKM(IMG_VOID) {}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSFlushCPUCacheRangeKM)
+#endif
+static INLINE IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
+ PVR_UNREFERENCED_PARAMETER(ui32Length);
+ return IMG_FALSE;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSCleanCPUCacheRangeKM)
+#endif
+static INLINE IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
+ PVR_UNREFERENCED_PARAMETER(ui32Length);
+ return IMG_FALSE;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSInvalidateCPUCacheRangeKM)
+#endif
+static INLINE IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvRangeAddrStart,
+ IMG_UINT32 ui32Length)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
+ PVR_UNREFERENCED_PARAMETER(ui32Length);
+ return IMG_FALSE;
+}
+
+#endif
+
+#if defined(__linux__)
+PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
+ IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandle);
+PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hOSMemHandle);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSRegisterDiscontigMem)
+#endif
+static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
+ IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSUnRegisterDiscontigMem)
+#endif
+static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hOSMemHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+
+#if defined(__linux__)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSReserveDiscontigPhys)
+#endif
+static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
+{
+#if defined(__linux__)
+ *ppvCpuVAddr = IMG_NULL;
+ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle);
+#else
+ extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr);
+
+
+ return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), ui32Bytes, ui32Flags, ppvCpuVAddr, phOSMemHandle);
+#endif
+}
+
+static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
+{
+#if defined(__linux__)
+ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
+#endif
+
+ return PVRSRV_OK;
+}
+#else
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSReserveDiscontigPhys)
+#endif
+static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSUnReserveDiscontigPhys)
+#endif
+static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
+ IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandle);
+PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hOSMemHandle);
+
+
+
+#if defined(__linux__)
+PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
+ IMG_UINTPTR_T ui32ByteOffset,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandleRet);
+PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetSubMemHandle)
+#endif
+static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
+ IMG_UINTPTR_T ui32ByteOffset,
+ IMG_SIZE_T ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE *phOSMemHandleRet)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+
+ *phOSMemHandleRet = hOSMemHandle;
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ return PVRSRV_OK;
+}
+#endif
+
+IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
+IMG_UINTPTR_T OSGetCurrentThreadID( IMG_VOID );
+IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
+
+PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_UINT32 ui32PageSize,
+ IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
+PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
+
+
+#ifdef PVRSRV_LOG_MEMORY_ALLOCS
+ #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \
+ (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \
+ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
+
+ #define OSAllocPages(flags, size, pageSize, privdata, privdatalength, linAddr, pageAlloc) \
+ (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \
+		OSAllocPages_Impl(flags, size, pageSize, privdata, privdatalength, linAddr, pageAlloc))
+
+ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
+ (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \
+ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
+#else
+ #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \
+ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
+
+ #define OSAllocPages OSAllocPages_Impl
+
+ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
+ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
+#endif
+
+#ifdef PVRSRV_DEBUG_OS_MEMORY
+
+ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_PVOID *ppvCpuVAddr,
+ IMG_HANDLE *phBlockAlloc,
+ IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32Line);
+
+ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_PVOID pvCpuVAddr,
+ IMG_HANDLE hBlockAlloc,
+ IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32Line);
+
+
+ typedef struct
+ {
+ IMG_UINT8 sGuardRegionBefore[8];
+ IMG_CHAR sFileName[128];
+ IMG_UINT32 uLineNo;
+ IMG_SIZE_T uSize;
+ IMG_SIZE_T uSizeParityCheck;
+ enum valid_tag
+ { isFree = 0x277260FF,
+ isAllocated = 0x260511AA
+ } eValid;
+ } OSMEM_DEBUG_INFO;
+
+ #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO))
+ #define TEST_BUFFER_PADDING_AFTER (8)
+ #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER)
+#else
+ #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations
+ #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations
+#endif
+
+#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
+ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
+
+ #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl
+ #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl
+#else
+ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
+ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
+
+ #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
+ OSAllocMem_Impl(flags, size, addr, blockAlloc)
+ #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
+ OSFreeMem_Impl(flags, size, addr, blockAlloc)
+#endif
+
+
+#if defined(__linux__)
+IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_SIZE_T ui32ByteOffset);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSMemHandleToCpuPAddr)
+#endif
+static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_SIZE_T ui32ByteOffset)
+{
+ IMG_CPU_PHYADDR sCpuPAddr;
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
+ sCpuPAddr.uiAddr = 0;
+ return sCpuPAddr;
+}
+#endif
+
+#if defined(__linux__)
+IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSMemHandleIsPhysContig)
+#endif
+static INLINE IMG_BOOL OSMemHandleIsPhysContig(IMG_HANDLE hOSMemHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ return IMG_FALSE;
+}
+#endif
+
+PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
+PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
+IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+#define OSStringLength(pszString) strlen(pszString)
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName,
+ PVRSRV_EVENTOBJECT_KM *psEventObject);
+PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT_KM *psEventObject);
+PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT_KM *psEventObject,
+ IMG_HANDLE *phOSEvent);
+PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT_KM *psEventObject,
+ IMG_HANDLE hOSEventKM);
+#else
+PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName,
+ PVRSRV_EVENTOBJECT *psEventObject);
+PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject);
+PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject,
+ IMG_HANDLE *phOSEvent);
+PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject,
+ IMG_HANDLE hOSEventKM);
+#endif
+
+
+PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
+PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
+
+IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T ui32Size,IMG_HANDLE *phMemBlock);
+IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
+IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
+
+IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T ui32Size, IMG_HANDLE hMemBlock);
+
+PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
+ IMG_SYS_PHYADDR sCPUPhysAddr,
+ IMG_SIZE_T uiSizeInBytes,
+ IMG_UINT32 ui32CacheFlags,
+ IMG_PVOID *ppvUserAddr,
+ IMG_SIZE_T *puiActualSize,
+ IMG_HANDLE hMappingHandle);
+
+PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
+ IMG_PVOID pvUserAddr,
+ IMG_PVOID pvProcess);
+
+PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
+PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
+IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
+PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
+PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
+IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+#define OSPowerLockWrap SysPowerLockWrap
+#define OSPowerLockUnwrap SysPowerLockUnwrap
+#else
+PVRSRV_ERROR OSPowerLockWrap(IMG_BOOL bTryLock);
+
+IMG_VOID OSPowerLockUnwrap(IMG_VOID);
+#endif
+
+
+IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
+
+
+IMG_VOID OSSleepms(IMG_UINT32 ui32Timems);
+
+IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID);
+IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer);
+IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer);
+IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
+IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
+IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
+
+#ifndef OSReadHWReg
+IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+#endif
+#ifndef OSWriteHWReg
+IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+#endif
+
+typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
+
+PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *pui32Bytes);
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
+ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
+ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+
+PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
+
+IMG_VOID OSPanic(IMG_VOID);
+
+IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
+
+typedef enum _img_verify_test
+{
+ PVR_VERIFY_WRITE = 0,
+ PVR_VERIFY_READ
+} IMG_VERIFY_TEST;
+
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes);
+
+PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
+PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
+
+#if defined(__linux__)
+PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_HANDLE *phOSWrapMem);
+PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSAcquirePhysPageAddr)
+#endif
+static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
+ IMG_SIZE_T ui32Bytes,
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_HANDLE *phOSWrapMem)
+{
+ PVR_UNREFERENCED_PARAMETER(pvCPUVAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+ PVR_UNREFERENCED_PARAMETER(psSysPAddr);
+ PVR_UNREFERENCED_PARAMETER(phOSWrapMem);
+ return PVRSRV_OK;
+}
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSReleasePhysPageAddr)
+#endif
+static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
+{
+ PVR_UNREFERENCED_PARAMETER(hOSWrapMem);
+ return PVRSRV_OK;
+}
+#endif
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+#define OS_SUPPORTS_IN_LISR
+
+static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvSysData);
+ return (in_irq()) ? IMG_TRUE : IMG_FALSE;
+}
+
+static inline IMG_VOID OSWriteMemoryBarrier(IMG_VOID)
+{
+ wmb();
+}
+
+static inline IMG_VOID OSMemoryBarrier(IMG_VOID)
+{
+ mb();
+}
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSWriteMemoryBarrier)
+#endif
+static INLINE IMG_VOID OSWriteMemoryBarrier(IMG_VOID) { }
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSMemoryBarrier)
+#endif
+static INLINE IMG_VOID OSMemoryBarrier(IMG_VOID) { }
+
+#endif
+
+PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount);
+IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount);
+IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount);
+IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount);
+IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount);
+
+PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32USOffset);
+IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData);
+IMG_VOID OSTimeDestroy(IMG_PVOID pvData);
+
+#if defined(__linux__)
+IMG_VOID OSReleaseBridgeLock(IMG_VOID);
+IMG_VOID OSReacquireBridgeLock(IMG_VOID);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSReleaseBridgeLock)
+#endif
+static INLINE IMG_VOID OSReleaseBridgeLock(IMG_VOID) { }
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSReacquireBridgeLock)
+#endif
+static INLINE IMG_VOID OSReacquireBridgeLock(IMG_VOID) { }
+
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/osfunc_common.c b/drivers/gpu/pvr/osfunc_common.c
new file mode 100644
index 0000000..e0a46da
--- /dev/null
+++ b/drivers/gpu/pvr/osfunc_common.c
@@ -0,0 +1,31 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_types.h"
+#include "services_headers.h"
+#include "osfunc.h"
+
+
diff --git a/drivers/gpu/pvr/osperproc.c b/drivers/gpu/pvr/osperproc.c
new file mode 100644
index 0000000..6b57dfc
--- /dev/null
+++ b/drivers/gpu/pvr/osperproc.c
@@ -0,0 +1,113 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "osperproc.h"
+
+#include "env_perproc.h"
+#include "proc.h"
+
+extern IMG_UINT32 gui32ReleasePID;
+
+PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
+ phOsPrivateData,
+ &hBlockAlloc,
+ "Environment per Process Data");
+
+ if (eError != PVRSRV_OK)
+ {
+ *phOsPrivateData = IMG_NULL;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError));
+ return eError;
+ }
+
+ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
+ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
+
+ psEnvPerProc->hBlockAlloc = hBlockAlloc;
+
+
+ LinuxMMapPerProcessConnect(psEnvPerProc);
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+
+ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
+#endif
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
+
+ if (hOsPrivateData == IMG_NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
+
+
+ LinuxMMapPerProcessDisconnect(psEnvPerProc);
+
+
+ RemovePerProcessProcDir(psEnvPerProc);
+
+ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
+ hOsPrivateData,
+ psEnvPerProc->hBlockAlloc);
+
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+ return LinuxMMapPerProcessHandleOptions(psHandleBase);
+}
+
+IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID)
+{
+ if(!gui32ReleasePID)
+ return NULL;
+ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
+}
diff --git a/drivers/gpu/pvr/osperproc.h b/drivers/gpu/pvr/osperproc.h
new file mode 100644
index 0000000..02aa230
--- /dev/null
+++ b/drivers/gpu/pvr/osperproc.h
@@ -0,0 +1,76 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __OSPERPROC_H__
+#define __OSPERPROC_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__linux__)
+PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData);
+PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+#else
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSPerProcessPrivateDataInit)
+#endif
+static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
+{
+ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSPerProcessPrivateDataDeInit)
+#endif
+static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSPerProcessSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+ return PVRSRV_OK;
+}
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump.c b/drivers/gpu/pvr/pdump.c
new file mode 100644
index 0000000..13d9b0d
--- /dev/null
+++ b/drivers/gpu/pvr/pdump.c
@@ -0,0 +1,628 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined (SUPPORT_SGX) || defined (SUPPORT_VGX)
+#if defined (PDUMP)
+
+#include <asm/atomic.h>
+#include <stdarg.h>
+#if defined (SUPPORT_SGX)
+#include "sgxdefs.h"
+#endif
+#include "services_headers.h"
+
+#include "pvrversion.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif.h"
+#if defined (SUPPORT_SGX)
+#include "sgxmmu.h"
+#endif
+#include "mm.h"
+#include "pdump_km.h"
+#include "pdump_int.h"
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
+static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
+static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
+static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+
+#define PDUMP_DATAMASTER_PIXEL (1)
+#define PDUMP_DATAMASTER_EDM (3)
+
+#define MAX_FILE_SIZE 0x40000000
+
+static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
+
+static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
+
+
+
+IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
+ "ScriptStream2",
+ "DriverInfoStream"};
+typedef struct PDBG_PDUMP_STATE_TAG
+{
+ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
+ IMG_UINT32 ui32ParamFileNum;
+
+ IMG_CHAR *pszMsg;
+ IMG_CHAR *pszScript;
+ IMG_CHAR *pszFile;
+
+} PDBG_PDUMP_STATE;
+
+static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
+
+#define SZ_MSG_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_SCRIPT_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+
+
+
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
+
+static inline IMG_BOOL PDumpSuspended(IMG_VOID)
+{
+ return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE;
+}
+
+PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
+ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
+ if ((!*phScript) || PDumpSuspended())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszMsg = gsDBGPdumpState.pszMsg;
+ *pui32MaxLen = SZ_MSG_SIZE_MAX;
+ if ((!*ppszMsg) || PDumpSuspended())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszFile = gsDBGPdumpState.pszFile;
+ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
+ if ((!*ppszFile) || PDumpSuspended())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags)
+{
+ return PDumpWriteString2(hScript, ui32Flags);
+}
+
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+ IMG_CHAR* pszBuf = hBuf;
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ g_ui32EveryLineCounter++;
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
+{
+ IMG_INT32 n;
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
+{
+ PVR_UNREFERENCED_PARAMETER(pszFormat);
+
+
+}
+
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
+{
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+ if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_CHAR* pszBuf = hBuffer;
+ IMG_UINT32 ui32Count = 0;
+
+ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
+ {
+ ui32Count++;
+ }
+ return(ui32Count);
+}
+
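+/* Ensure the buffer ends with "\r\n", appending the missing characters
+ * in place when there is still room in the buffer. */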
+IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_UINT32 ui32Count;
+ IMG_CHAR* pszBuf = hBuffer;
+
+
+ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
+
+
+ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
+ {
+ pszBuf[ui32Count] = '\n';
+ ui32Count++;
+ pszBuf[ui32Count] = '\0';
+ }
+ if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax))
+ {
+ pszBuf[ui32Count-1] = '\r';
+ pszBuf[ui32Count] = '\n';
+ ui32Count++;
+ pszBuf[ui32Count] = '\0';
+ }
+}
+
+IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream)
+{
+ return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream];
+}
+
+IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream)
+{
+ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
+ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
+}
+
+IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID)
+{
+ return gsDBGPdumpState.ui32ParamFileNum;
+}
+
+IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream,
+ IMG_UINT8 *psui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Flags)
+{
+ PDBG_STREAM psStream = (PDBG_STREAM)hStream;
+ return PDumpWriteILock(psStream,
+ psui8Data,
+ ui32Size,
+ ui32Flags);
+}
+
+IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
+{
+
+ PVR_UNREFERENCED_PARAMETER(hStream);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+}
+
+IMG_BOOL PDumpOSJTInitialised(IMG_VOID)
+{
+ if(gpfnDbgDrv)
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID)
+{
+ return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE;
+}
+
+IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT8 *pui8LinAddr,
+ IMG_UINT32 ui32PageSize,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
+
+
+
+ PVR_ASSERT (hOSMemHandle != IMG_NULL);
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
+ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
+
+
+ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
+}
+
+IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_PUINT8 pui8LinAddr,
+ IMG_UINT32 ui32DataPageMask,
+ IMG_UINT32 *pui32PageOffset)
+{
+ if(hOSMemHandle)
+ {
+
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
+ *pui32PageOffset = sCpuPAddr.uiAddr & ui32DataPageMask;
+ }
+ else
+ {
+ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+ *pui32PageOffset = ((IMG_UINT32)pui8LinAddr & ui32DataPageMask);
+ }
+}
+
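+/* Dispatch the write to the debug driver entry point that matches the
+ * requested write mode; an unrecognised mode returns 0xFFFFFFFF. */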
+IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream,
+ PDUMP_DDWMODE eDbgDrvWriteMode,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount,
+ IMG_UINT32 ui32Level,
+ IMG_UINT32 ui32DbgDrvFlags)
+{
+ switch(eDbgDrvWriteMode)
+ {
+ case PDUMP_WRITE_MODE_CONTINUOUS:
+ PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags);
+ return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, ui32Level);
+ case PDUMP_WRITE_MODE_LASTFRAME:
+ return gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, ui32Level, ui32DbgDrvFlags);
+ case PDUMP_WRITE_MODE_BINCM:
+ PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags);
+ return gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, ui32Level);
+ case PDUMP_WRITE_MODE_PERSISTENT:
+ PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags);
+ return gpfnDbgDrv->pfnWritePersist(psStream, pui8Data, ui32BCount, ui32Level);
+ default:
+ PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags);
+ break;
+ }
+ return 0xFFFFFFFFU;
+}
+
+IMG_VOID PDumpOSReleaseExecution(IMG_VOID)
+{
+ OSReleaseThreadQuanta();
+}
+
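+/* Connect to the debug driver, allocate the filename/message/script
+ * buffers and create the PDump streams. On any allocation failure the
+ * partially allocated state is torn down again. */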
+IMG_VOID PDumpInit(IMG_VOID)
+{
+ IMG_UINT32 i;
+ DBGKM_CONNECT_NOTIFIER sConnectNotifier;
+
+
+ if (!gpfnDbgDrv)
+ {
+ DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
+
+
+
+ if (gpfnDbgDrv == IMG_NULL)
+ {
+ return;
+ }
+
+
+ sConnectNotifier.pfnConnectNotifier = &PDumpConnectionNotify;
+ gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier);
+
+ if(!gsDBGPdumpState.pszFile)
+ {
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0,
+ "Filename string") != PVRSRV_OK)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszMsg)
+ {
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0,
+ "Message string") != PVRSRV_OK)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszScript)
+ {
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0,
+ "Script string") != PVRSRV_OK)
+ {
+ goto init_failed;
+ }
+ }
+
+ for(i=0; i < PDUMP_NUM_STREAMS; i++)
+ {
+ gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
+ DEBUG_CAPMODE_FRAMED,
+ DEBUG_OUTMODE_STREAMENABLE,
+ 0,
+ 10);
+
+ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
+ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
+ }
+
+ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
+ PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FAMILY);
+ PDUMPCOMMENT("Start of Init Phase");
+ }
+
+ return;
+
+init_failed:
+
+ if(gsDBGPdumpState.pszFile)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
+ gsDBGPdumpState.pszFile = IMG_NULL;
+ }
+
+ if(gsDBGPdumpState.pszScript)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
+ gsDBGPdumpState.pszScript = IMG_NULL;
+ }
+
+ if(gsDBGPdumpState.pszMsg)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
+ gsDBGPdumpState.pszMsg = IMG_NULL;
+ }
+
+
+ sConnectNotifier.pfnConnectNotifier = 0;
+ gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier);
+
+ gpfnDbgDrv = IMG_NULL;
+}
+
+
+IMG_VOID PDumpDeInit(IMG_VOID)
+{
+ IMG_UINT32 i;
+ DBGKM_CONNECT_NOTIFIER sConnectNotifier;
+
+ for(i=0; i < PDUMP_NUM_STREAMS; i++)
+ {
+ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
+ }
+
+ if(gsDBGPdumpState.pszFile)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
+ gsDBGPdumpState.pszFile = IMG_NULL;
+ }
+
+ if(gsDBGPdumpState.pszScript)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
+ gsDBGPdumpState.pszScript = IMG_NULL;
+ }
+
+ if(gsDBGPdumpState.pszMsg)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
+ gsDBGPdumpState.pszMsg = IMG_NULL;
+ }
+
+
+ sConnectNotifier.pfnConnectNotifier = 0;
+ gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier);
+
+ gpfnDbgDrv = IMG_NULL;
+}
+
+PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
+{
+ IMG_UINT32 i;
+
+ if (gpfnDbgDrv)
+ {
+ PDUMPCOMMENT("Start Init Phase");
+ for(i=0; i < PDUMP_NUM_STREAMS; i++)
+ {
+ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]);
+ }
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID)
+{
+ IMG_UINT32 i;
+
+ if (gpfnDbgDrv)
+ {
+ PDUMPCOMMENT("Stop Init Phase");
+
+ for(i=0; i < PDUMP_NUM_STREAMS; i++)
+ {
+ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]);
+ }
+ }
+ return PVRSRV_OK;
+}
+
+IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
+{
+ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
+}
+
+
+IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID)
+{
+ if (PDumpSuspended())
+ {
+ return IMG_FALSE;
+ }
+ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
+}
+
+PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame)
+{
+ IMG_UINT32 ui32Stream;
+
+ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
+ {
+ if (gsDBGPdumpState.psStream[ui32Stream])
+ {
+ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
+{
+ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
+}
+
+
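+/* Write a block to a debug stream. Writes are silently dropped (but
+ * reported as success) while the stream is invalid, PDump is suspended,
+ * or PDUMP_FLAGS_NEVER is set. Writes to the parameter stream that would
+ * exceed MAX_FILE_SIZE cause the output file to be split first. */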
+static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32Written = 0;
+ if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteILock: Failed to write 0x%x bytes to stream 0x%x", ui32Count, (IMG_UINT32)psStream));
+ return IMG_TRUE;
+ }
+
+
+
+
+ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
+ {
+ IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
+
+ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
+ {
+ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
+ {
+ DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
+ gsDBGPdumpState.ui32ParamFileNum++;
+ }
+ }
+ }
+
+ ui32Written = DbgWrite(psStream, pui8Data, ui32Count, ui32Flags);
+
+ if (ui32Written == 0xFFFFFFFF)
+ {
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
+{
+ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
+}
+
+static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
+}
+
+IMG_VOID PDumpSuspendKM(IMG_VOID)
+{
+ atomic_inc(&gsPDumpSuspended);
+}
+
+IMG_VOID PDumpResumeKM(IMG_VOID)
+{
+ atomic_dec(&gsPDumpSuspended);
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/pvr/pdump.h b/drivers/gpu/pvr/pdump.h
new file mode 100644
index 0000000..c41a6d4
--- /dev/null
+++ b/drivers/gpu/pvr/pdump.h
@@ -0,0 +1,37 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#define PDUMP_FLAGS_NEVER 0x08000000U
+#define PDUMP_FLAGS_LASTFRAME 0x10000000U
+#define PDUMP_FLAGS_RESETLFBUFFER 0x20000000U
+#define PDUMP_FLAGS_CONTINUOUS 0x40000000U
+#define PDUMP_FLAGS_PERSISTENT 0x80000000U
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump/dbgdriv.c b/drivers/gpu/pvr/pdump/dbgdriv.c
new file mode 100644
index 0000000..e205b00
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/dbgdriv.c
@@ -0,0 +1,2354 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+
+
+
+
+#define LAST_FRAME_BUF_SIZE 1024
+
+typedef struct _DBG_LASTFRAME_BUFFER_
+{
+ PDBG_STREAM psStream;
+ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE];
+ IMG_UINT32 ui32BufLen;
+ struct _DBG_LASTFRAME_BUFFER_ *psNext;
+} *PDBG_LASTFRAME_BUFFER;
+
+
+static PDBG_STREAM g_psStreamList = 0;
+static PDBG_LASTFRAME_BUFFER g_psLFBufferList;
+
+static IMG_UINT32 g_ui32LOff = 0;
+static IMG_UINT32 g_ui32Line = 0;
+static IMG_UINT32 g_ui32MonoLines = 25;
+
+static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
+static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff;
+static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
+
+IMG_VOID * g_pvAPIMutex=IMG_NULL;
+
+extern IMG_UINT32 g_ui32HotKeyFrame;
+extern IMG_BOOL g_bHotKeyPressed;
+extern IMG_BOOL g_bHotKeyRegistered;
+
+IMG_BOOL gbDumpThisFrame = IMG_FALSE;
+
+
+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream);
+
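+/* Entry points handed to the PDump layer; most wrappers simply serialise
+ * the underlying DBGDriv* call with the global API mutex. */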
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+ sizeof (DBGKM_SERVICE_TABLE),
+ ExtDBGDrivCreateStream,
+ ExtDBGDrivDestroyStream,
+ ExtDBGDrivFindStream,
+ ExtDBGDrivWriteString,
+ ExtDBGDrivReadString,
+ ExtDBGDrivWrite,
+ ExtDBGDrivRead,
+ ExtDBGDrivSetCaptureMode,
+ ExtDBGDrivSetOutputMode,
+ ExtDBGDrivSetDebugLevel,
+ ExtDBGDrivSetFrame,
+ ExtDBGDrivGetFrame,
+ ExtDBGDrivOverrideMode,
+ ExtDBGDrivDefaultMode,
+ ExtDBGDrivWrite2,
+ ExtDBGDrivWriteStringCM,
+ ExtDBGDrivWriteCM,
+ ExtDBGDrivSetMarker,
+ ExtDBGDrivGetMarker,
+ ExtDBGDrivStartInitPhase,
+ ExtDBGDrivStopInitPhase,
+ ExtDBGDrivIsCaptureFrame,
+ ExtDBGDrivWriteLF,
+ ExtDBGDrivReadLF,
+ ExtDBGDrivGetStreamOffset,
+ ExtDBGDrivSetStreamOffset,
+ ExtDBGDrivIsLastCaptureFrame,
+ ExtDBGDrivWaitForEvent,
+ ExtDBGDrivSetConnectNotifier,
+ ExtDBGDrivWritePersist
+};
+
+
+static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+static IMG_VOID InvalidateAllStreams(IMG_VOID);
+
+
+
+
+DBGKM_CONNECT_NOTIFIER g_fnDBGKMNotifier;
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier)
+{
+
+ g_fnDBGKMNotifier = fn_notifier;
+}
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size)
+{
+ IMG_VOID * pvRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDestroyStream(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ IMG_VOID * pvRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivFindStream(pszName, bResetStream);
+ if(g_fnDBGKMNotifier.pfnConnectNotifier)
+ {
+ g_fnDBGKMNotifier.pfnConnectNotifier();
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pfnConnectNotifier not initialised.\n"));
+ }
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetOutputMode(psStream, ui32OutMode);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetFrame(psStream, ui32Frame);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivGetFrame(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
+{
+ IMG_BOOL bRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ bRet = DBGDrivIsLastCaptureFrame(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return bRet;
+}
+
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
+{
+ IMG_BOOL bRet;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return bRet;
+}
+
+void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivOverrideMode(psStream, ui32Mode);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDefaultMode(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWritePersist(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+ if(ui32Ret==0xFFFFFFFFU)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in DBGDrivWritePersist."));
+ }
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetMarker(psStream, ui32Marker);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Marker;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Marker = DBGDrivGetMarker(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Marker;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivStartInitPhase(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivStopInitPhase(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Ret;
+
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret = DBGDrivGetStreamOffset(psStream);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset)
+{
+
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
+
+
+ HostReleaseMutex(g_pvAPIMutex);
+}
+
+IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ DBGDrivWaitForEvent(eEvent);
+#else
+ PVR_UNREFERENCED_PARAMETER(eEvent);
+#endif
+}
+
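+/* Minimal ASCII-to-integer helper: accepts decimal or "0x"-prefixed hex and
+   returns 0 if any character is not a valid digit for the detected base. */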
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+ IMG_INT iLen = 0;
+ IMG_UINT32 ui32Value = 0;
+ IMG_UINT32 ui32Digit=1;
+ IMG_UINT32 ui32Base=10;
+ IMG_INT iPos;
+ IMG_CHAR bc;
+
+
+ while (szIn[iLen] > 0)
+ {
+ iLen ++;
+ }
+
+
+ if (iLen == 0)
+ {
+ return (0);
+ }
+
+
+ iPos=0;
+ while (szIn[iPos] == '0')
+ {
+ iPos++;
+ }
+ if (szIn[iPos] == '\0')
+ {
+ return 0;
+ }
+ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+ {
+ ui32Base=16;
+ szIn[iPos]='0';
+ }
+
+
+ for (iPos = iLen - 1; iPos >= 0; iPos --)
+ {
+ bc = szIn[iPos];
+
+		if ((bc >= 'a') && (bc <= 'f') && ui32Base == 16)
+		{
+			bc -= 'a' - 0xa;
+		}
+		else if ((bc >= 'A') && (bc <= 'F') && ui32Base == 16)
+		{
+			bc -= 'A' - 0xa;
+		}
+		else if ((bc >= '0') && (bc <= '9'))
+		{
+			bc -= '0';
+		}
+		else
+		{
+			return (0);
+		}
+
+ ui32Value += (IMG_UINT32)bc * ui32Digit;
+
+ ui32Digit = ui32Digit * ui32Base;
+ }
+ return (ui32Value);
+}
+
+
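+/* Returns IMG_TRUE only if psStream is still present in the global stream list,
+   guarding the entry points against stale or foreign stream handles. */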
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psThis;
+
+ psThis = g_psStreamList;
+
+ while (psThis)
+ {
+ if (psStream && (psThis == psStream) )
+ {
+ return(IMG_TRUE);
+ }
+ else
+ {
+ psThis = psThis->psNext;
+ }
+ }
+
+ return(IMG_FALSE);
+}
+
+
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+
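+/* Raw copy into the stream buffer, wrapping the write pointer back to the start
+   of the circular buffer when the end is reached. */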
+static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+
+
+ if (!psStream->bCircularAllowed)
+ {
+
+ }
+
+ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+ {
+
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+
+ HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+ (IMG_PVOID) pui8Data,
+ ui32B1);
+
+
+ HostMemCopy(psStream->pvBase,
+ (IMG_PVOID)(pui8Data + ui32B1),
+ ui32B2);
+
+
+ psStream->ui32WPtr = ui32B2;
+ }
+ else
+ {
+ HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+ (IMG_PVOID) pui8Data,
+ ui32InBuffSize);
+
+ psStream->ui32WPtr += ui32InBuffSize;
+
+ if (psStream->ui32WPtr == psStream->ui32Size)
+ {
+ psStream->ui32WPtr = 0;
+ }
+ }
+ psStream->ui32DataWritten += ui32InBuffSize;
+}
+
+
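+/* Writes a string straight to the legacy monochrome text console at
+   DBGDRIV_MONOBASE, scrolling the display once the last line is filled. */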
+void MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine)
+{
+#if defined (_WIN64)
+ PVR_UNREFERENCED_PARAMETER(pszString);
+ PVR_UNREFERENCED_PARAMETER(bNewLine);
+
+#else
+ IMG_UINT32 i;
+ IMG_CHAR * pScreen;
+
+ pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE;
+
+ pScreen += g_ui32Line * 160;
+
+
+
+ i=0;
+ do
+ {
+ pScreen[g_ui32LOff + (i*2)] = pszString[i];
+ pScreen[g_ui32LOff + (i*2)+1] = 127;
+ i++;
+ }
+ while ((pszString[i] != 0) && (i < 4096));
+
+ g_ui32LOff += i * 2;
+
+ if (bNewLine)
+ {
+ g_ui32LOff = 0;
+ g_ui32Line++;
+ }
+
+
+
+ if (g_ui32Line == g_ui32MonoLines)
+ {
+ g_ui32Line = g_ui32MonoLines - 1;
+
+ HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1));
+
+ HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160);
+ }
+#endif
+}
+
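+/* Writes into an expandable stream: if free space runs low and expansion is
+   allowed, the buffer is (at least) doubled via ExpandStreamBuffer() first. */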
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT ui32Space;
+
+
+
+ ui32Space = SpaceInStream(psStream);
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %x is disabled", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+
+
+ if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+ {
+
+
+
+ if (ui32Space < 32)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %x is full and isn't expandable", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+ }
+ else
+ {
+ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+ {
+ IMG_UINT32 ui32NewBufSize;
+
+
+
+ ui32NewBufSize = 2 * psStream->ui32Size;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+ psStream->ui32Size, ui32NewBufSize));
+
+ if (ui32InBuffSize > psStream->ui32Size)
+ {
+ ui32NewBufSize += ui32InBuffSize;
+ }
+
+
+
+ if (!ExpandStreamBuffer(psStream,ui32NewBufSize))
+ {
+ if (ui32Space < 32)
+ {
+ if(psStream->bCircularAllowed)
+ {
+ return(0);
+ }
+ else
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: Unable to expand %x. Out of memory.", (IMG_UINTPTR_T) psStream));
+ InvalidateAllStreams();
+ return (0xFFFFFFFFUL);
+ }
+ }
+ }
+
+
+
+ ui32Space = SpaceInStream(psStream);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+ ui32Space));
+ }
+ }
+
+
+
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 4;
+ }
+
+
+
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
+
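+/* Creates a named stream together with a companion "<name>_Init" stream used to
+   capture driver initialisation data; both streams share one control block. */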
+IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
+ IMG_UINT32 ui32CapMode,
+ IMG_UINT32 ui32OutMode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size)
+{
+ PDBG_STREAM psStream;
+ PDBG_STREAM psInitStream;
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ PDBG_STREAM_CONTROL psCtrl;
+ IMG_UINT32 ui32Off;
+ IMG_VOID * pvBase;
+ static IMG_CHAR pszNameInitSuffix[] = "_Init";
+ IMG_UINT32 ui32OffSuffix;
+
+
+
+
+ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+
+ if (psStream)
+ {
+ return ((IMG_VOID *) psStream);
+ }
+
+
+
+ psStream = HostNonPageablePageAlloc(1);
+ psInitStream = HostNonPageablePageAlloc(1);
+ psLFBuffer = HostNonPageablePageAlloc(1);
+ psCtrl = HostNonPageablePageAlloc(1);
+	if (
+		(!psStream) ||
+		(!psInitStream) ||
+		(!psLFBuffer) ||
+		(!psCtrl)
+	   )
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\r\n"));
+
+		/* Release whichever of the control allocations did succeed. */
+		if (psCtrl) HostNonPageablePageFree(psCtrl);
+		if (psLFBuffer) HostNonPageablePageFree(psLFBuffer);
+		if (psInitStream) HostNonPageablePageFree(psInitStream);
+		if (psStream) HostNonPageablePageFree(psStream);
+
+		return((IMG_VOID *) 0);
+	}
+
+
+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\r\n"));
+
+		/* Release the control structures allocated above. */
+		HostNonPageablePageFree(psCtrl);
+		HostNonPageablePageFree(psLFBuffer);
+		HostNonPageablePageFree(psInitStream);
+		HostNonPageablePageFree(psStream);
+		return((IMG_VOID *) 0);
+	}
+
+
+ psCtrl->ui32Flags = ui32Flags;
+ psCtrl->ui32CapMode = ui32CapMode;
+ psCtrl->ui32OutMode = ui32OutMode;
+ psCtrl->ui32DebugLevel = DEBUG_LEVEL_0;
+ psCtrl->ui32DefaultMode = ui32CapMode;
+ psCtrl->ui32Start = 0;
+ psCtrl->ui32End = 0;
+ psCtrl->ui32Current = 0;
+ psCtrl->ui32SampleRate = 1;
+ psCtrl->bInitPhaseComplete = IMG_FALSE;
+
+
+
+ psStream->psNext = 0;
+ psStream->pvBase = pvBase;
+ psStream->psCtrl = psCtrl;
+ psStream->ui32Size = ui32Size * 4096UL;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32DataWritten = 0;
+ psStream->ui32Marker = 0;
+ psStream->bCircularAllowed = IMG_TRUE;
+ psStream->ui32InitPhaseWOff = 0;
+
+
+
+
+ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\r\n"));
+
+		if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+		{
+			HostNonPageablePageFree(psStream->pvBase);
+		}
+		else
+		{
+			HostPageablePageFree(psStream->pvBase);
+		}
+
+		/* Release the remaining control structures as well. */
+		HostNonPageablePageFree(psCtrl);
+		HostNonPageablePageFree(psLFBuffer);
+		HostNonPageablePageFree(psInitStream);
+		HostNonPageablePageFree(psStream);
+		return((IMG_VOID *) 0);
+	}
+
+
+ psInitStream->psNext = 0;
+ psInitStream->pvBase = pvBase;
+ psInitStream->psCtrl = psCtrl;
+ psInitStream->ui32Size = ui32Size * 4096UL;
+ psInitStream->ui32RPtr = 0;
+ psInitStream->ui32WPtr = 0;
+ psInitStream->ui32DataWritten = 0;
+ psInitStream->ui32Marker = 0;
+ psInitStream->bCircularAllowed = IMG_FALSE;
+ psInitStream->ui32InitPhaseWOff = 0;
+
+
+
+ psStream->psInitStream = psInitStream;
+
+
+ psLFBuffer->psStream = psStream;
+ psLFBuffer->ui32BufLen = 0UL;
+
+ g_bHotkeyMiddump = IMG_FALSE;
+ g_ui32HotkeyMiddumpStart = 0xffffffffUL;
+ g_ui32HotkeyMiddumpEnd = 0xffffffffUL;
+
+
+
+ ui32Off = 0;
+
+ do
+ {
+ psStream->szName[ui32Off] = pszName[ui32Off];
+ psInitStream->szName[ui32Off] = pszName[ui32Off];
+ ui32Off++;
+ }
+ while ((pszName[ui32Off] != 0) && (ui32Off < (4096UL - sizeof(DBG_STREAM))));
+ psStream->szName[ui32Off] = pszName[ui32Off];
+
+
+
+ ui32OffSuffix = 0;
+ do
+ {
+ psInitStream->szName[ui32Off] = pszNameInitSuffix[ui32OffSuffix];
+ ui32Off++;
+ ui32OffSuffix++;
+ }
+ while ( (pszNameInitSuffix[ui32OffSuffix] != 0) &&
+ (ui32Off < (4096UL - sizeof(DBG_STREAM))));
+ psInitStream->szName[ui32Off] = pszNameInitSuffix[ui32OffSuffix];
+
+
+
+ psStream->psNext = g_psStreamList;
+ g_psStreamList = psStream;
+
+ psLFBuffer->psNext = g_psLFBufferList;
+ g_psLFBufferList = psLFBuffer;
+
+ AddSIDEntry(psStream);
+
+ return((IMG_VOID *) psStream);
+}
+
+void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psStreamThis;
+ PDBG_STREAM psStreamPrev;
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ PDBG_LASTFRAME_BUFFER psLFThis;
+ PDBG_LASTFRAME_BUFFER psLFPrev;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ RemoveSIDEntry(psStream);
+
+ psLFBuffer = FindLFBuf(psStream);
+
+
+
+ psStreamThis = g_psStreamList;
+ psStreamPrev = 0;
+
+ while (psStreamThis)
+ {
+ if (psStreamThis == psStream)
+ {
+ if (psStreamPrev)
+ {
+ psStreamPrev->psNext = psStreamThis->psNext;
+ }
+ else
+ {
+ g_psStreamList = psStreamThis->psNext;
+ }
+
+ psStreamThis = 0;
+ }
+ else
+ {
+ psStreamPrev = psStreamThis;
+ psStreamThis = psStreamThis->psNext;
+ }
+ }
+
+ psLFThis = g_psLFBufferList;
+ psLFPrev = 0;
+
+ while (psLFThis)
+ {
+ if (psLFThis == psLFBuffer)
+ {
+ if (psLFPrev)
+ {
+ psLFPrev->psNext = psLFThis->psNext;
+ }
+ else
+ {
+ g_psLFBufferList = psLFThis->psNext;
+ }
+
+ psLFThis = 0;
+ }
+ else
+ {
+ psLFPrev = psLFThis;
+ psLFThis = psLFThis->psNext;
+ }
+ }
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
+ {
+ DeactivateHotKeys();
+ }
+
+
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->psCtrl);
+ HostNonPageablePageFree(psStream->pvBase);
+ HostNonPageablePageFree(psStream->psInitStream->pvBase);
+ }
+ else
+ {
+ HostNonPageablePageFree(psStream->psCtrl);
+ HostPageablePageFree(psStream->pvBase);
+ HostPageablePageFree(psStream->psInitStream->pvBase);
+ }
+
+ HostNonPageablePageFree(psStream->psInitStream);
+ HostNonPageablePageFree(psStream);
+ HostNonPageablePageFree(psLFBuffer);
+
+ if (g_psStreamList == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+ }
+
+ return;
+}
+
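+/* Looks a stream up by exact name; when bResetStream is set the read/write
+   pointers are rewound and the init phase is marked as complete. */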
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ PDBG_STREAM psStream;
+ PDBG_STREAM psThis;
+ IMG_UINT32 ui32Off;
+ IMG_BOOL bAreSame;
+
+ psStream = 0;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+ pszName,
+ (bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+
+
+ for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
+ {
+ bAreSame = IMG_TRUE;
+ ui32Off = 0;
+
+ if (strlen(psThis->szName) == strlen(pszName))
+ {
+ while ((psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && (ui32Off < 128) && bAreSame)
+ {
+ if (psThis->szName[ui32Off] != pszName[ui32Off])
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ ui32Off++;
+ }
+ }
+ else
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ if (bAreSame)
+ {
+ psStream = psThis;
+ break;
+ }
+ }
+
+ if(bResetStream && psStream)
+ {
+ static IMG_CHAR szComment[] = "-- Init phase terminated\r\n";
+ psStream->psInitStream->ui32RPtr = 0;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten;
+ if (psStream->psCtrl->bInitPhaseComplete == IMG_FALSE)
+ {
+ if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
+ {
+ DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01);
+ }
+ psStream->psCtrl->bInitPhaseComplete = IMG_TRUE;
+ }
+
+ {
+
+
+ psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+		PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker to offset %x, total bytes written %x",
+				psStream->szName,
+				psStream->psInitStream->ui32InitPhaseWOff,
+				psStream->psInitStream->ui32DataWritten ));
+ }
+ }
+
+ return((IMG_VOID *) psStream);
+}
+
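+/* Marks a stream unusable after an out-of-memory failure: appends an
+   "**OUTOFMEM" marker and forces the stream read-only. */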
+static void IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+ IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32Off = 0;
+ IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+ IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s\r\n", psStream->szName ));
+
+
+
+
+
+
+
+
+
+ ui32Space = SpaceInStream(psStream);
+
+
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+ }
+
+ while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+ ui32Off++;
+ ui32WPtr++;
+ }
+ pui8Buffer[ui32WPtr++] = '\0';
+ psStream->ui32WPtr = ui32WPtr;
+
+
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+static IMG_VOID InvalidateAllStreams(IMG_VOID)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ while (psStream != IMG_NULL)
+ {
+ DBGDrivInvalidateStream(psStream);
+ psStream = psStream->psNext;
+ }
+ return;
+}
+
+
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+ return(0);
+ }
+ }
+ else
+ {
+ if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+ return(0);
+ }
+ }
+ }
+
+ return(DBGDrivWriteString(psStream,pszString,ui32Level));
+
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Len;
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32WPtr;
+ IMG_UINT8 * pui8Buffer;
+
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0)
+ {
+ if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString));
+ }
+
+
+
+ if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_MONO)
+ {
+ MonoOut(psStream->szName,IMG_FALSE);
+ MonoOut(": ",IMG_FALSE);
+ MonoOut(pszString,IMG_TRUE);
+ }
+ }
+
+
+
+ if (
+ !(
+ ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) ||
+ ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0)
+ )
+ )
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ ui32Space=SpaceInStream(psStream);
+
+
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+
+ ui32Len = 0;
+ ui32WPtr = psStream->ui32WPtr;
+ pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ while((pszString[ui32Len] != 0) && (ui32Len < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
+ ui32Len++;
+ ui32WPtr++;
+ if (ui32WPtr == psStream->ui32Size)
+ {
+ ui32WPtr = 0;
+ }
+ }
+
+ if (ui32Len < ui32Space)
+ {
+
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
+ ui32Len++;
+ ui32WPtr++;
+ if (ui32WPtr == psStream->ui32Size)
+ {
+ ui32WPtr = 0;
+ }
+
+
+ psStream->ui32WPtr = ui32WPtr;
+ psStream->ui32DataWritten+= ui32Len;
+	}
+	else
+	{
+		ui32Len = 0;
+	}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32Len)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+
+ return(ui32Len);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
+{
+ IMG_UINT32 ui32OutLen;
+ IMG_UINT32 ui32Len;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT8 *pui8Buff;
+
+
+
+ if (!StreamValidForRead(psStream))
+ {
+ return(0);
+ }
+
+
+
+ pui8Buff = (IMG_UINT8 *)psStream->pvBase;
+ ui32Offset = psStream->ui32RPtr;
+
+ if (psStream->ui32RPtr == psStream->ui32WPtr)
+ {
+ return(0);
+ }
+
+
+
+ ui32Len = 0;
+ while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr))
+ {
+ ui32Offset++;
+ ui32Len++;
+
+
+
+ if (ui32Offset == psStream->ui32Size)
+ {
+ ui32Offset = 0;
+ }
+ }
+
+ ui32OutLen = ui32Len + 1;
+
+
+
+ if (ui32Len > ui32Limit)
+ {
+ return(0);
+ }
+
+
+
+ ui32Offset = psStream->ui32RPtr;
+ ui32Len = 0;
+
+ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit))
+ {
+ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
+ ui32Offset++;
+ ui32Len++;
+
+
+
+ if (ui32Offset == psStream->ui32Size)
+ {
+ ui32Offset = 0;
+ }
+ }
+
+ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
+
+ psStream->ui32RPtr = ui32Offset + 1;
+
+ if (psStream->ui32RPtr == psStream->ui32Size)
+ {
+ psStream->ui32RPtr = 0;
+ }
+
+ return(ui32OutLen);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ IMG_UINT32 ui32Space;
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psMainStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psMainStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else if (psMainStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psMainStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+
+
+ ui32Space=SpaceInStream(psStream);
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+
+
+ if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %x is disabled", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+ if (ui32Space < 8)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %x is full", (IMG_UINTPTR_T) psStream));
+ return(0);
+ }
+
+
+
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 8;
+ }
+
+
+
+ Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4);
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else
+ {
+ if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ }
+
+ return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level));
+}
+
+
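+/* Persistent writes always target the init-phase stream so that the data is
+   retained for clients that connect after the init phase has completed. */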
+static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ DBG_STREAM *psStream;
+ PVR_UNREFERENCED_PARAMETER(ui32Level);
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+ psStream = psMainStream->psInitStream;
+ if(psStream->bCircularAllowed == IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "DBGDrivWritePersist: Init phase is a circular buffer, some data may be lost"));
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Append %x b to %s: Roff = %x, WOff = %x [bw = %x]",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr,
+ psStream->ui32DataWritten));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
+{
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForWrite(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0);
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
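+/* Copies up to ui32OutBuffSize bytes out of the stream (or its init buffer),
+   handling read-pointer wrap-around and stopping at the init-phase marker. */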
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Data;
+ DBG_STREAM *psStream;
+
+
+
+ if (!StreamValidForRead(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %x is invalid", (IMG_UINTPTR_T) psMainStream));
+ return(0);
+ }
+
+ if(bReadInitBuffer)
+ {
+ psStream = psMainStream->psInitStream;
+ }
+ else
+ {
+ psStream = psMainStream;
+ }
+
+
+ if (psStream->ui32RPtr == psStream->ui32WPtr ||
+ ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+ {
+ return(0);
+ }
+
+
+
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+ }
+ else
+ {
+ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+ }
+
+
+
+ if ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+ {
+ ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+ }
+
+
+
+ if (ui32Data > ui32OutBuffSize)
+ {
+ ui32Data = ui32OutBuffSize;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+ ui32Data,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+
+
+ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+ {
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+
+ HostMemCopy((IMG_VOID *) pui8OutBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32B1);
+
+
+ HostMemCopy((IMG_VOID *)(pui8OutBuf + ui32B1),
+ psStream->pvBase,
+ ui32B2);
+
+
+ psStream->ui32RPtr = ui32B2;
+ }
+ else
+ {
+ HostMemCopy((IMG_VOID *) pui8OutBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32Data);
+
+
+ psStream->ui32RPtr += ui32Data;
+
+
+ if (psStream->ui32RPtr == psStream->ui32Size)
+ {
+ psStream->ui32RPtr = 0;
+ }
+ }
+
+ return(ui32Data);
+}
+
+void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = ui32Mode;
+ psStream->psCtrl->ui32DefaultMode = ui32Mode;
+ psStream->psCtrl->ui32Start = ui32Start;
+ psStream->psCtrl->ui32End = ui32End;
+ psStream->psCtrl->ui32SampleRate = ui32SampleRate;
+
+
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
+ {
+ ActivateHotKeys(psStream);
+ }
+}
+
+void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32OutMode = ui32OutMode;
+}
+
+void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32DebugLevel = ui32DebugLevel;
+}
+
+void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32Current = ui32Frame;
+
+ if ((ui32Frame >= psStream->psCtrl->ui32Start) &&
+ (ui32Frame <= psStream->psCtrl->ui32End) &&
+ (((ui32Frame - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
+ }
+ else
+ {
+ psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
+ }
+
+ if (g_bHotkeyMiddump)
+ {
+ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
+ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
+ (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
+ }
+ else
+ {
+ psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
+ if (psStream->psCtrl->ui32Current > g_ui32HotkeyMiddumpEnd)
+ {
+ g_bHotkeyMiddump = IMG_FALSE;
+ }
+ }
+ }
+
+
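+	/* Hotkey handling: latch a capture frame two frames ahead of the current
+	   one and toggle mid-dump sampling on alternate hotkey presses. */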
+ if (g_bHotKeyRegistered)
+ {
+ g_bHotKeyRegistered = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%p)!\n",psStream));
+
+ if (!g_bHotKeyPressed)
+ {
+
+
+ g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2;
+
+
+
+ g_bHotKeyPressed = IMG_TRUE;
+ }
+
+
+
+ if (((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
+ ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0))
+ {
+ if (!g_bHotkeyMiddump)
+ {
+
+ g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1;
+ g_ui32HotkeyMiddumpEnd = 0xffffffff;
+ g_bHotkeyMiddump = IMG_TRUE;
+ PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->psCtrl->ui32SampleRate));
+ }
+ else
+ {
+
+ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
+ PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n"));
+ }
+ }
+
+ }
+
+
+
+ if (psStream->psCtrl->ui32Current > g_ui32HotKeyFrame)
+ {
+ g_bHotKeyPressed = IMG_FALSE;
+ }
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return(0);
+ }
+
+ return(psStream->psCtrl->ui32Current);
+}
+
+IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32NextFrame;
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return IMG_FALSE;
+ }
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+ ui32NextFrame = psStream->psCtrl->ui32Current + psStream->psCtrl->ui32SampleRate;
+ if (ui32NextFrame > psStream->psCtrl->ui32End)
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
+{
+ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL;
+
+
+
+ if (!StreamValid(psStream))
+ {
+ return IMG_FALSE;
+ }
+
+ if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED)
+ {
+
+ if (g_bHotkeyMiddump)
+ {
+ if ((psStream->psCtrl->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) &&
+ (psStream->psCtrl->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) &&
+ ((((psStream->psCtrl->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ return IMG_TRUE;
+ }
+ }
+ else
+ {
+ if ((psStream->psCtrl->ui32Current >= (psStream->psCtrl->ui32Start - ui32FrameShift)) &&
+ (psStream->psCtrl->ui32Current <= (psStream->psCtrl->ui32End - ui32FrameShift)) &&
+ ((((psStream->psCtrl->ui32Current + ui32FrameShift) - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0))
+ {
+ return IMG_TRUE;
+ }
+ }
+ }
+ else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed))
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = ui32Mode;
+}
+
+void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->psCtrl->ui32CapMode = psStream->psCtrl->ui32DefaultMode;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->ui32InitPhaseWOff = ui32Marker;
+}
+
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ psStream->ui32Marker = ui32Marker;
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+
+
+ if (!StreamValid(psStream))
+ {
+ return 0;
+ }
+
+ return psStream->ui32Marker;
+}
+
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream)
+{
+ PDBG_STREAM psStream;
+
+
+
+ if (!StreamValid(psMainStream))
+ {
+ return 0;
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ return psStream->ui32DataWritten;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset)
+{
+ PDBG_STREAM psStream;
+
+
+
+ if (!StreamValid(psMainStream))
+ {
+ return;
+ }
+
+ if(psMainStream->psCtrl->bInitPhaseComplete)
+ {
+ psStream = psMainStream;
+ }
+ else
+ {
+ psStream = psMainStream->psInitStream;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "DBGDrivSetStreamOffset: %s set to %x b",
+ psStream->szName,
+ ui32StreamOffset));
+ psStream->ui32DataWritten = ui32StreamOffset;
+}
+
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID)
+{
+ return((IMG_PVOID)&g_sDBGKMServices);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+
+
+
+ if (!StreamValidForWrite(psStream))
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0)
+ {
+ return(0xFFFFFFFFUL);
+ }
+
+
+
+ if ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0)
+ {
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+ else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
+ {
+ if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
+ {
+
+ return(ui32InBuffSize);
+ }
+ }
+
+ psLFBuffer = FindLFBuf(psStream);
+
+ if (ui32Flags & WRITELF_FLAGS_RESETBUF)
+ {
+
+
+ ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize;
+ HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize);
+ psLFBuffer->ui32BufLen = ui32InBuffSize;
+ }
+ else
+ {
+
+
+ ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize;
+ HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize);
+ psLFBuffer->ui32BufLen += ui32InBuffSize;
+ }
+
+ return(ui32InBuffSize);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+ IMG_UINT32 ui32Data;
+
+
+
+ if (!StreamValidForRead(psStream))
+ {
+ return(0);
+ }
+
+ psLFBuffer = FindLFBuf(psStream);
+
+
+
+ ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
+
+
+
+ HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data);
+
+ return ui32Data;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream)
+{
+ psStream->psCtrl->bInitPhaseComplete = IMG_FALSE;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream)
+{
+ psStream->psCtrl->bInitPhaseComplete = IMG_TRUE;
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+ HostWaitForEvent(eEvent);
+}
+#endif
+
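+/* Reallocates the stream buffer at the new (page-rounded) size and linearises
+   any wrapped data into the start of the new buffer. */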
+IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+ IMG_VOID * pvNewBuf;
+ IMG_UINT32 ui32NewSizeInPages;
+ IMG_UINT32 ui32NewWOffset;
+ IMG_UINT32 ui32NewROffset;
+ IMG_UINT32 ui32SpaceInOldBuf;
+
+
+
+ if (psStream->ui32Size >= ui32NewSize)
+ {
+ return IMG_FALSE;
+ }
+
+
+
+ ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+
+
+ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+ }
+ else
+ {
+ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+ }
+
+ if (pvNewBuf == IMG_NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ if(psStream->bCircularAllowed)
+ {
+
+
+
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+
+
+ HostMemCopy(pvNewBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ psStream->ui32WPtr - psStream->ui32RPtr);
+ }
+ else
+ {
+ IMG_UINT32 ui32FirstCopySize;
+
+
+
+ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+
+ HostMemCopy(pvNewBuf,
+ (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+ ui32FirstCopySize);
+
+
+
+ HostMemCopy((IMG_VOID *)((IMG_UINTPTR_T)pvNewBuf + ui32FirstCopySize),
+ (IMG_VOID *)(IMG_PBYTE)psStream->pvBase,
+ psStream->ui32WPtr);
+ }
+ ui32NewROffset = 0;
+ }
+ else
+ {
+
+ HostMemCopy(pvNewBuf, psStream->pvBase, psStream->ui32WPtr);
+ ui32NewROffset = psStream->ui32RPtr;
+ }
+
+
+
+
+ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+
+
+ if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ }
+
+
+
+ psStream->pvBase = pvNewBuf;
+ psStream->ui32RPtr = ui32NewROffset;
+ psStream->ui32WPtr = ui32NewWOffset;
+ psStream->ui32Size = ui32NewSizeInPages * 4096;
+
+ return IMG_TRUE;
+}
+
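+/* Free space left in the stream: the gap between write and read pointers for
+   circular streams, or the distance to the end of the buffer otherwise. */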
+IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Space;
+
+ if (psStream->bCircularAllowed)
+ {
+
+ if (psStream->ui32RPtr > psStream->ui32WPtr)
+ {
+ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+ }
+ else
+ {
+ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+ }
+ }
+ else
+ {
+
+ ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+ }
+
+ return ui32Space;
+}
+
+
+void DestroyAllStreams(void)
+{
+ while (g_psStreamList != IMG_NULL)
+ {
+ DBGDrivDestroyStream(g_psStreamList);
+ }
+ return;
+}
+
+PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream)
+{
+ PDBG_LASTFRAME_BUFFER psLFBuffer;
+
+ psLFBuffer = g_psLFBufferList;
+
+ while (psLFBuffer)
+ {
+ if (psLFBuffer->psStream == psStream)
+ {
+ break;
+ }
+
+ psLFBuffer = psLFBuffer->psNext;
+ }
+
+ return psLFBuffer;
+}
+
diff --git a/drivers/gpu/pvr/pdump/dbgdriv.h b/drivers/gpu/pvr/pdump/dbgdriv.h
new file mode 100644
index 0000000..438e716
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/dbgdriv.h
@@ -0,0 +1,122 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+#define BUFFER_SIZE 64*PAGESIZE
+
+#define DBGDRIV_VERSION 0x100
+#define MAX_PROCESSES 2
+#define BLOCK_USED 0x01
+#define BLOCK_LOCKED 0x02
+#define DBGDRIV_MONOBASE 0x000B0000
+
+
+extern IMG_VOID * g_pvAPIMutex;
+
+IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
+ IMG_UINT32 ui32CapMode,
+ IMG_UINT32 ui32OutMode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Pages);
+IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream);
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
+IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
+IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
+IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream);
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
+IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
+IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
+IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+
+IMG_VOID DestroyAllStreams(IMG_VOID);
+
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+IMG_VOID HostMemSet(IMG_VOID *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+IMG_VOID HostMemCopy(IMG_VOID *pvDest,IMG_VOID *pvSrc,IMG_UINT32 ui32Size);
+IMG_VOID MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine);
+
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream);
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size);
+IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream);
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
+IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
+IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
+IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier);
+
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump/dbgdriv_ioctl.h b/drivers/gpu/pvr/pdump/dbgdriv_ioctl.h
new file mode 100644
index 0000000..8cef6ac
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/dbgdriv_ioctl.h
@@ -0,0 +1,35 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _IOCTL_
+#define _IOCTL_
+
+#define MAX_DBGVXD_W32_API 25
+
+extern IMG_UINT32 (*g_DBGDrivProc[MAX_DBGVXD_W32_API])(IMG_VOID *, IMG_VOID *);
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump/handle.c b/drivers/gpu/pvr/pdump/handle.c
new file mode 100644
index 0000000..dceeab8
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/handle.c
@@ -0,0 +1,121 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_defs.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+
+#define MAX_SID_ENTRIES 8
+
+typedef struct _SID_INFO
+{
+ PDBG_STREAM psStream;
+} SID_INFO, *PSID_INFO;
+
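+/* Small fixed-size table translating kernel stream pointers to the opaque
+   stream IDs (SIDs) that are handed out to user space. */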
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+
+ return (IMG_SID)iIdx+1;
+ }
+ }
+ }
+
+ return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+
+ IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+ if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+ {
+ return gaSID_Xlat_Table[iIdx].psStream;
+ }
+ else
+ {
+ return (PDBG_STREAM)IMG_NULL;
+ }
+}
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+
+ return IMG_TRUE;
+ }
+
+ if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)IMG_NULL)
+ {
+
+ gaSID_Xlat_Table[iIdx].psStream = psStream;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)IMG_NULL;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+
diff --git a/drivers/gpu/pvr/pdump/hostfunc.c b/drivers/gpu/pvr/pdump/hostfunc.c
new file mode 100644
index 0000000..64b20cc
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/hostfunc.c
@@ -0,0 +1,298 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/hardirq.h>
+#include <linux/slab.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+#if defined(DEBUG) && !defined(SUPPORT_DRI_DRM)
+IMG_UINT32 gPVRDumpDebugLevel = (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING);
+
+#define PVR_STRING_TERMINATOR '\0'
+#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
+
+void PVRSRVDumpDebugPrintf (
+ IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR* pszFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR* pszFormat,
+ ...
+ )
+{
+ IMG_BOOL bTrace;
+#if !defined(__sh__)
+ IMG_CHAR *pszLeafName;
+
+ pszLeafName = (char *)strrchr (pszFileName, '\\');
+
+ if (pszLeafName)
+ {
+ pszFileName = pszLeafName;
+ }
+#endif
+
+ bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE;
+
+ if (gPVRDumpDebugLevel & ui32DebugLevel)
+ {
+ va_list vaArgs;
+ static char szBuffer[256];
+
+ va_start (vaArgs, pszFormat);
+
+
+ if (bTrace == IMG_FALSE)
+ {
+ switch(ui32DebugLevel)
+ {
+ case DBGPRIV_FATAL:
+ {
+ strcpy (szBuffer, "PVR_K:(Fatal): ");
+ break;
+ }
+ case DBGPRIV_ERROR:
+ {
+ strcpy (szBuffer, "PVR_K:(Error): ");
+ break;
+ }
+ case DBGPRIV_WARNING:
+ {
+ strcpy (szBuffer, "PVR_K:(Warning): ");
+ break;
+ }
+ case DBGPRIV_MESSAGE:
+ {
+ strcpy (szBuffer, "PVR_K:(Message): ");
+ break;
+ }
+ case DBGPRIV_VERBOSE:
+ {
+ strcpy (szBuffer, "PVR_K:(Verbose): ");
+ break;
+ }
+ default:
+ {
+ strcpy (szBuffer, "PVR_K:(Unknown message level)");
+ break;
+ }
+ }
+ }
+ else
+ {
+ strcpy (szBuffer, "PVR_K: ");
+ }
+
+ vsprintf (&szBuffer[strlen(szBuffer)], pszFormat, vaArgs);
+
+
+ if (bTrace == IMG_FALSE)
+ {
+ sprintf (&szBuffer[strlen(szBuffer)], " [%d, %s]", (int)ui32Line, pszFileName);
+ }
+
+ printk(KERN_INFO "%s\r\n", szBuffer);
+
+ va_end (vaArgs);
+ }
+}
+#endif
+
+IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+ unsigned char *src,*dst;
+ int i;
+
+ src=(unsigned char *)pvSrc;
+ dst=(unsigned char *)pvDst;
+ for(i=0;i<ui32Size;i++)
+ {
+ dst[i]=src[i];
+ }
+#else
+ memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+
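+	/* Stub: there is no registry on Linux, so the caller's defaults are left in place. */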
+ return 0;
+}
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
+{
+ vfree(pvBase);
+}
+
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
+{
+ vfree(pvBase);
+}
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
+{
+
+ return IMG_NULL;
+}
+
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
+{
+
+}
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
+{
+
+}
+
+IMG_VOID * HostCreateMutex(IMG_VOID)
+{
+ struct semaphore *psSem;
+
+ psSem = kmalloc(sizeof(*psSem), GFP_KERNEL);
+ if (psSem)
+ {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+		init_MUTEX(psSem);
+#else
+		/* init_MUTEX() was removed in 2.6.37; use sema_init() instead. */
+		sema_init(psSem, 1);
+#endif
+ }
+
+ return psSem;
+}
+
+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex)
+{
+ BUG_ON(in_interrupt());
+
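+	/* With collision detection enabled, try the lock first so contention can be logged before blocking. */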
+#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
+ if (down_trylock((struct semaphore *)pvMutex))
+ {
+ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
+ down((struct semaphore *)pvMutex);
+ }
+#else
+ down((struct semaphore *)pvMutex);
+#endif
+}
+
+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex)
+{
+ up((struct semaphore *)pvMutex);
+}
+
+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex)
+{
+ if (pvMutex)
+ {
+ kfree(pvMutex);
+ }
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+#define EVENT_WAIT_TIMEOUT_MS 500
+#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
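+/*
+ * "Stream data" event: HostSignalEvent() sets the flag and wakes any waiter;
+ * HostWaitForEvent() blocks for up to EVENT_WAIT_TIMEOUT_MS and then clears it.
+ */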
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
+
+IMG_INT32 HostCreateEventObjects(IMG_VOID)
+{
+ init_waitqueue_head(&sStreamDataEvent);
+
+ return 0;
+}
+
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+
+ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+ iStreamData = 0;
+ break;
+ default:
+
+ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+ break;
+ }
+}
+
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ iStreamData = 1;
+ wake_up_interruptible(&sStreamDataEvent);
+ break;
+ default:
+ break;
+ }
+}
+
+IMG_VOID HostDestroyEventObjects(IMG_VOID)
+{
+}
+#endif
diff --git a/drivers/gpu/pvr/pdump/hostfunc.h b/drivers/gpu/pvr/pdump/hostfunc.h
new file mode 100644
index 0000000..42733d7
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/hostfunc.h
@@ -0,0 +1,58 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+#define HOST_PAGESIZE (4096)
+#define DBG_MEMORY_INITIALIZER (0xe2)
+
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
+
+IMG_VOID * HostCreateMutex(IMG_VOID);
+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(IMG_VOID);
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
+IMG_VOID HostDestroyEventObjects(IMG_VOID);
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump/hotkey.c b/drivers/gpu/pvr/pdump/hotkey.c
new file mode 100644
index 0000000..48853c7
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/hotkey.c
@@ -0,0 +1,135 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+#if !defined(LINUX)
+#include <ntddk.h>
+#include <windef.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "hostfunc.h"
+
+
+
+
+
+IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF;
+IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
+IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
+
+PRIVATEHOTKEYDATA g_PrivateHotKeyData;
+
+
+IMG_VOID ReadInHotKeys(IMG_VOID)
+{
+ g_PrivateHotKeyData.ui32ScanCode = 0x58;
+ g_PrivateHotKeyData.ui32ShiftState = 0x0;
+
+
+
+#if 0
+ if (_RegOpenKey(HKEY_LOCAL_MACHINE,pszRegPath,&hKey) == ERROR_SUCCESS)
+ {
+
+
+ QueryReg(hKey,"ui32ScanCode",&g_PrivateHotKeyData.ui32ScanCode);
+ QueryReg(hKey,"ui32ShiftState",&g_PrivateHotKeyData.ui32ShiftState);
+ }
+#else
+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode);
+ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState);
+#endif
+}
+
+IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo)
+{
+ PDBG_STREAM psStream;
+
+ PVR_UNREFERENCED_PARAMETER(pInfo);
+
+ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n"));
+
+ psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream;
+
+ if (!g_bHotKeyPressed)
+ {
+
+
+ g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2;
+
+
+
+ g_bHotKeyPressed = IMG_TRUE;
+ }
+ }
+}
+
+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream)
+{
+
+
+ ReadInHotKeys();
+
+
+
+ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey)
+ {
+ if (g_PrivateHotKeyData.ui32ScanCode != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n"));
+
+
+
+ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
+
+ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo);
+ }
+ else
+ {
+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
+ }
+ }
+}
+
+IMG_VOID DeactivateHotKeys(IMG_VOID)
+{
+ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n"));
+
+ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
+ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
+ }
+}
+
+
diff --git a/drivers/gpu/pvr/pdump/hotkey.h b/drivers/gpu/pvr/pdump/hotkey.h
new file mode 100644
index 0000000..d9c9458
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/hotkey.h
@@ -0,0 +1,60 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _HOTKEY_
+#define _HOTKEY_
+
+
+typedef struct _hotkeyinfo
+{
+ IMG_UINT8 ui8ScanCode;
+ IMG_UINT8 ui8Type;
+ IMG_UINT8 ui8Flag;
+ IMG_UINT8 ui8Filler1;
+ IMG_UINT32 ui32ShiftState;
+ IMG_UINT32 ui32HotKeyProc;
+ IMG_VOID *pvStream;
+ IMG_UINT32 hHotKey;
+} HOTKEYINFO, *PHOTKEYINFO;
+
+typedef struct _privatehotkeydata
+{
+ IMG_UINT32 ui32ScanCode;
+ IMG_UINT32 ui32ShiftState;
+ HOTKEYINFO sHotKeyInfo;
+} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA;
+
+
+IMG_VOID ReadInHotKeys (IMG_VOID);
+IMG_VOID ActivateHotKeys(PDBG_STREAM psStream);
+IMG_VOID DeactivateHotKeys(IMG_VOID);
+
+IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey);
+IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo);
+IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo);
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump/ioctl.c b/drivers/gpu/pvr/pdump/ioctl.c
new file mode 100644
index 0000000..e646c4f
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/ioctl.c
@@ -0,0 +1,587 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+
+#ifdef LINUX
+#include <asm/uaccess.h>
+#include "pvr_uaccess.h"
+#endif
+
+#include "img_types.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hotkey.h"
+#include "dbgdriv_ioctl.h"
+
+
+static IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_CREATESTREAM psIn;
+ IMG_VOID * *ppvOut;
+ #ifdef LINUX
+ static IMG_CHAR name[32];
+ #endif
+
+ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+ ppvOut = (IMG_VOID * *) pvOutBuffer;
+
+ #ifdef LINUX
+
+ if(pvr_copy_from_user(name, psIn->u.pszName, 32) != 0)
+ {
+ return IMG_FALSE;
+ }
+
+ *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages);
+
+ #else
+ *ppvOut = ExtDBGDrivCreateStream(psIn->u.pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages);
+ #endif
+
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM *ppsStream;
+ PDBG_STREAM psStream;
+
+ ppsStream = (PDBG_STREAM *) pvInBuffer;
+ psStream = (PDBG_STREAM) *ppsStream;
+
+ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
+
+ ExtDBGDrivDestroyStream(psStream);
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_FINDSTREAM psParams;
+ IMG_SID * phStream;
+
+ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
+ phStream = (IMG_SID *)pvOutBuffer;
+
+ *phStream = PStream2SID(ExtDBGDrivFindStream(psParams->u.pszName, psParams->bResetStream));
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITESTRING psParams;
+ IMG_UINT32 *pui32OutLen;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivWriteString(psStream,psParams->u.pszString,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITESTRING psParams;
+ IMG_UINT32 *pui32OutLen;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivWriteStringCM(psStream,psParams->u.pszString,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32OutLen;
+ PDBG_IN_READSTRING psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_READSTRING) pvInBuffer;
+ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32OutLen = ExtDBGDrivReadString(psStream,
+ psParams->u.pszString,psParams->ui32StringLen);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32OutLen = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWrite(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWrite2(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_WRITE psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWriteCM(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32TransferSize,
+ psInParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psInParams->bReadInitBuffer,
+ psInParams->ui32OutBufferSize,
+ psInParams->u.pui8OutBuffer);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetCaptureMode(psStream,
+ psParams->ui32Mode,
+ psParams->ui32Start,
+ psParams->ui32End,
+ psParams->ui32SampleRate);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGOUTMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetOutputMode(psStream,psParams->ui32Mode);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETDEBUGLEVEL psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetDebugLevel(psStream,psParams->ui32Level);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETFRAME psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETFRAME) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetFrame(psStream,psParams->ui32Frame);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivGetFrame(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_ISCAPTUREFRAME psParams;
+ IMG_UINT32 * pui32Current;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer;
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivIsCaptureFrame(psStream,
+ psParams->bCheckPreviousFrame);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_OVERRIDEMODE psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivOverrideMode(psStream,psParams->ui32Mode);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivDefaultMode(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_SETMARKER psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32Current = ExtDBGDrivGetMarker(psStream);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32Current = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_PVOID * ppvOut;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ ppvOut = (IMG_PVOID *) pvOutBuffer;
+
+ *ppvOut = DBGDrivGetServiceTable();
+
+ return(IMG_TRUE);
+}
+
+static IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ PDBG_IN_WRITE_LF psInParams;
+ IMG_UINT32 *pui32BytesCopied;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivWriteLF(psStream,
+ psInParams->u.pui8InBuffer,
+ psInParams->ui32BufferSize,
+ psInParams->ui32Level,
+ psInParams->ui32Flags);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ IMG_UINT32 * pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(psInParams->hStream);
+ if (psStream != (PDBG_STREAM)IMG_NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivReadLF(psStream,
+ psInParams->ui32OutBufferSize,
+ psInParams->u.pui8OutBuffer);
+ return(IMG_TRUE);
+ }
+ else
+ {
+
+ *pui32BytesCopied = 0;
+ return(IMG_FALSE);
+ }
+}
+
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
+{
+ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+
+ ExtDBGDrivWaitForEvent(eEvent);
+
+ return(IMG_TRUE);
+}
+
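+/*
+ * Ioctl dispatch table, indexed by the rebased command code computed in
+ * dbgdrv_ioctl(); the entry order must match the debug-service command numbering.
+ */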
+IMG_UINT32 (*g_DBGDrivProc[25])(IMG_VOID *, IMG_VOID *) =
+{
+ DBGDIOCDrivCreateStream,
+ DBGDIOCDrivDestroyStream,
+ DBGDIOCDrivGetStream,
+ DBGDIOCDrivWriteString,
+ DBGDIOCDrivReadString,
+ DBGDIOCDrivWrite,
+ DBGDIOCDrivRead,
+ DBGDIOCDrivSetCaptureMode,
+ DBGDIOCDrivSetOutMode,
+ DBGDIOCDrivSetDebugLevel,
+ DBGDIOCDrivSetFrame,
+ DBGDIOCDrivGetFrame,
+ DBGDIOCDrivOverrideMode,
+ DBGDIOCDrivDefaultMode,
+ DBGDIOCDrivGetServiceTable,
+ DBGDIOCDrivWrite2,
+ DBGDIOCDrivWriteStringCM,
+ DBGDIOCDrivWriteCM,
+ DBGDIOCDrivSetMarker,
+ DBGDIOCDrivGetMarker,
+ DBGDIOCDrivIsCaptureFrame,
+ DBGDIOCDrivWriteLF,
+ DBGDIOCDrivReadLF,
+ DBGDIOCDrivWaitForEvent
+};
+
diff --git a/drivers/gpu/pvr/pdump/linuxsrv.h b/drivers/gpu/pvr/pdump/linuxsrv.h
new file mode 100644
index 0000000..671622f
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/linuxsrv.h
@@ -0,0 +1,48 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+typedef struct tagIOCTL_PACKAGE
+{
+ IMG_UINT32 ui32Cmd;
+ IMG_UINT32 ui32Size;
+ IMG_VOID *pInBuffer;
+ IMG_UINT32 ui32InBufferSize;
+ IMG_VOID *pOutBuffer;
+ IMG_UINT32 ui32OutBufferSize;
+} IOCTL_PACKAGE;
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
+ IMG_UINT32 ui32ControlCode,
+ IMG_VOID *pInBuffer,
+ IMG_UINT32 ui32InBufferSize,
+ IMG_VOID *pOutBuffer,
+ IMG_UINT32 ui32OutBufferSize,
+ IMG_UINT32 *pui32BytesReturned);
+
+#endif
diff --git a/drivers/gpu/pvr/pdump/main.c b/drivers/gpu/pvr/pdump/main.c
new file mode 100644
index 0000000..dd45cfa
--- /dev/null
+++ b/drivers/gpu/pvr/pdump/main.c
@@ -0,0 +1,314 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM)
+#include <linux/pci.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#if defined(SUPPORT_DRI_DRM)
+#include "drmP.h"
+#include "drm.h"
+#endif
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "hotkey.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+
+#if defined(SUPPORT_DRI_DRM)
+
+#include "pvr_drm_shared.h"
+#include "pvr_drm.h"
+
+#else
+
+#define DRVNAME "dbgdrv"
+MODULE_SUPPORTED_DEVICE(DRVNAME);
+
+#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
+static struct class *psDbgDrvClass;
+#endif
+
+static int AssignedMajorNumber = 0;
+
+long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
+
+static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+ return 0;
+}
+
+static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+ return 0;
+}
+
+static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
+{
+ return 0;
+}
+
+static struct file_operations dbgdrv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dbgdrv_ioctl,
+ .open = dbgdrv_open,
+ .release = dbgdrv_release,
+ .mmap = dbgdrv_mmap,
+};
+
+#endif
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table)
+{
+ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+ *fn_table = &g_sDBGKMServices;
+}
+
+#if defined(SUPPORT_DRI_DRM)
+void dbgdrv_cleanup(void)
+#else
+void __exit dbgdrv_cleanup_module(void)
+#endif
+{
+#if !defined(SUPPORT_DRI_DRM)
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+ device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
+ class_destroy(psDbgDrvClass);
+#endif
+ unregister_chrdev(AssignedMajorNumber, DRVNAME);
+#endif
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ HostDestroyEventObjects();
+#endif
+ HostDestroyMutex(g_pvAPIMutex);
+ return;
+}
+
+#if defined(SUPPORT_DRI_DRM)
+IMG_INT dbgdrv_init(void)
+#else
+int __init dbgdrv_init_module(void)
+#endif
+{
+#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
+ struct device *psDev;
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+ int err = -EBUSY;
+#endif
+
+
+ if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
+ {
+ return -ENOMEM;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+ (void) HostCreateEventObjects();
+#endif
+
+#if !defined(SUPPORT_DRI_DRM)
+ AssignedMajorNumber =
+ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
+
+ if (AssignedMajorNumber <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
+ goto ErrDestroyEventObjects;
+ }
+
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+
+ psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
+ if (IS_ERR(psDbgDrvClass))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
+ __func__, PTR_ERR(psDbgDrvClass)));
+ goto ErrUnregisterCharDev;
+ }
+
+ psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0),
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+ NULL,
+#endif
+ DRVNAME);
+ if (IS_ERR(psDev))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
+ __func__, PTR_ERR(psDev)));
+ goto ErrDestroyClass;
+ }
+#endif
+#endif
+
+ return 0;
+
+#if !defined(SUPPORT_DRI_DRM)
+ErrDestroyEventObjects:
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ HostDestroyEventObjects();
+#endif
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+ErrUnregisterCharDev:
+ unregister_chrdev(AssignedMajorNumber, DRVNAME);
+ErrDestroyClass:
+ class_destroy(psDbgDrvClass);
+#endif
+ return err;
+#endif
+}
+
+#if defined(SUPPORT_DRI_DRM)
+int dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
+#else
+long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+#endif
+{
+ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
+ char *buffer, *in, *out;
+ unsigned int cmd;
+
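+	/*
+	 * A single page is allocated below and split in half (in/out buffers),
+	 * so neither user buffer may exceed PAGE_SIZE/2.
+	 */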
+ if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
+ return -1;
+ }
+
+ buffer = (char *) HostPageablePageAlloc(1);
+ if(!buffer)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
+ return -EFAULT;
+ }
+
+ in = buffer;
+ out = buffer + (PAGE_SIZE >>1);
+
+ if(pvr_copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
+ {
+ goto init_failed;
+ }
+
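+	/*
+	 * The function code occupies bits 2..13 of the command word; the first
+	 * debug-service code is 0x801, so subtracting it yields a zero-based
+	 * index into g_DBGDrivProc[].
+	 */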
+ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801;
+
+ if(pIP->ui32Cmd == DEBUG_SERVICE_READ)
+ {
+ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
+ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+ DBG_STREAM *psStream;
+ IMG_CHAR *ui8Tmp;
+
+ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
+
+ if(!ui8Tmp)
+ {
+ goto init_failed;
+ }
+
+	psStream = SID2PStream(psReadInParams->hStream);
+	if(!psStream)
+	{
+		/* Don't leak the temporary read buffer on the error path. */
+		vfree(ui8Tmp);
+		goto init_failed;
+	}
+
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psReadInParams->bReadInitBuffer,
+ psReadInParams->ui32OutBufferSize,
+ ui8Tmp);
+
+ if(pvr_copy_to_user(psReadInParams->u.pui8OutBuffer,
+ ui8Tmp,
+ *pui32BytesCopied) != 0)
+ {
+ vfree(ui8Tmp);
+ goto init_failed;
+ }
+
+ vfree(ui8Tmp);
+ }
+	else if (cmd < 24)	/* 24 handlers are registered in g_DBGDrivProc[] */
+	{
+		(g_DBGDrivProc[cmd])(in, out);
+	}
+	else
+	{
+		goto init_failed;
+	}
+
+ if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
+ {
+ goto init_failed;
+ }
+
+ HostPageablePageFree((IMG_VOID *)buffer);
+ return 0;
+
+init_failed:
+ HostPageablePageFree((IMG_VOID *)buffer);
+ return -EFAULT;
+}
+
+
+IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey)
+{
+ PVR_UNREFERENCED_PARAMETER(hHotKey);
+}
+
+IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ScanCode);
+ PVR_UNREFERENCED_PARAMETER(ui32ShiftState);
+ PVR_UNREFERENCED_PARAMETER(psInfo);
+}
+
+EXPORT_SYMBOL(DBGDrvGetServiceTable);
+
+module_init(dbgdrv_init_module);
+module_exit(dbgdrv_cleanup_module);
diff --git a/drivers/gpu/pvr/pdump_common.c b/drivers/gpu/pvr/pdump_common.c
new file mode 100644
index 0000000..45845b6cc
--- /dev/null
+++ b/drivers/gpu/pvr/pdump_common.c
@@ -0,0 +1,2368 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(PDUMP)
+#include <stdarg.h>
+
+#include "services_headers.h"
+#include "perproc.h"
+
+#include "pdump_km.h"
+#include "pdump_int.h"
+
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+#if 1
+#define PDUMP_DBG(a) PDumpOSDebugPrintf (a)
+#else
+#define PDUMP_DBG(a)
+#endif
+
+
+#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID *, p, x)
+#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS (32)
+static IMG_VOID *gpvTempBuffer = IMG_NULL;
+static IMG_HANDLE ghTempBufferBlockAlloc;
+static IMG_UINT16 gui16MMUContextUsage = 0;
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+IMG_UINT32 g_ui32EveryLineCounter = 1U;
+#endif
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(_PDumpIsPersistent)
+#endif
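+/*
+ * Returns IMG_TRUE when the calling process has marked its PDump output as
+ * persistent; with no per-process data the default is IMG_FALSE.
+ */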
+static INLINE
+IMG_BOOL _PDumpIsPersistent(IMG_VOID)
+{
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+
+ if(psPerProc == IMG_NULL)
+ {
+
+ return IMG_FALSE;
+ }
+ return psPerProc->bPDumpPersistent;
+}
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+
+static INLINE
+IMG_BOOL _PDumpIsProcessActive(IMG_VOID)
+{
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc == IMG_NULL)
+ {
+
+ return IMG_TRUE;
+ }
+ return psPerProc->bPDumpActive;
+}
+
+#endif
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+static INLINE
+IMG_UINT32 _PDumpGetPID(IMG_VOID)
+{
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc == IMG_NULL)
+ {
+
+ return 0;
+ }
+ return psPerProc->ui32PID;
+}
+#endif
+
+static IMG_VOID *GetTempBuffer(IMG_VOID)
+{
+
+ if (gpvTempBuffer == IMG_NULL)
+ {
+ PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ PDUMP_TEMP_BUFFER_SIZE,
+ &gpvTempBuffer,
+ &ghTempBufferBlockAlloc,
+ "PDUMP Temporary Buffer");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError));
+ }
+ }
+
+ return gpvTempBuffer;
+}
+
+static IMG_VOID FreeTempBuffer(IMG_VOID)
+{
+
+ if (gpvTempBuffer != IMG_NULL)
+ {
+ PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ PDUMP_TEMP_BUFFER_SIZE,
+ gpvTempBuffer,
+ ghTempBufferBlockAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError));
+ }
+ else
+ {
+ gpvTempBuffer = IMG_NULL;
+ }
+ }
+}
+
+IMG_VOID PDumpInitCommon(IMG_VOID)
+{
+
+ (IMG_VOID) GetTempBuffer();
+
+
+ PDumpInit();
+}
+
+IMG_VOID PDumpDeInitCommon(IMG_VOID)
+{
+
+ FreeTempBuffer();
+
+
+ PDumpDeInit();
+}
+
+IMG_BOOL PDumpIsSuspended(IMG_VOID)
+{
+ return PDumpOSIsSuspended();
+}
+
+IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
+{
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if( _PDumpIsProcessActive() )
+ {
+ return PDumpOSIsCaptureFrameKM();
+ }
+ return IMG_FALSE;
+#else
+ return PDumpOSIsCaptureFrameKM();
+#endif
+}
+
+PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
+{
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if( _PDumpIsProcessActive() )
+ {
+ return PDumpOSSetFrameKM(ui32Frame);
+ }
+ return PVRSRV_OK;
+#else
+ return PDumpOSSetFrameKM(ui32Frame);
+#endif
+}
+
+PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+ PDUMP_DBG(("PDumpRegWithFlagsKM"));
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X\r\n",
+ pszPDumpRegName, ui32Reg, ui32Data);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpRegKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data)
+{
+ return PDumpRegWithFlagsKM(pszPDumpRegName, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS);
+}
+
+PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator)
+{
+
+ #define POLL_DELAY 1000U
+ #define POLL_COUNT_LONG (2000000000U / POLL_DELAY)
+ #define POLL_COUNT_SHORT (1000000U / POLL_DELAY)
+
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PollCount;
+
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpRegPolWithFlagsKM"));
+ if ( _PDumpIsPersistent() )
+ {
+
+ return PVRSRV_OK;
+ }
+
+#if 0
+ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
+ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK) != 0) ||
+ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
+ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK) != 0) ||
+ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
+ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK) != 0))
+ {
+ ui32PollCount = POLL_COUNT_LONG;
+ }
+ else
+#endif
+ {
+ ui32PollCount = POLL_COUNT_LONG;
+ }
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d\r\n",
+ pszPDumpRegName, ui32RegAddr, ui32RegValue,
+ ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, PDUMP_POLL_OPERATOR eOperator)
+{
+ return PDumpRegPolWithFlagsKM(pszPDumpRegName, ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS, eOperator);
+}
+
+PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_UINT32 ui32DevVAddr,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32PageSize,
+ IMG_BOOL bShared,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_PUINT8 pui8LinAddr;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT32 ui32Page;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_GET_SCRIPT_STRING();
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ ui32Flags |= ( _PDumpIsPersistent() || bShared ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#else
+ PVR_UNREFERENCED_PARAMETER(bShared);
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+
+
+#if !defined(LINUX)
+ PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & HOST_PAGEMASK) == 0);
+#endif
+
+ PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & HOST_PAGEMASK) == 0);
+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & HOST_PAGEMASK) == 0);
+
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :%s:VA_%08X 0x%08X %u\r\n",
+ psDevID->pszPDumpDevName, ui32DevVAddr, ui32NumBytes, ui32PageSize);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+ pui8LinAddr = (IMG_PUINT8) pvLinAddr;
+ ui32Offset = 0;
+ ui32NumPages = ui32NumBytes / ui32PageSize;
+ while (ui32NumPages)
+ {
+ ui32NumPages--;
+
+
+
+
+
+
+
+
+
+
+
+ PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType,
+ hOSMemHandle,
+ ui32Offset,
+ pui8LinAddr,
+ ui32PageSize,
+ &sDevPAddr);
+ ui32Page = (IMG_UINT32)(sDevPAddr.uiAddr / ui32PageSize);
+
+ pui8LinAddr += ui32PageSize;
+ ui32Offset += ui32PageSize;
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_%08X%08X %u %u 0x%08X\r\n",
+ psDevID->pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ ui32Page * ui32PageSize,
+ ui32PageSize,
+ ui32PageSize,
+ ui32Page * ui32PageSize);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ }
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32PTSize,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_DEV_PHYADDR sDevPAddr;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize - 1)) == 0);
+ ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- MALLOC :%s:PAGE_TABLE 0x%08X %u\r\n",
+ psDevId->pszPDumpDevName,
+ ui32PTSize,
+ ui32PTSize);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+
+
+
+
+
+ PDumpOSCPUVAddrToDevPAddr(psDevId->eDeviceType,
+ hOSMemHandle,
+ ui32Offset,
+ (IMG_PUINT8) pvLinAddr,
+ ui32PTSize,
+ &sDevPAddr);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_%08X%08X 0x%X %u 0x%08X\r\n",
+ psDevId->pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr,
+ ui32PTSize,
+ ui32PTSize,
+ sDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32PageSize,
+ IMG_HANDLE hUniqueTag,
+ IMG_BOOL bInterleaved)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32NumPages, ui32PageCounter;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
+ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0);
+
+ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:VA_%08X\r\n",
+ psDeviceNode->sDevId.pszPDumpDevName, sDevVAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
+
+ if( psDeviceNode->pfnMMUIsHeapShared(psBMHeap->pMMUHeap) )
+ {
+ ui32Flags |= PDUMP_FLAGS_PERSISTENT;
+ }
+ }
+#endif
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+ ui32NumPages = ui32NumBytes / ui32PageSize;
+ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
+ {
+ if (!bInterleaved || (ui32PageCounter % 2) == 0)
+ {
+ sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
+
+			PVR_ASSERT(sDevPAddr.uiAddr != 0);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_%08X%08X\r\n",
+ psDeviceNode->sDevId.pszPDumpDevName, (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, sDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ }
+ else
+ {
+
+ }
+
+ sDevVAddr.uiAddr += ui32PageSize;
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32PTSize,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_DEV_PHYADDR sDevPAddr;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_UNREFERENCED_PARAMETER(ui32PTSize);
+ ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+
+ PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize-1UL)) == 0);
+
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:PAGE_TABLE\r\n", psDevID->pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+
+
+
+
+
+ PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType,
+ hOSMemHandle,
+ 0,
+ (IMG_PUINT8) pvLinAddr,
+ ui32PTSize,
+ &sDevPAddr);
+
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_%08X%08X\r\n",
+ psDevID->pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR *pszRegString;
+ PDUMP_GET_SCRIPT_STRING()
+
+ if(psMMUAttrib->pszPDRegRegion != IMG_NULL)
+ {
+ pszRegString = psMMUAttrib->pszPDRegRegion;
+ }
+ else
+ {
+ pszRegString = psMMUAttrib->sDevId.pszPDumpRegName;
+ }
+
+
+
+#if defined(SGX_FEATURE_36BIT_MMU)
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag,
+ (ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x4\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "WRW :%s:0x%08X: %s:$1\r\n",
+ pszRegString,
+ ui32Reg,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+#else
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:0x%08X :%s:PA_%08X%08X:0x%08X\r\n",
+ pszRegString,
+ ui32Reg,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ (ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift,
+ ui32Data & ~psMMUAttrib->ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpPDReg (PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_HANDLE hUniqueTag)
+{
+ return PDumpPDRegWithFlags(psMMUAttrib, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
+}
+
+PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ #define MEMPOLL_DELAY (1000)
+ #define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
+
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PageOffset;
+ IMG_UINT8 *pui8LinAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_DEV_VIRTADDR sDevVPageAddr;
+ PDUMP_MMU_ATTRIB *psMMUAttrib;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (PDumpOSIsSuspended())
+ {
+ return PVRSRV_OK;
+ }
+
+ if ( _PDumpIsPersistent() )
+ {
+
+ return PVRSRV_OK;
+ }
+
+
+ PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->uAllocSize);
+
+ psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib;
+
+
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- POL :%s:VA_%08X 0x%08X 0x%08X %d %d %d\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMemInfo->sDevVAddr.uiAddr + ui32Offset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ MEMPOLL_COUNT,
+ MEMPOLL_DELAY);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+ pui8LinAddr = psMemInfo->pvLinAddrKM;
+
+
+ pui8LinAddr += ui32Offset;
+
+
+
+
+ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
+ ui32Offset,
+ pui8LinAddr,
+ psMMUAttrib->ui32DataPageMask,
+ &ui32PageOffset);
+
+
+ sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
+
+ PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0);
+
+
+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
+
+
+ sDevPAddr.uiAddr += ui32PageOffset;
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "POL :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %d %d %d\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask),
+ sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask),
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ MEMPOLL_COUNT,
+ MEMPOLL_DELAY);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 ui32PageByteOffset;
+ IMG_UINT32 ui32BlockBytes;
+ IMG_UINT8* pui8LinAddr;
+ IMG_UINT8* pui8DataLinAddr = IMG_NULL;
+ IMG_DEV_VIRTADDR sDevVPageAddr;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT32 ui32ParamOutPos;
+ PDUMP_MMU_ATTRIB *psMMUAttrib;
+ IMG_UINT32 ui32DataPageSize;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+
+ if (ui32Bytes == 0 || PDumpOSIsSuspended())
+ {
+ return PVRSRV_OK;
+ }
+
+ psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib;
+
+
+
+ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->uAllocSize);
+
+ if (!PDumpOSJTInitialised())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ {
+ BM_HEAP *pHeap = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap;
+ PVRSRV_DEVICE_NODE *psDeviceNode = pHeap->pBMContext->psDeviceNode;
+
+ if( psDeviceNode->pfnMMUIsHeapShared(pHeap->pMMUHeap) )
+ {
+ ui32Flags |= PDUMP_FLAGS_PERSISTENT;
+ }
+ }
+#endif
+
+
+ if(pvAltLinAddr)
+ {
+ pui8DataLinAddr = pvAltLinAddr;
+ }
+ else if(psMemInfo->pvLinAddrKM)
+ {
+ pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
+ }
+ pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM;
+ sDevVAddr = psMemInfo->sDevVAddr;
+
+
+ sDevVAddr.uiAddr += ui32Offset;
+ pui8LinAddr += ui32Offset;
+
+ PVR_ASSERT(pui8DataLinAddr);
+
+ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
+
+ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
+
+
+
+ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
+ pui8DataLinAddr,
+ ui32Bytes,
+ ui32Flags))
+ {
+ return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ }
+
+ if (PDumpOSGetParamFileNum() == 0)
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
+ }
+ else
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum());
+ }
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "-- LDB :%s:VA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ psMemInfo->sDevVAddr.uiAddr,
+ ui32Offset,
+ ui32Bytes,
+ ui32ParamOutPos,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+
+ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
+ ui32Offset,
+ pui8LinAddr,
+ psMMUAttrib->ui32DataPageMask,
+ &ui32PageByteOffset);
+ ui32DataPageSize = psMMUAttrib->ui32DataPageMask + 1;
+ ui32NumPages = (ui32PageByteOffset + ui32Bytes + psMMUAttrib->ui32DataPageMask) / ui32DataPageSize;
+
+ while(ui32NumPages)
+ {
+ ui32NumPages--;
+
+
+ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
+
+ if (ui32DataPageSize <= PDUMP_TEMP_BUFFER_SIZE)
+ {
+
+ PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0);
+ }
+
+
+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
+
+
+ sDevPAddr.uiAddr += ui32PageByteOffset;
+
+
+ if (ui32PageByteOffset + ui32Bytes > ui32DataPageSize)
+ {
+
+ ui32BlockBytes = ui32DataPageSize - ui32PageByteOffset;
+ }
+ else
+ {
+
+ ui32BlockBytes = ui32Bytes;
+ }
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "LDB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask),
+ sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask),
+ ui32BlockBytes,
+ ui32ParamOutPos,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+
+#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+
+ ui32PageByteOffset = (ui32PageByteOffset + ui32BlockBytes) % ui32DataPageSize;
+#else
+
+ ui32PageByteOffset = 0;
+#endif
+
+ ui32Bytes -= ui32BlockBytes;
+
+ sDevVAddr.uiAddr += ui32BlockBytes;
+
+ pui8LinAddr += ui32BlockBytes;
+
+ ui32ParamOutPos += ui32BlockBytes;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_BOOL bInitialisePages,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2)
+{
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+
+
+ sMMUAttrib = *psMMUAttrib;
+ sMMUAttrib.ui32PTSize = (IMG_UINT32)HOST_PAGESIZE();
+ return PDumpMemPTEntriesKM( &sMMUAttrib,
+ hOSMemHandle,
+ pvLinAddr,
+ ui32Bytes,
+ ui32Flags,
+ bInitialisePages,
+ hUniqueTag1,
+ hUniqueTag2);
+}
+
+PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_BOOL bInitialisePages,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 ui32PageOffset;
+ IMG_UINT32 ui32BlockBytes;
+ IMG_UINT8* pui8LinAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32ParamOutPos;
+ IMG_UINT32 ui32PageMask;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+ if (PDumpOSIsSuspended())
+ {
+ return PVRSRV_OK;
+ }
+
+ if (!PDumpOSJTInitialised())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ if (!pvLinAddr)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
+
+ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
+
+ if (bInitialisePages)
+ {
+
+
+
+ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
+ pvLinAddr,
+ ui32Bytes,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS))
+ {
+ return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ }
+
+ if (PDumpOSGetParamFileNum() == 0)
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
+ }
+ else
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum());
+ }
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ }
+
+
+
+
+
+
+ ui32PageMask = psMMUAttrib->ui32PTSize - 1;
+
+
+
+
+ ui32PageOffset = (IMG_UINT32)((IMG_UINTPTR_T)pvLinAddr & (psMMUAttrib->ui32PTSize - 1));
+ ui32NumPages = (ui32PageOffset + ui32Bytes + psMMUAttrib->ui32PTSize - 1) / psMMUAttrib->ui32PTSize;
+ pui8LinAddr = (IMG_UINT8*) pvLinAddr;
+
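+	/* Process the page table memory one page at a time: translate the CPU
+	 * linear address to a device physical address, then either dump the raw
+	 * page contents (when bInitialisePages is set) or emit a WRW for each
+	 * 32-bit entry. */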
+ while (ui32NumPages)
+ {
+ ui32NumPages--;
+
+
+
+
+
+
+ sCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, pui8LinAddr);
+ sDevPAddr = SysCpuPAddrToDevPAddr(psMMUAttrib->sDevId.eDeviceType, sCpuPAddr);
+
+
+ if (ui32PageOffset + ui32Bytes > psMMUAttrib->ui32PTSize)
+ {
+
+ ui32BlockBytes = psMMUAttrib->ui32PTSize - ui32PageOffset;
+ }
+ else
+ {
+
+ ui32BlockBytes = ui32Bytes;
+ }
+
+
+
+
+ if (bInitialisePages)
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "LDB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ sDevPAddr.uiAddr & ~ui32PageMask,
+ sDevPAddr.uiAddr & ui32PageMask,
+ ui32BlockBytes,
+ ui32ParamOutPos,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ else
+ {
+ for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
+ {
+ IMG_UINT32 ui32PTE = *((IMG_UINT32 *)(IMG_UINTPTR_T)(pui8LinAddr + ui32Offset));
+
+ if ((ui32PTE & psMMUAttrib->ui32PDEMask) != 0)
+ {
+
+#if defined(SGX_FEATURE_36BIT_MMU)
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag2,
+ (ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$1 :%s:$1 0x4\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :%s:$1 :%s:$1 0x%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ ui32PTE & ~psMMUAttrib->ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:$1\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag1,
+ (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask,
+ (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+#else
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask,
+ (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2,
+ (ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift,
+ ui32PTE & ~psMMUAttrib->ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+#endif
+ }
+ else
+ {
+#if !defined(FIX_HW_BRN_31620)
+ PVR_ASSERT((ui32PTE & psMMUAttrib->ui32PTEValid) == 0UL);
+#endif
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X 0x%08X%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask,
+ (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask,
+ (ui32PTE << psMMUAttrib->ui32PTEAlignShift),
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+ }
+
+
+
+
+ ui32PageOffset = 0;
+
+ ui32Bytes -= ui32BlockBytes;
+
+ pui8LinAddr += ui32BlockBytes;
+
+ ui32ParamOutPos += ui32BlockBytes;
+ }
+
+ return PVRSRV_OK;
+}
+
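+/* Write a page directory physical address into the parameter stream and emit
+ * the WRW commands that store it at the given offset within psMemInfo. */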
+PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_PHYADDR sPDDevPAddr,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PageByteOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_VIRTADDR sDevVPageAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+ IMG_UINT32 ui32ParamOutPos;
+ PDUMP_MMU_ATTRIB *psMMUAttrib;
+ IMG_UINT32 ui32PageMask;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+ if (!PDumpOSJTInitialised())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib;
+ ui32PageMask = psMMUAttrib->ui32PTSize - 1;
+
+ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
+
+
+ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
+ (IMG_UINT8 *)&sPDDevPAddr,
+ sizeof(IMG_DEV_PHYADDR),
+ ui32Flags))
+ {
+ return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ }
+
+ if (PDumpOSGetParamFileNum() == 0)
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
+ }
+ else
+ {
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum());
+ }
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "-- LDB :%s:PA_0x%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ sPDDevPAddr.uiAddr & ~ui32PageMask,
+ sPDDevPAddr.uiAddr & ui32PageMask,
+ sizeof(IMG_DEV_PHYADDR),
+ ui32ParamOutPos,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+
+ sDevVAddr = psMemInfo->sDevVAddr;
+ ui32PageByteOffset = sDevVAddr.uiAddr & ui32PageMask;
+
+ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
+ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
+
+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
+ sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset;
+
+ if ((sPDDevPAddr.uiAddr & psMMUAttrib->ui32PDEMask) != 0UL)
+ {
+#if defined(SGX_FEATURE_36BIT_MMU)
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag2,
+ sPDDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "AND :%s:$2 :%s:$1 0xFFFFFFFF\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:$2\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag1,
+ (sDevPAddr.uiAddr) & ~(psMMUAttrib->ui32DataPageMask),
+ (sDevPAddr.uiAddr) & (psMMUAttrib->ui32DataPageMask),
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$2 :%s:$1 0x20\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:$2\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)hUniqueTag1,
+ (sDevPAddr.uiAddr + 4) & ~(psMMUAttrib->ui32DataPageMask),
+ (sDevPAddr.uiAddr + 4) & (psMMUAttrib->ui32DataPageMask),
+ psMMUAttrib->sDevId.pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+#else
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ sDevPAddr.uiAddr & ~ui32PageMask,
+ sDevPAddr.uiAddr & ui32PageMask,
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2,
+ sPDDevPAddr.uiAddr & psMMUAttrib->ui32PDEMask,
+ sPDDevPAddr.uiAddr & ~psMMUAttrib->ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+#endif
+ }
+ else
+ {
+ PVR_ASSERT(!(sDevPAddr.uiAddr & psMMUAttrib->ui32PTEValid));
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X 0x%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ sDevPAddr.uiAddr & ~ui32PageMask,
+ sDevPAddr.uiAddr & ui32PageMask,
+ sPDDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
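+/* Emit a comment into the script stream, prefixed with "-- "; with debug
+ * outfiles enabled, the process ID and line counter are prepended. */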
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR pszCommentPrefix[] = "-- ";
+#if defined(PDUMP_DEBUG_OUTFILES)
+ IMG_CHAR pszTemp[256];
+#endif
+ IMG_UINT32 ui32LenCommentPrefix;
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpCommentKM"));
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+ ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+
+ PDumpOSVerifyLineEnding(pszComment, ui32MaxLen);
+
+
+ ui32LenCommentPrefix = PDumpOSBuflen(pszCommentPrefix, sizeof(pszCommentPrefix));
+
+
+
+ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_SCRIPT2),
+ (IMG_UINT8*)pszCommentPrefix,
+ ui32LenCommentPrefix,
+ ui32Flags))
+ {
+#if defined(PDUMP_DEBUG_OUTFILES)
+ if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (continuous set)",
+ g_ui32EveryLineCounter, pszComment));
+ return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ }
+ else if(ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (persistent set)",
+ g_ui32EveryLineCounter, pszComment));
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s",
+ g_ui32EveryLineCounter, pszComment));
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+#else
+ PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %s",
+ pszComment));
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+#endif
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+ eErr = PDumpOSSprintf(pszTemp, 256, "%d-%d %s",
+ _PDumpGetPID(),
+ g_ui32EveryLineCounter,
+ pszComment);
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s",
+ pszTemp);
+#else
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s",
+ pszComment);
+#endif
+ if( (eErr != PVRSRV_OK) &&
+ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_va_list ap;
+ PDUMP_GET_MSG_STRING();
+
+
+ PDUMP_va_start(ap, pszFormat);
+ eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+ PDUMP_va_end(ap);
+
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ return PDumpCommentKM(pszMsg, ui32Flags);
+}
+
+PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_va_list ap;
+ PDUMP_GET_MSG_STRING();
+
+
+ PDUMP_va_start(ap, pszFormat);
+ eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+ PDUMP_va_end(ap);
+
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ return PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
+}
+
+PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32MsgLen;
+ PDUMP_GET_MSG_STRING();
+
+
+ eErr = PDumpOSSprintf(pszMsg, ui32MaxLen, "%s", pszString);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+
+ PDumpOSVerifyLineEnding(pszMsg, ui32MaxLen);
+ ui32MsgLen = PDumpOSBuflen(pszMsg, ui32MaxLen);
+
+ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
+ (IMG_UINT8*)pszMsg,
+ ui32MsgLen,
+ ui32Flags))
+ {
+ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
+ {
+ return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ }
+ else
+ {
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpBitmapKM( PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ PDUMP_MEM_FORMAT eMemFormat,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+ IMG_UINT32 ui32MMUContextID;
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ if ( _PDumpIsPersistent() )
+ {
+ return PVRSRV_OK;
+ }
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
+
+
+ ui32MMUContextID = psDeviceNode->pfnMMUGetContextID( hDevMemContext );
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\r\n",
+ pszFileName,
+ pszFileName,
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ eMemFormat);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpOSWriteString2( hScript, ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:0x%08X 0x%08X %s\r\n",
+ pszPDumpRegName,
+ ui32Address,
+ ui32FileOffset,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpOSWriteString2( hScript, ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
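+/* Check whether the next frame would be captured by temporarily advancing
+ * the frame counter and restoring it afterwards. */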
+IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
+{
+ IMG_BOOL bFrameDumped;
+
+
+
+ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1);
+ bFrameDumped = PDumpIsCaptureFrameKM();
+ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame);
+
+ return bFrameDumped;
+}
+
+static PVRSRV_ERROR PDumpSignatureRegister (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 *pui32FileOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:0x%08X 0x%08X %s\r\n",
+ psDevId->pszPDumpRegName,
+ ui32Address,
+ *pui32FileOffset,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpOSWriteString2(hScript, ui32Flags);
+ *pui32FileOffset += ui32Size;
+ return PVRSRV_OK;
+}
+
+static IMG_VOID PDumpRegisterRange(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters,
+ IMG_UINT32 *pui32FileOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 i;
+ for (i = 0; i < ui32NumRegisters; i++)
+ {
+ PDumpSignatureRegister(psDevId, pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags);
+ }
+}
+
+PVRSRV_ERROR PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32FileOffset, ui32Flags;
+
+ PDUMP_GET_FILE_STRING();
+
+ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
+ ui32FileOffset = 0;
+
+ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n");
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_3d.sig", ui32DumpFrameNum);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpRegisterRange(psDevId,
+ pszFileName,
+ pui32Registers,
+ ui32NumRegisters,
+ &ui32FileOffset,
+ sizeof(IMG_UINT32),
+ ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpTASignatureRegisters (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_UINT32 ui32TAKickCount,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32FileOffset, ui32Flags;
+
+ PDUMP_GET_FILE_STRING();
+
+ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
+ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32);
+
+ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_ta.sig", ui32DumpFrameNum);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpRegisterRange(psDevId,
+ pszFileName,
+ pui32Registers,
+ ui32NumRegisters,
+ &ui32FileOffset,
+ sizeof(IMG_UINT32),
+ ui32Flags);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCounterRegisters (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32FileOffset, ui32Flags;
+
+ PDUMP_GET_FILE_STRING();
+
+ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
+ ui32FileOffset = 0UL;
+
+ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
+ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u.perf", ui32DumpFrameNum);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpRegisterRange(psDevId,
+ pszFileName,
+ pui32Registers,
+ ui32NumRegisters,
+ &ui32FileOffset,
+ sizeof(IMG_UINT32),
+ ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n",
+ pszPDumpRegName,
+ ui32RegOffset);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:v%x:0x%08X 0x%08X 0x%08X %s.bin\r\n",
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ pszFileName);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDumpOSWriteString2(hScript, ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_BOOL bLastFrame)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n",
+ psDevId->pszPDumpRegName,
+ ui32RegOffset);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpSignatureBuffer (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_CHAR *pszBufferType,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump microkernel %s signature Buffer\r\n",
+ pszBufferType);
+ PDumpCommentWithFlags(ui32PDumpFlags, "Buffer format (sizes in 32-bit words):\r\n");
+ PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of signatures per sample (1)\r\n");
+ PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of samples (1)\r\n");
+ PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature register offsets (1 * number of signatures)\r\n");
+ PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature sample values (number of samples * number of signatures)\r\n");
+ PDumpCommentWithFlags(ui32PDumpFlags, "Note: If buffer is full, last sample is final state after test completed\r\n");
+ return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size,
+ ui32MMUContextID, ui32PDumpFlags);
+}
+
+
+PVRSRV_ERROR PDumpHWPerfCBKM (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
+ return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size,
+ ui32MMUContextID, ui32PDumpFlags);
+}
+
+
+PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
+ IMG_UINT32 ui32ROffOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PageOffset;
+ IMG_UINT8 *pui8LinAddr;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_DEV_VIRTADDR sDevVPageAddr;
+
+ PDUMP_MMU_ATTRIB *psMMUAttrib;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ psMMUAttrib = ((BM_BUF*)psROffMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib;
+
+
+ PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->uAllocSize);
+
+ pui8LinAddr = psROffMemInfo->pvLinAddrKM;
+ sDevVAddr = psROffMemInfo->sDevVAddr;
+
+
+ pui8LinAddr += ui32ROffOffset;
+ sDevVAddr.uiAddr += ui32ROffOffset;
+
+
+
+
+ PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
+ ui32ROffOffset,
+ pui8LinAddr,
+ psMMUAttrib->ui32DataPageMask,
+ &ui32PageOffset);
+
+
+ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
+
+ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
+
+
+ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
+
+
+ sDevPAddr.uiAddr += ui32PageOffset;
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X 0x%08X\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask),
+ sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask),
+ ui32WPosVal,
+ ui32PacketSize,
+ ui32BufferSize);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+ PDUMP_DBG(("PDumpIDLWithFlags"));
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u\r\n", ui32Clocks);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, ui32Flags);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
+{
+ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
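+/* Dump user-mode memory: if a kernel mapping exists and no alternative
+ * user address is given, dump it directly; otherwise copy the data into a
+ * kernel temp buffer in PDUMP_TEMP_BUFFER_SIZE chunks and dump each chunk. */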
+PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_PVOID pvAltLinAddrUM,
+ IMG_PVOID pvLinAddrUM,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_VOID *pvAddrUM;
+ IMG_VOID *pvAddrKM;
+ IMG_UINT32 ui32BytesDumped;
+ IMG_UINT32 ui32CurrentOffset;
+
+ if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL)
+ {
+
+ return PDumpMemKM(IMG_NULL,
+ psMemInfo,
+ ui32Offset,
+ ui32Bytes,
+ ui32Flags,
+ hUniqueTag);
+ }
+
+ pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL);
+
+ pvAddrKM = GetTempBuffer();
+
+
+ PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL);
+ if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
+ {
+ PDumpCommentWithFlags(ui32Flags, "Dumping 0x%08x bytes of memory, in blocks of 0x%08x bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE);
+ }
+
+ ui32CurrentOffset = ui32Offset;
+ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;)
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
+
+ eError = OSCopyFromUser(psPerProc,
+ pvAddrKM,
+ pvAddrUM,
+ ui32BytesToDump);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d)", eError));
+ return eError;
+ }
+
+ eError = PDumpMemKM(pvAddrKM,
+ psMemInfo,
+ ui32CurrentOffset,
+ ui32BytesToDump,
+ ui32Flags,
+ hUniqueTag);
+
+ if (eError != PVRSRV_OK)
+ {
+
+ if (ui32BytesDumped != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
+ }
+ PVR_ASSERT(ui32BytesDumped == 0);
+ return eError;
+ }
+
+ VPTR_INC(pvAddrUM, ui32BytesToDump);
+ ui32CurrentOffset += ui32BytesToDump;
+ ui32BytesDumped += ui32BytesToDump;
+ }
+
+ return PVRSRV_OK;
+}
+
+
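+/* Allocate the first free PDump MMU context ID from the usage bitmask. */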
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+ IMG_UINT32 i;
+
+
+ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+ {
+ if((gui16MMUContextUsage & (1U << i)) == 0)
+ {
+
+ gui16MMUContextUsage |= 1U << i;
+ *pui32MMUContextID = i;
+ return PVRSRV_OK;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+ {
+
+ gui16MMUContextUsage &= ~(1U << ui32MMUContextID);
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
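+/* Bind a new PDump MMU context: allocate a context ID, translate the page
+ * directory CPU address to its device physical address (aligned down to 4K)
+ * and emit the MMU script command that associates the two. */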
+PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CHAR *pszMemSpace,
+ IMG_UINT32 *pui32MMUContextID,
+ IMG_UINT32 ui32MMUType,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvPDCPUAddr)
+{
+ IMG_UINT8 *pui8LinAddr = (IMG_UINT8 *)pvPDCPUAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT32 ui32MMUContextID;
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eErr));
+ return eErr;
+ }
+
+
+
+ sCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, pui8LinAddr);
+ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
+
+ sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) -1);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d %d :%s:PA_%08X%08X\r\n",
+ pszMemSpace,
+ ui32MMUContextID,
+ ui32MMUType,
+ pszMemSpace,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1,
+ sDevPAddr.uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+
+ *pui32MMUContextID = ui32MMUContextID;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32MMUType)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+ PVR_UNREFERENCED_PARAMETER(eDeviceType);
+ PVR_UNREFERENCED_PARAMETER(ui32MMUType);
+
+
+ PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace);
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d\r\n",
+ pszMemSpace,
+ ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+ eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eErr));
+ return eErr;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpStoreMemToFile(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 uiAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_DEV_VIRTADDR sDevVPageAddr;
+ IMG_UINT32 ui32PageOffset;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+
+
+
+ ui32PageOffset = (IMG_UINT32)((IMG_UINTPTR_T)psMemInfo->pvLinAddrKM & psMMUAttrib->ui32DataPageMask);
+
+
+ sDevVPageAddr.uiAddr = uiAddr - ui32PageOffset;
+
+
+ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
+
+
+ sDevPAddr.uiAddr += ui32PageOffset;
+
+ PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n",
+ psMMUAttrib->sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag,
+ sDevPAddr.uiAddr & ~psMMUAttrib->ui32DataPageMask,
+ sDevPAddr.uiAddr & psMMUAttrib->ui32DataPageMask,
+ ui32Size,
+ ui32FileOffset,
+ pszFileName);
+
+ PDumpOSWriteString2(hScript, ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags)
+{
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X\r\n",
+ pszPDumpRegName,
+ ui32RegOffset,
+ ui32WPosVal,
+ ui32PacketSize,
+ ui32BufferSize);
+ PDumpOSWriteString2(hScript, ui32Flags);
+
+ return PVRSRV_OK;
+}
+
+
+
+#include "syscommon.h"
+
+IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE *psThis;
+ PVR_DPF((PVR_DBG_WARNING, "PDump has connected."));
+
+
+ SysAcquireData(&psSysData);
+
+ psThis = psSysData->psDeviceNodeList;
+ while (psThis)
+ {
+ if (psThis->pfnPDumpInitDevice)
+ {
+
+ psThis->pfnPDumpInitDevice(psThis);
+ }
+ psThis = psThis->psNext;
+ }
+}
+
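+/* Low-level writer: route the buffer to the debug driver in the write mode
+ * selected by the PDump flags (persistent, continuous, last-frame or BINCM),
+ * retrying until all bytes are accepted or the write fails. */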
+IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32BytesWritten = 0;
+ IMG_UINT32 ui32Off = 0;
+ PDBG_STREAM_CONTROL psCtrl = psStream->psCtrl;
+
+
+ if ((ui32Flags & PDUMP_FLAGS_NEVER) != 0)
+ {
+ return ui32BCount;
+ }
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ if ( (_PDumpIsProcessActive() == IMG_FALSE ) &&
+ ((ui32Flags & PDUMP_FLAGS_PERSISTENT) == 0) )
+ {
+ return ui32BCount;
+ }
+#endif
+
+
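+	/* When the persistent flag is set after the init phase, the data is first
+	 * written through the persistent stream; the same bytes then go through
+	 * the normal write path below. A failed persistent write suspends PDump
+	 * for read-only streams. */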
+ if ( ((ui32Flags & PDUMP_FLAGS_PERSISTENT) != 0) && (psCtrl->bInitPhaseComplete) )
+ {
+ while (ui32BCount > 0)
+ {
+
+
+
+ ui32BytesWritten = PDumpOSDebugDriverWrite( psStream,
+ PDUMP_WRITE_MODE_PERSISTENT,
+ &pui8Data[ui32Off], ui32BCount, 1, 0);
+
+ if (ui32BytesWritten == 0)
+ {
+ PDumpOSReleaseExecution();
+ }
+
+ if (ui32BytesWritten != 0xFFFFFFFFU)
+ {
+ ui32Off += ui32BytesWritten;
+ ui32BCount -= ui32BytesWritten;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DbgWrite: Failed to send persistent data"));
+ if( (psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) != 0)
+ {
+
+ PDumpSuspendKM();
+ }
+ return 0xFFFFFFFFU;
+ }
+ }
+
+
+ ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0;
+ }
+
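+	/* Main write loop: select the debug driver write mode from the flags
+	 * (continuous, last-frame or BINCM) and keep writing until every byte
+	 * has been accepted or an error is reported. */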
+ while (((IMG_UINT32) ui32BCount > 0) && (ui32BytesWritten != 0xFFFFFFFFU))
+ {
+ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0)
+ {
+
+
+ if (((psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
+ (psCtrl->ui32Start == 0xFFFFFFFFU) &&
+ (psCtrl->ui32End == 0xFFFFFFFFU) &&
+ psCtrl->bInitPhaseComplete)
+ {
+ ui32BytesWritten = ui32BCount;
+ }
+ else
+ {
+ ui32BytesWritten = PDumpOSDebugDriverWrite( psStream,
+ PDUMP_WRITE_MODE_CONTINUOUS,
+ &pui8Data[ui32Off], ui32BCount, 1, 0);
+ }
+ }
+ else
+ {
+ if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
+ {
+ IMG_UINT32 ui32DbgFlags;
+
+ ui32DbgFlags = 0;
+ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
+ {
+ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
+ }
+
+ ui32BytesWritten = PDumpOSDebugDriverWrite( psStream,
+ PDUMP_WRITE_MODE_LASTFRAME,
+ &pui8Data[ui32Off], ui32BCount, 1, ui32DbgFlags);
+ }
+ else
+ {
+ ui32BytesWritten = PDumpOSDebugDriverWrite( psStream,
+ PDUMP_WRITE_MODE_BINCM,
+ &pui8Data[ui32Off], ui32BCount, 1, 0);
+ }
+ }
+
+
+
+
+ if (ui32BytesWritten == 0)
+ {
+ PDumpOSReleaseExecution();
+ }
+
+ if (ui32BytesWritten != 0xFFFFFFFFU)
+ {
+ ui32Off += ui32BytesWritten;
+ ui32BCount -= ui32BytesWritten;
+ }
+
+
+ }
+
+
+
+ return ui32BytesWritten;
+}
+
+
+
+#else
+#endif
diff --git a/drivers/gpu/pvr/pdump_int.h b/drivers/gpu/pvr/pdump_int.h
new file mode 100644
index 0000000..9f68549
--- /dev/null
+++ b/drivers/gpu/pvr/pdump_int.h
@@ -0,0 +1,67 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PDUMP_INT_H__
+#define __PDUMP_INT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(_UITRON)
+#include "dbgdrvif.h"
+
+IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID);
+
+#endif
+
+typedef enum
+{
+
+ PDUMP_WRITE_MODE_CONTINUOUS = 0,
+
+ PDUMP_WRITE_MODE_LASTFRAME,
+
+ PDUMP_WRITE_MODE_BINCM,
+
+ PDUMP_WRITE_MODE_PERSISTENT
+} PDUMP_DDWMODE;
+
+
+IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
+
+IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream,
+ PDUMP_DDWMODE eDbgDrvWriteMode,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount,
+ IMG_UINT32 ui32Level,
+ IMG_UINT32 ui32DbgDrvFlags);
+
+#if defined (__cplusplus)
+}
+#endif
+#endif
+
diff --git a/drivers/gpu/pvr/pdump_km.h b/drivers/gpu/pvr/pdump_km.h
new file mode 100644
index 0000000..6c516e0
--- /dev/null
+++ b/drivers/gpu/pvr/pdump_km.h
@@ -0,0 +1,412 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _PDUMP_KM_H_
+#define _PDUMP_KM_H_
+
+
+#include "pdump_osfunc.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "pdump.h"
+
+#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
+
+#define PDUMP_STREAM_PARAM2 0
+#define PDUMP_STREAM_SCRIPT2 1
+#define PDUMP_STREAM_DRIVERINFO 2
+#define PDUMP_NUM_STREAMS 3
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+extern IMG_UINT32 g_ui32EveryLineCounter;
+#endif
+
+#ifndef PDUMP
+#define MAKEUNIQUETAG(hMemInfo) (0)
+#endif
+
+#ifdef PDUMP
+
+#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)(hMemInfo))->sMemBlk.hBuffer))->pMapping)
+
+ IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData,
+ IMG_PVOID pvAltLinAddr,
+ IMG_PVOID pvLinAddr,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_DEV_PHYADDR *pPages,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevAddr,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32Length,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ PVRSRV_ERROR PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_BOOL bInitialisePages,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2);
+
+ PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags,
+ IMG_BOOL bInitialisePages,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2);
+ IMG_VOID PDumpInitCommon(IMG_VOID);
+ IMG_VOID PDumpDeInitCommon(IMG_VOID);
+ IMG_VOID PDumpInit(IMG_VOID);
+ IMG_VOID PDumpDeInit(IMG_VOID);
+ IMG_BOOL PDumpIsSuspended(IMG_VOID);
+ PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID);
+ PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID);
+ IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
+ IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+ IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator);
+ PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ PDUMP_MEM_FORMAT eMemFormat,
+ IMG_UINT32 ui32PDumpFlags);
+ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpRegKM(IMG_CHAR* pszPDumpRegName,
+ IMG_UINT32 dwReg,
+ IMG_UINT32 dwData);
+
+ PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
+ IMG_CHAR* pszFormat,
+ ...) IMG_FORMAT_PRINTF(2, 3);
+
+ PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32dwData,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
+ IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
+
+ IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_UINT32 ui32DevVAddr,
+ IMG_PUINT32 pui32PhysPages,
+ IMG_UINT32 ui32NumPages,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CHAR *pszMemSpace,
+ IMG_UINT32 *pui32MMUContextID,
+ IMG_UINT32 ui32MMUType,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hOSMemHandle,
+ IMG_VOID *pvPDCPUAddr);
+ PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32MMUType);
+
+ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_PHYADDR sPDDevPAddr,
+ IMG_HANDLE hUniqueTag1,
+ IMG_HANDLE hUniqueTag2);
+
+ IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
+
+ PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32DataMaster,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpTASignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_UINT32 ui32TAKickCount,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters);
+
+ PVRSRV_ERROR PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters);
+
+ PVRSRV_ERROR PDumpCounterRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_UINT32 ui32DumpFrameNum,
+ IMG_BOOL bLastFrame,
+ IMG_UINT32 *pui32Registers,
+ IMG_UINT32 ui32NumRegisters);
+
+ PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ const IMG_UINT32 dwRegOffset,
+ IMG_BOOL bLastFrame);
+
+ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+ PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_UINT32 ui32DevVAddr,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32PageSize,
+ IMG_BOOL bShared,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32PageSize,
+ IMG_HANDLE hUniqueTag,
+ IMG_BOOL bInterleaved);
+ PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_IDENTIFIER *psDevID,
+ IMG_HANDLE hOSMemHandle,
+ IMG_CPU_VIRTADDR pvLinAddr,
+ IMG_UINT32 ui32NumBytes,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpSignatureBuffer(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+ IMG_CHAR *pszFileName,
+ IMG_CHAR *pszBufferType,
+ IMG_UINT32 ui32FileOffset,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
+ IMG_UINT32 ui32ROffOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags);
+
+ IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 uiAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hUniqueTag);
+
+ IMG_VOID PDumpSuspendKM(IMG_VOID);
+ IMG_VOID PDumpResumeKM(IMG_VOID);
+
+
+ PVRSRV_ERROR PDumpStoreMemToFile(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 uiAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hUniqueTag);
+
+ #define PDUMPMEMPOL PDumpMemPolKM
+ #define PDUMPMEM PDumpMemKM
+ #define PDUMPMEMPTENTRIES PDumpMemPTEntriesKM
+ #define PDUMPPDENTRIES PDumpMemPDEntriesKM
+ #define PDUMPMEMUM PDumpMemUM
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT PDumpDeInitCommon
+ #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
+ #define PDUMPTESTFRAME PDumpIsCaptureFrameKM
+ #define PDUMPTESTNEXTFRAME PDumpTestNextFrame
+ #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
+ #define PDUMPREG PDumpRegKM
+ #define PDUMPCOMMENT PDumpComment
+ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
+ #define PDUMPREGPOL PDumpRegPolKM
+ #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
+ #define PDUMPMALLOCPAGES PDumpMallocPages
+ #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
+ #define PDUMPSETMMUCONTEXT PDumpSetMMUContext
+ #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
+ #define PDUMPPDDEVPADDR PDumpPDDevPAddrKM
+ #define PDUMPFREEPAGES PDumpFreePages
+ #define PDUMPFREEPAGETABLE PDumpFreePageTable
+ #define PDUMPPDREG PDumpPDReg
+ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
+ #define PDUMPCBP PDumpCBP
+ #define PDUMPREGBASEDCBP PDumpRegBasedCBP
+ #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
+ #define PDUMPENDINITPHASE PDumpStopInitPhaseKM
+ #define PDUMPBITMAPKM PDumpBitmapKM
+ #define PDUMPDRIVERINFO PDumpDriverInfoKM
+ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
+ #define PDUMPIDL PDumpIDL
+ #define PDUMPSUSPEND PDumpSuspendKM
+ #define PDUMPRESUME PDumpResumeKM
+
+#else
+ #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
+ #define PDUMPMEMPOL(args...)
+ #define PDUMPMEM(args...)
+ #define PDUMPMEMPTENTRIES(args...)
+ #define PDUMPPDENTRIES(args...)
+ #define PDUMPMEMUM(args...)
+ #define PDUMPINIT(args...)
+ #define PDUMPDEINIT(args...)
+ #define PDUMPISLASTFRAME(args...)
+ #define PDUMPTESTFRAME(args...)
+ #define PDUMPTESTNEXTFRAME(args...)
+ #define PDUMPREGWITHFLAGS(args...)
+ #define PDUMPREG(args...)
+ #define PDUMPCOMMENT(args...)
+ #define PDUMPREGPOL(args...)
+ #define PDUMPREGPOLWITHFLAGS(args...)
+ #define PDUMPMALLOCPAGES(args...)
+ #define PDUMPMALLOCPAGETABLE(args...)
+ #define PDUMPSETMMUCONTEXT(args...)
+ #define PDUMPCLEARMMUCONTEXT(args...)
+ #define PDUMPPDDEVPADDR(args...)
+ #define PDUMPFREEPAGES(args...)
+ #define PDUMPFREEPAGETABLE(args...)
+ #define PDUMPPDREG(args...)
+ #define PDUMPPDREGWITHFLAGS(args...)
+ #define PDUMPSYNC(args...)
+ #define PDUMPCOPYTOMEM(args...)
+ #define PDUMPWRITE(args...)
+ #define PDUMPCBP(args...)
+ #define PDUMPREGBASEDCBP(args...)
+ #define PDUMPCOMMENTWITHFLAGS(args...)
+ #define PDUMPMALLOCPAGESPHYS(args...)
+ #define PDUMPENDINITPHASE(args...)
+ #define PDUMPMSVDXREG(args...)
+ #define PDUMPMSVDXREGWRITE(args...)
+ #define PDUMPMSVDXREGREAD(args...)
+ #define PDUMPMSVDXPOLEQ(args...)
+ #define PDUMPMSVDXPOL(args...)
+ #define PDUMPBITMAPKM(args...)
+ #define PDUMPDRIVERINFO(args...)
+ #define PDUMPIDLWITHFLAGS(args...)
+ #define PDUMPIDL(args...)
+ #define PDUMPSUSPEND(args...)
+ #define PDUMPRESUME(args...)
+ #define PDUMPMSVDXWRITEREF(args...)
+ #else
+ #error Compiler not specified
+ #endif
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pdump_osfunc.h b/drivers/gpu/pvr/pdump_osfunc.h
new file mode 100644
index 0000000..4daacf4
--- /dev/null
+++ b/drivers/gpu/pvr/pdump_osfunc.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stdarg.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+#define MAX_PDUMP_STRING_LENGTH (256)
+
+
+
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eError; \
+ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
+ if(eError != PVRSRV_OK) return eError;
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR *pszMsg; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eError; \
+ eError = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\
+ if(eError != PVRSRV_OK) return eError;
+
+#define PDUMP_GET_FILE_STRING() \
+ IMG_CHAR *pszFileName; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eError; \
+ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
+ if(eError != PVRSRV_OK) return eError;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_CHAR *pszFileName; \
+ IMG_UINT32 ui32MaxLenScript; \
+ IMG_UINT32 ui32MaxLenFileName; \
+ PVRSRV_ERROR eError; \
+ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
+ if(eError != PVRSRV_OK) return eError; \
+ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
+ if(eError != PVRSRV_OK) return eError;
+
+
+ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
+
+
+ PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen);
+
+
+ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
+
+
+
+
+#define PDUMP_va_list va_list
+#define PDUMP_va_start va_start
+#define PDUMP_va_end va_end
+
+
+
+IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream);
+
+IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream);
+
+IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID);
+
+IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags);
+
+IMG_BOOL PDumpOSIsSuspended(IMG_VOID);
+
+IMG_BOOL PDumpOSJTInitialised(IMG_VOID);
+
+IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream,
+ IMG_UINT8 *psui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Flags);
+
+IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags);
+
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+
+IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) IMG_FORMAT_PRINTF(3, 0);
+
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT8 *pui8LinAddr,
+ IMG_UINT32 ui32PageSize,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_PUINT8 pui8LinAddr,
+ IMG_UINT32 ui32DataPageMask,
+ IMG_UINT32 *pui32PageOffset);
+
+IMG_VOID PDumpOSReleaseExecution(IMG_VOID);
+
+IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID);
+
+PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame);
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/drivers/gpu/pvr/pdumpdefs.h b/drivers/gpu/pvr/pdumpdefs.h
new file mode 100644
index 0000000..e43ce2f
--- /dev/null
+++ b/drivers/gpu/pvr/pdumpdefs.h
@@ -0,0 +1,111 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*****************************************************************************/
+
+#if !defined (__PDUMPDEFS_H__)
+#define __PDUMPDEFS_H__
+
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+
+ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+typedef enum _PDUMP_MEM_FORMAT_
+{
+ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
+ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
+ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
+ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
+ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
+
+ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
+} PDUMP_MEM_FORMAT;
+
+typedef enum _PDUMP_POLL_OPERATOR
+{
+ PDUMP_POLL_OPERATOR_EQUAL = 0,
+ PDUMP_POLL_OPERATOR_LESS = 1,
+ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+ PDUMP_POLL_OPERATOR_GREATER = 3,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#endif /* __PDUMPDEFS_H__ */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
diff --git a/drivers/gpu/pvr/perfkm.h b/drivers/gpu/pvr/perfkm.h
new file mode 100644
index 0000000..e12bc2e
--- /dev/null
+++ b/drivers/gpu/pvr/perfkm.h
@@ -0,0 +1,36 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _PERFKM_H_
+#define _PERFKM_H_
+
+#include "img_types.h"
+
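+/* Performance instrumentation hooks; defined as no-ops in this build. */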
+#define PERFINIT()
+#define PERFDEINIT()
+
+#endif
+
diff --git a/drivers/gpu/pvr/perproc.c b/drivers/gpu/pvr/perproc.c
new file mode 100644
index 0000000..eb73166
--- /dev/null
+++ b/drivers/gpu/pvr/perproc.c
@@ -0,0 +1,305 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "resman.h"
+#include "handle.h"
+#include "perproc.h"
+#include "osperproc.h"
+#if defined(TTRACE)
+#include "ttrace.h"
+#endif
+
+#define HASH_TAB_INIT_SIZE 32
+
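+/* Per-process data records, kept in a hash table keyed by process ID. */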
+static HASH_TABLE *psHashTab = IMG_NULL;
+
+static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINTPTR_T uiPerProc;
+
+ PVR_ASSERT(psPerProc != IMG_NULL);
+
+ if (psPerProc == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
+ if (uiPerProc == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
+
+ PVR_ASSERT(psPerProc->ui32PID == 0);
+ }
+ else
+ {
+ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
+ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
+ }
+
+
+ if (psPerProc->psHandleBase != IMG_NULL)
+ {
+ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
+ return eError;
+ }
+ }
+
+
+ if (psPerProc->hPerProcData != IMG_NULL)
+ {
+ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
+ return eError;
+ }
+ }
+
+
+ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
+ return eError;
+ }
+
+ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(*psPerProc),
+ psPerProc,
+ psPerProc->hBlockAlloc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID)
+{
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+
+ PVR_ASSERT(psHashTab != IMG_NULL);
+
+
+ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
+ return psPerProc;
+}
+
+
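+/* Look up the caller's per-process data, allocating it on first connect, and take a reference on it. */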
+PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+ IMG_HANDLE hBlockAlloc;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psHashTab == IMG_NULL)
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+
+ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
+
+ if (psPerProc == IMG_NULL)
+ {
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(*psPerProc),
+ (IMG_PVOID *)&psPerProc,
+ &hBlockAlloc,
+ "Per Process Data");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
+ return eError;
+ }
+ OSMemSet(psPerProc, 0, sizeof(*psPerProc));
+ psPerProc->hBlockAlloc = hBlockAlloc;
+
+ if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
+ eError = PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED;
+ goto failure;
+ }
+
+ psPerProc->ui32PID = ui32PID;
+ psPerProc->ui32RefCount = 0;
+
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if (ui32Flags == SRV_FLAGS_PDUMP_ACTIVE)
+ {
+ psPerProc->bPDumpActive = IMG_TRUE;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+#endif
+
+
+ eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
+ goto failure;
+ }
+
+
+ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+ &psPerProc->hPerProcData,
+ psPerProc,
+ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
+ goto failure;
+ }
+
+
+ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
+ goto failure;
+ }
+
+
+ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
+ goto failure;
+ }
+
+
+ eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
+ goto failure;
+ }
+#if defined (TTRACE)
+ PVRSRVTimeTraceBufferCreate(ui32PID);
+#endif
+ }
+
+ psPerProc->ui32RefCount++;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
+ ui32PID, psPerProc->ui32RefCount));
+
+ return eError;
+
+failure:
+ (IMG_VOID)FreePerProcessData(psPerProc);
+ return eError;
+}
+
+
+IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+
+ PVR_ASSERT(psHashTab != IMG_NULL);
+
+ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
+ if (psPerProc == IMG_NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Couldn't locate per-process data for PID %u", ui32PID));
+ }
+ else
+ {
+ psPerProc->ui32RefCount--;
+ if (psPerProc->ui32RefCount == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: "
+ "Last close from process 0x%x received", ui32PID));
+
+#if defined (TTRACE)
+ PVRSRVTimeTraceBufferDestroy(ui32PID);
+#endif
+
+
+ PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE);
+
+
+ eError = FreePerProcessData(psPerProc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
+ }
+ }
+ }
+
+ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError));
+ }
+}
+
+
+PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
+{
+ PVR_ASSERT(psHashTab == IMG_NULL);
+
+
+ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
+ if (psHashTab == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
+{
+
+ if (psHashTab != IMG_NULL)
+ {
+
+ HASH_Delete(psHashTab);
+ psHashTab = IMG_NULL;
+ }
+
+ return PVRSRV_OK;
+}
+
diff --git a/drivers/gpu/pvr/perproc.h b/drivers/gpu/pvr/perproc.h
new file mode 100644
index 0000000..842680c
--- /dev/null
+++ b/drivers/gpu/pvr/perproc.h
@@ -0,0 +1,126 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PERPROC_H__
+#define __PERPROC_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "resman.h"
+
+#include "handle.h"
+
+typedef struct _PVRSRV_PER_PROCESS_DATA_
+{
+ IMG_UINT32 ui32PID;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_CONTEXT hResManContext;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hPerProcData;
+#else
+ IMG_HANDLE hPerProcData;
+#endif
+ PVRSRV_HANDLE_BASE *psHandleBase;
+#if defined (SUPPORT_SID_INTERFACE)
+
+ IMG_BOOL bHandlesBatched;
+#else
+#if defined (PVR_SECURE_HANDLES)
+
+ IMG_BOOL bHandlesBatched;
+#endif
+#endif
+ IMG_UINT32 ui32RefCount;
+
+
+ IMG_BOOL bInitProcess;
+#if defined(PDUMP)
+
+ IMG_BOOL bPDumpPersistent;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ IMG_BOOL bPDumpActive;
+#endif
+#endif
+
+ IMG_HANDLE hOsPrivateData;
+} PVRSRV_PER_PROCESS_DATA;
+
+PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID);
+
+PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags);
+IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID);
+
+PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
+PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVFindPerProcessData)
+#endif
+static INLINE
+PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID)
+{
+ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVProcessPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ return (psPerProc != IMG_NULL) ? psPerProc->hOsPrivateData : IMG_NULL;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPerProcessPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID)
+{
+ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVFindPerProcessPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID)
+{
+ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/power.c b/drivers/gpu/pvr/power.c
new file mode 100644
index 0000000..21d7ad4
--- /dev/null
+++ b/drivers/gpu/pvr/power.c
@@ -0,0 +1,719 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "pdump_km.h"
+
+#include "lists.h"
+
+static IMG_BOOL gbInitServerRunning = IMG_FALSE;
+static IMG_BOOL gbInitServerRan = IMG_FALSE;
+static IMG_BOOL gbInitSuccessful = IMG_FALSE;
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
+{
+
+ switch(eInitServerState)
+ {
+ case PVRSRV_INIT_SERVER_RUNNING:
+ gbInitServerRunning = bState;
+ break;
+ case PVRSRV_INIT_SERVER_RAN:
+ gbInitServerRan = bState;
+ break;
+ case PVRSRV_INIT_SERVER_SUCCESSFUL:
+ gbInitSuccessful = bState;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVSetInitServerState : Unknown state %x", eInitServerState));
+ return PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
+{
+ IMG_BOOL bReturnVal;
+
+ switch(eInitServerState)
+ {
+ case PVRSRV_INIT_SERVER_RUNNING:
+ bReturnVal = gbInitServerRunning;
+ break;
+ case PVRSRV_INIT_SERVER_RAN:
+ bReturnVal = gbInitServerRan;
+ break;
+ case PVRSRV_INIT_SERVER_SUCCESSFUL:
+ bReturnVal = gbInitSuccessful;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVGetInitServerState : Unknown state %x", eInitServerState));
+ bReturnVal = IMG_FALSE;
+ }
+
+ return bReturnVal;
+}
+
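+/* System power states shallower than D2 count as "powered". */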
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+ return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
+}
+
+
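+/* Acquire the global power-state lock. ISR callers (ISR_ID) only try the lock once and return PVRSRV_ERROR_RETRY on contention instead of spinning. */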
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
+ IMG_BOOL bSystemPowerEvent)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ IMG_UINT32 ui32Timeout = 1000000;
+ IMG_BOOL bTryLock = (ui32CallerID == ISR_ID);
+
+ SysAcquireData(&psSysData);
+
+ eError = OSPowerLockWrap(bTryLock);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ do
+ {
+ eError = OSLockResource(&psSysData->sPowerStateChangeResource,
+ ui32CallerID);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (bTryLock)
+ {
+
+
+ eError = PVRSRV_ERROR_RETRY;
+ break;
+ }
+
+ OSWaitus(1);
+ ui32Timeout--;
+ } while (ui32Timeout > 0);
+
+ if (eError != PVRSRV_OK)
+ {
+ OSPowerLockUnwrap();
+ }
+
+
+ if ((eError == PVRSRV_OK) &&
+ !bSystemPowerEvent &&
+ !_IsSystemStatePowered(psSysData->eCurrentPowerState))
+ {
+
+ PVRSRVPowerUnlock(ui32CallerID);
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
+{
+ OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
+ OSPowerLockUnwrap();
+}
+
+
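+/* List-iterator callback: run the device pre-power hook followed by the system pre-power hook for the matching device (or for every device). */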
+static PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
+ PVRSRV_ERROR eError;
+
+
+ IMG_BOOL bAllDevices;
+ IMG_UINT32 ui32DeviceIndex;
+ PVRSRV_DEV_POWER_STATE eNewPowerState;
+
+
+ bAllDevices = va_arg(va, IMG_BOOL);
+ ui32DeviceIndex = va_arg(va, IMG_UINT32);
+ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+
+ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+ {
+ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
+ psPowerDevice->eDefaultPowerState : eNewPowerState;
+
+ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
+ {
+ if (psPowerDevice->pfnPrePower != IMG_NULL)
+ {
+
+ eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
+ eNewDevicePowerState,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+
+ eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
+ eNewDevicePowerState,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices,
+ IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+
+ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
+ &PVRSRVDevicePrePowerStateKM_AnyVaCb,
+ bAllDevices,
+ ui32DeviceIndex,
+ eNewPowerState);
+
+ return eError;
+}
+
+static PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
+ PVRSRV_ERROR eError;
+
+
+ IMG_BOOL bAllDevices;
+ IMG_UINT32 ui32DeviceIndex;
+ PVRSRV_DEV_POWER_STATE eNewPowerState;
+
+
+ bAllDevices = va_arg(va, IMG_BOOL);
+ ui32DeviceIndex = va_arg(va, IMG_UINT32);
+ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+
+ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+ {
+ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
+ psPowerDevice->eDefaultPowerState : eNewPowerState;
+
+ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
+ {
+
+ eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
+ eNewDevicePowerState,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (psPowerDevice->pfnPostPower != IMG_NULL)
+ {
+
+ eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
+ eNewDevicePowerState,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices,
+ IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+
+ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
+ &PVRSRVDevicePostPowerStateKM_AnyVaCb,
+ bAllDevices,
+ ui32DeviceIndex,
+ eNewPowerState);
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_UINT32 ui32CallerID,
+ IMG_BOOL bRetainMutex)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
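+	/* For PDUMP builds, bring the device fully on and suspend dumping while a default-state transition is carried out. */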
+ #if defined(PDUMP)
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+ {
+
+
+
+
+ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
+ if(eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+ PDUMPSUSPEND();
+ }
+ #endif
+
+ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
+ if(eError != PVRSRV_OK)
+ {
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+ {
+ PDUMPRESUME();
+ }
+ goto Exit;
+ }
+
+ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+ {
+ PDUMPRESUME();
+ }
+
+Exit:
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
+ }
+
+ if (!bRetainMutex || (eError != PVRSRV_OK))
+ {
+ PVRSRVPowerUnlock(ui32CallerID);
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
+
+ SysAcquireData(&psSysData);
+
+
+ eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (_IsSystemStatePowered(eNewSysPowerState) !=
+ _IsSystemStatePowered(psSysData->eCurrentPowerState))
+ {
+ if (_IsSystemStatePowered(eNewSysPowerState))
+ {
+
+ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
+ }
+ else
+ {
+ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
+ }
+
+
+ eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ }
+
+ if (eNewSysPowerState != psSysData->eCurrentPowerState)
+ {
+
+ eError = SysSystemPrePowerState(eNewSysPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ }
+
+ return eError;
+
+ErrorExit:
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
+ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
+
+
+ psSysData->eFailedPowerState = eNewSysPowerState;
+
+ PVRSRVPowerUnlock(KERNEL_ID);
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYS_DATA *psSysData;
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
+
+ SysAcquireData(&psSysData);
+
+ if (eNewSysPowerState != psSysData->eCurrentPowerState)
+ {
+
+ eError = SysSystemPostPowerState(eNewSysPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+ }
+
+ if (_IsSystemStatePowered(eNewSysPowerState) !=
+ _IsSystemStatePowered(psSysData->eCurrentPowerState))
+ {
+ if (_IsSystemStatePowered(eNewSysPowerState))
+ {
+
+ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
+ }
+ else
+ {
+ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
+ }
+
+
+ eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
+ psSysData->eCurrentPowerState, eNewSysPowerState));
+
+ psSysData->eCurrentPowerState = eNewSysPowerState;
+
+Exit:
+
+ PVRSRVPowerUnlock(KERNEL_ID);
+
+
+ if (_IsSystemStatePowered(eNewSysPowerState) &&
+ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+ {
+
+
+
+ PVRSRVScheduleDeviceCallbacks();
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState);
+ if(eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState);
+ if(eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+
+ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
+ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
+
+
+ psSysData->eFailedPowerState = eNewSysPowerState;
+
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
+ PFN_PRE_POWER pfnPrePower,
+ PFN_POST_POWER pfnPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
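+	/* Registering with no pre/post power callbacks is treated as a request to unregister the device. */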
+ if (pfnPrePower == IMG_NULL &&
+ pfnPostPower == IMG_NULL)
+ {
+ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
+ }
+
+ SysAcquireData(&psSysData);
+
+ eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_POWER_DEV),
+ (IMG_VOID **)&psPowerDevice, IMG_NULL,
+ "Power Device");
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
+ return eError;
+ }
+
+
+ psPowerDevice->pfnPrePower = pfnPrePower;
+ psPowerDevice->pfnPostPower = pfnPostPower;
+ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+ psPowerDevice->hDevCookie = hDevCookie;
+ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
+ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+
+ List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice);
+
+ return (PVRSRV_OK);
+}
+
+
+PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_POWER_DEV *psPowerDev;
+
+ SysAcquireData(&psSysData);
+
+
+ psPowerDev = (PVRSRV_POWER_DEV*)
+ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
+ &MatchPowerDeviceIndex_AnyVaCb,
+ ui32DeviceIndex);
+
+ if (psPowerDev)
+ {
+ List_PVRSRV_POWER_DEV_Remove(psPowerDev);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL);
+
+ }
+
+ return (PVRSRV_OK);
+}
+
+
+IMG_EXPORT
+IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ SysAcquireData(&psSysData);
+
+
+ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
+ OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
+ {
+ return IMG_FALSE;
+ }
+
+ psPowerDevice = (PVRSRV_POWER_DEV*)
+ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
+ &MatchPowerDeviceIndex_AnyVaCb,
+ ui32DeviceIndex);
+ return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON))
+ ? IMG_TRUE : IMG_FALSE;
+}
+
+
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
+ IMG_BOOL bIdleDevice,
+ IMG_VOID *pvInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYS_DATA *psSysData;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ SysAcquireData(&psSysData);
+
+ if (bIdleDevice)
+ {
+
+ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%x", eError));
+ return eError;
+ }
+ }
+
+
+ psPowerDevice = (PVRSRV_POWER_DEV*)
+ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
+ &MatchPowerDeviceIndex_AnyVaCb,
+ ui32DeviceIndex);
+
+	if (psPowerDevice && psPowerDevice->pfnPreClockSpeedChange)
+ {
+ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+ bIdleDevice,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVDevicePreClockSpeedChange : Device %u failed, error:0x%x",
+ ui32DeviceIndex, eError));
+ }
+ }
+
+ if (bIdleDevice && eError != PVRSRV_OK)
+ {
+ PVRSRVPowerUnlock(KERNEL_ID);
+ }
+
+ return eError;
+}
+
+
+IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
+ IMG_BOOL bIdleDevice,
+ IMG_VOID *pvInfo)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ SysAcquireData(&psSysData);
+
+
+ psPowerDevice = (PVRSRV_POWER_DEV*)
+ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
+ &MatchPowerDeviceIndex_AnyVaCb,
+ ui32DeviceIndex);
+
+ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange)
+ {
+ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+ bIdleDevice,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVDevicePostClockSpeedChange : Device %u failed, error:0x%x",
+ ui32DeviceIndex, eError));
+ }
+ }
+
+
+ if (bIdleDevice)
+ {
+
+ PVRSRVPowerUnlock(KERNEL_ID);
+ }
+}
+
diff --git a/drivers/gpu/pvr/power.h b/drivers/gpu/pvr/power.h
new file mode 100644
index 0000000..9e3dcc40
--- /dev/null
+++ b/drivers/gpu/pvr/power.h
@@ -0,0 +1,120 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef POWER_H
+#define POWER_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+
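+/* One record per device registered for power management, linked into the system's power device list. */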
+typedef struct _PVRSRV_POWER_DEV_TAG_
+{
+ PFN_PRE_POWER pfnPrePower;
+ PFN_POST_POWER pfnPostPower;
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
+ IMG_HANDLE hDevCookie;
+ IMG_UINT32 ui32DeviceIndex;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
+ struct _PVRSRV_POWER_DEV_TAG_ *psNext;
+ struct _PVRSRV_POWER_DEV_TAG_ **ppsThis;
+
+} PVRSRV_POWER_DEV;
+
+typedef enum _PVRSRV_INIT_SERVER_STATE_
+{
+ PVRSRV_INIT_SERVER_Unspecified = -1,
+ PVRSRV_INIT_SERVER_RUNNING = 0,
+ PVRSRV_INIT_SERVER_RAN = 1,
+ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
+ PVRSRV_INIT_SERVER_NUM = 3,
+ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
+
+IMG_IMPORT
+IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState);
+
+
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
+ IMG_BOOL bSystemPowerEvent);
+IMG_IMPORT
+IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_UINT32 ui32CallerID,
+ IMG_BOOL bRetainMutex);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
+ PFN_PRE_POWER pfnPrePower,
+ PFN_POST_POWER pfnPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
+
+IMG_IMPORT
+IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
+ IMG_BOOL bIdleDevice,
+ IMG_VOID *pvInfo);
+
+IMG_IMPORT
+IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
+ IMG_BOOL bIdleDevice,
+ IMG_VOID *pvInfo);
+
+#if defined (__cplusplus)
+}
+#endif
+#endif
+
diff --git a/drivers/gpu/pvr/private_data.h b/drivers/gpu/pvr/private_data.h
new file mode 100644
index 0000000..b8751d3
--- /dev/null
+++ b/drivers/gpu/pvr/private_data.h
@@ -0,0 +1,69 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __INCLUDED_PRIVATE_DATA_H_
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+#include <linux/list.h>
+#include <drm/drmP.h>
+#endif
+
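+/* Private data attached to each open PVR services file handle. */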
+typedef struct
+{
+
+ IMG_UINT32 ui32OpenPID;
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hKernelMemInfo;
+#endif
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+
+ struct list_head sDRMAuthListItem;
+
+ struct drm_file *psDRMFile;
+#endif
+
+#if defined(SUPPORT_MEMINFO_IDS)
+
+ IMG_UINT64 ui64Stamp;
+#endif
+
+
+ IMG_HANDLE hBlockAlloc;
+
+#if defined(SUPPORT_DRI_DRM_EXT)
+ IMG_PVOID pPriv;
+#endif
+}
+PVRSRV_FILE_PRIVATE_DATA;
+
+#endif
+
diff --git a/drivers/gpu/pvr/proc.c b/drivers/gpu/pvr/proc.c
new file mode 100644
index 0000000..1df8aff8
--- /dev/null
+++ b/drivers/gpu/pvr/proc.c
@@ -0,0 +1,835 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "services_headers.h"
+
+#include "queue.h"
+#include "resman.h"
+#include "pvrmmap.h"
+#include "pvr_debug.h"
+#include "pvrversion.h"
+#include "proc.h"
+#include "perproc.h"
+#include "env_perproc.h"
+#include "linkage.h"
+
+#include "lists.h"
+
+static struct proc_dir_entry * dir;
+
+static const IMG_CHAR PVRProcDirRoot[] = "pvr";
+
+static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
+static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
+static void pvr_proc_seq_stop (struct seq_file *m, void *v);
+static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
+static int pvr_proc_seq_show (struct seq_file *m, void *v);
+static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+
+static struct file_operations pvr_proc_operations =
+{
+ .open = pvr_proc_open,
+ .read = seq_read,
+ .write = pvr_proc_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct seq_operations pvr_proc_seq_operations =
+{
+ .start = pvr_proc_seq_start,
+ .next = pvr_proc_seq_next,
+ .stop = pvr_proc_seq_stop,
+ .show = pvr_proc_seq_show,
+};
+
+static struct proc_dir_entry* g_pProcQueue;
+static struct proc_dir_entry* g_pProcVersion;
+static struct proc_dir_entry* g_pProcSysNodes;
+
+#ifdef DEBUG
+static struct proc_dir_entry* g_pProcDebugLevel;
+#endif
+
+#ifdef PVR_MANUAL_POWER_CONTROL
+static struct proc_dir_entry* g_pProcPowerLevel;
+#endif
+
+
+static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
+
+static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
+static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
+
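+/* Append a formatted string to 'buffer' at offset 'off', clamping the result to the buffer size. */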
+off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
+{
+ IMG_INT n;
+ size_t space = size - (size_t)off;
+ va_list ap;
+
+ va_start (ap, format);
+
+ n = vsnprintf (buffer+off, space, format, ap);
+
+ va_end (ap);
+
+ if (n >= (IMG_INT)space || n < 0)
+ {
+
+ buffer[size - 1] = 0;
+ return (off_t)(size - 1);
+ }
+ else
+ {
+ return (off + (off_t)n);
+ }
+}
+
+
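+/* Offset helpers for single-element seq files: any non-NULL token (here (void*)2) represents the single element. */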
+void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
+{
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ return (void*)2;
+ return NULL;
+}
+
+
+void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
+{
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+
+ if(off == 1)
+ return (void*)2;
+
+ return NULL;
+}
+
+
+static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
+{
+	IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
+	struct seq_file *seq;
+	struct proc_dir_entry* pvr_proc_entry = PDE(inode);
+
+	if (ret != 0)
+	{
+		return ret;
+	}
+
+	/* Hand this entry's handler table to the seq_file iterator. */
+	seq = (struct seq_file*)file->private_data;
+	seq->private = pvr_proc_entry->data;
+
+	return ret;
+}
+
+static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct proc_dir_entry * dp;
+
+ PVR_UNREFERENCED_PARAMETER(ppos);
+ dp = PDE(inode);
+
+ if (!dp->write_proc)
+ return -EIO;
+
+ return dp->write_proc(file, buffer, count, dp->data);
+}
+
+
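+/* seq_file glue: forward start/stop/next/show to the handlers registered with each proc entry. */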
+static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
+ if(handlers->startstop != NULL)
+ handlers->startstop(proc_seq_file, IMG_TRUE);
+ return handlers->off2element(proc_seq_file, *pos);
+}
+
+static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
+ PVR_UNREFERENCED_PARAMETER(v);
+
+ if(handlers->startstop != NULL)
+ handlers->startstop(proc_seq_file, IMG_FALSE);
+}
+
+static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
+ (*pos)++;
+ if( handlers->next != NULL)
+ return handlers->next( proc_seq_file, v, *pos );
+ return handlers->off2element(proc_seq_file, *pos);
+}
+
+static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
+ handlers->show( proc_seq_file,v );
+ return 0;
+}
+
+
+
+static struct proc_dir_entry* CreateProcEntryInDirSeq(
+ struct proc_dir_entry *pdir,
+ const IMG_CHAR * name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler,
+ write_proc_t whandler
+ )
+{
+
+ struct proc_dir_entry * file;
+ mode_t mode;
+
+ if (!dir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
+ return NULL;
+ }
+
+ mode = S_IFREG;
+
+ if (show_handler)
+ {
+ mode |= S_IRUGO;
+ }
+
+ if (whandler)
+ {
+ mode |= S_IWUSR;
+ }
+
+ file=create_proc_entry(name, mode, pdir);
+
+ if (file)
+ {
+ PVR_PROC_SEQ_HANDLERS *seq_handlers;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
+ file->owner = THIS_MODULE;
+#endif
+
+ file->proc_fops = &pvr_proc_operations;
+ file->write_proc = whandler;
+
+
+ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
+ if(file->data)
+ {
+ seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data;
+ seq_handlers->next = next_handler;
+ seq_handlers->show = show_handler;
+ seq_handlers->off2element = off2element_handler;
+ seq_handlers->startstop = startstop_handler;
+ seq_handlers->data = data;
+
+ return file;
+		}
+
+		/* Handler allocation failed: remove the entry we just created so it is not leaked. */
+		remove_proc_entry(name, pdir);
+	}
+
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
+ return NULL;
+}
+
+
+struct proc_dir_entry* CreateProcReadEntrySeq (
+ const IMG_CHAR * name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler
+ )
+{
+ return CreateProcEntrySeq(name,
+ data,
+ next_handler,
+ show_handler,
+ off2element_handler,
+ startstop_handler,
+ NULL);
+}
+
+struct proc_dir_entry* CreateProcEntrySeq (
+ const IMG_CHAR * name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler,
+ write_proc_t whandler
+ )
+{
+ return CreateProcEntryInDirSeq(
+ dir,
+ name,
+ data,
+ next_handler,
+ show_handler,
+ off2element_handler,
+ startstop_handler,
+ whandler
+ );
+}
+
+
+
+struct proc_dir_entry* CreatePerProcessProcEntrySeq (
+ const IMG_CHAR * name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler,
+ write_proc_t whandler
+ )
+{
+ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
+ IMG_UINT32 ui32PID;
+
+ if (!dir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot));
+ return NULL;
+ }
+
+ ui32PID = OSGetCurrentProcessIDKM();
+
+ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
+ if (!psPerProc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data"));
+
+ return NULL;
+ }
+
+ if (!psPerProc->psProcDir)
+ {
+ IMG_CHAR dirname[16];
+ IMG_INT ret;
+
+ ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID);
+
+ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
+ {
+			PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: couldn't generate per process proc directory name \"%u\"", ui32PID));
+ return NULL;
+ }
+ else
+ {
+ psPerProc->psProcDir = proc_mkdir(dirname, dir);
+ if (!psPerProc->psProcDir)
+ {
+				PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: couldn't create per process proc directory /proc/%s/%u",
+ PVRProcDirRoot, ui32PID));
+ return NULL;
+ }
+ }
+ }
+
+ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler,
+ show_handler,off2element_handler,startstop_handler,whandler);
+}
+
+
+IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry )
+{
+ if (dir)
+ {
+ void* data = proc_entry->data ;
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name));
+
+ remove_proc_entry(proc_entry->name, dir);
+ if( data)
+ kfree( data );
+
+ }
+}
+
+IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry)
+{
+ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
+
+ psPerProc = LinuxTerminatingProcessPrivateData();
+ if (!psPerProc)
+ {
+ psPerProc = PVRSRVFindPerProcessPrivateData();
+ if (!psPerProc)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "RemovePerProcessProcEntrySeq: can't "
+ "remove %s, no per process data", proc_entry->name));
+ return;
+ }
+ }
+
+ if (psPerProc->psProcDir)
+ {
+ void* data = proc_entry->data ;
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name));
+
+ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
+ if(data)
+ kfree( data );
+ }
+}
+
+static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off,
+ IMG_INT count, IMG_INT *eof, IMG_VOID *data)
+{
+
+ pvr_read_proc_t *pprn = (pvr_read_proc_t *)data;
+
+ off_t len = pprn (page, (size_t)count, off);
+
+ if (len == END_OF_FILE)
+ {
+ len = 0;
+ *eof = 1;
+ }
+ else if (!len)
+ {
+ *start = (IMG_CHAR *) 0;
+ }
+ else
+ {
+ *start = (IMG_CHAR *) 1;
+ }
+
+ return len;
+}
+
+
+static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
+{
+ struct proc_dir_entry * file;
+ mode_t mode;
+
+ if (!pdir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist"));
+
+ return -ENOMEM;
+ }
+
+ mode = S_IFREG;
+
+ if (rhandler)
+ {
+ mode |= S_IRUGO;
+ }
+
+ if (whandler)
+ {
+ mode |= S_IWUSR;
+ }
+
+ file = create_proc_entry(name, mode, pdir);
+
+ if (file)
+ {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
+ file->owner = THIS_MODULE;
+#endif
+ file->read_proc = rhandler;
+ file->write_proc = whandler;
+ file->data = data;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name));
+
+ return 0;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name));
+
+ return -ENOMEM;
+}
+
+
+IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
+{
+ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
+}
+
+
+IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
+{
+ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
+ IMG_UINT32 ui32PID;
+
+ if (!dir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot));
+
+ return -ENOMEM;
+ }
+
+ ui32PID = OSGetCurrentProcessIDKM();
+
+ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
+ if (!psPerProc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data"));
+
+ return -ENOMEM;
+ }
+
+ if (!psPerProc->psProcDir)
+ {
+ IMG_CHAR dirname[16];
+ IMG_INT ret;
+
+ ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID);
+
+ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
+
+ return -ENOMEM;
+ }
+ else
+ {
+ psPerProc->psProcDir = proc_mkdir(dirname, dir);
+ if (!psPerProc->psProcDir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID));
+
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data);
+}
+
+
+IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler)
+{
+ struct proc_dir_entry * file;
+
+ if (!dir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
+
+ return -ENOMEM;
+ }
+
+
+ file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler);
+
+ if (file)
+ {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
+ file->owner = THIS_MODULE;
+#endif
+ return 0;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
+
+ return -ENOMEM;
+}
+
+
+IMG_INT CreateProcEntries(IMG_VOID)
+{
+ dir = proc_mkdir (PVRProcDirRoot, NULL);
+
+ if (!dir)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot));
+
+ return -ENOMEM;
+ }
+
+ g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL);
+ g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL);
+ g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL);
+
+ if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot));
+
+ return -ENOMEM;
+ }
+
+
+#ifdef DEBUG
+
+ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
+ ProcSeqShowDebugLevel,
+ ProcSeq1ElementOff2Element, NULL,
+ (IMG_VOID*)PVRDebugProcSetLevel);
+ if(!g_pProcDebugLevel)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot));
+
+ return -ENOMEM;
+ }
+
+#ifdef PVR_MANUAL_POWER_CONTROL
+ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
+ ProcSeqShowPowerLevel,
+ ProcSeq1ElementOff2Element, NULL,
+ PVRProcSetPowerLevel);
+ if(!g_pProcPowerLevel)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot));
+
+ return -ENOMEM;
+ }
+#endif
+#endif
+
+ return 0;
+}
+
+
+IMG_VOID RemoveProcEntry(const IMG_CHAR * name)
+{
+ if (dir)
+ {
+ remove_proc_entry(name, dir);
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name));
+ }
+}
+
+
+IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name)
+{
+ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
+
+ psPerProc = LinuxTerminatingProcessPrivateData();
+ if (!psPerProc)
+ {
+ psPerProc = PVRSRVFindPerProcessPrivateData();
+ if (!psPerProc)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "RemovePerProcessProcEntry: can't "
+ "remove %s, no per process data", name));
+ return;
+ }
+ }
+
+ if (psPerProc->psProcDir)
+ {
+ remove_proc_entry(name, psPerProc->psProcDir);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name));
+ }
+}
+
+
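+/* Remove any entries a process left behind before deleting its per-process proc directory. */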
+IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
+{
+ if (psPerProc->psProcDir)
+ {
+ while (psPerProc->psProcDir->subdir)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name));
+
+ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
+ }
+ RemoveProcEntry(psPerProc->psProcDir->name);
+ }
+}
+
+IMG_VOID RemoveProcEntries(IMG_VOID)
+{
+#ifdef DEBUG
+ RemoveProcEntrySeq( g_pProcDebugLevel );
+#ifdef PVR_MANUAL_POWER_CONTROL
+ RemoveProcEntrySeq( g_pProcPowerLevel );
+#endif
+#endif
+
+ RemoveProcEntrySeq(g_pProcQueue);
+ RemoveProcEntrySeq(g_pProcVersion);
+ RemoveProcEntrySeq(g_pProcSysNodes);
+
+ while (dir->subdir)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name));
+
+ RemoveProcEntry(dir->subdir->name);
+ }
+
+ remove_proc_entry(PVRProcDirRoot, NULL);
+}
+
+static void ProcSeqShowVersion(struct seq_file *sfile,void* el)
+{
+ SYS_DATA *psSysData;
+ IMG_CHAR *pszSystemVersionString = "None";
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf(sfile,
+ "Version %s (%s) %s\n",
+ PVRVERSION_STRING,
+ PVR_BUILD_TYPE, PVR_BUILD_DIR);
+ return;
+ }
+
+ psSysData = SysAcquireDataNoCheck();
+ if(psSysData != IMG_NULL && psSysData->pszVersionString != IMG_NULL)
+ {
+ pszSystemVersionString = psSysData->pszVersionString;
+ }
+
+ seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString);
+}
+
+static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
+{
+ switch (deviceType)
+ {
+ default:
+ {
+ static IMG_CHAR text[10];
+
+ sprintf(text, "?%x", (IMG_UINT)deviceType);
+
+ return text;
+ }
+ }
+}
+
+
+static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
+{
+ switch (deviceClass)
+ {
+ case PVRSRV_DEVICE_CLASS_3D:
+ {
+ return "3D";
+ }
+ case PVRSRV_DEVICE_CLASS_DISPLAY:
+ {
+ return "display";
+ }
+ case PVRSRV_DEVICE_CLASS_BUFFER:
+ {
+ return "buffer";
+ }
+ default:
+ {
+ static IMG_CHAR text[10];
+
+ sprintf(text, "?%x", (IMG_UINT)deviceClass);
+ return text;
+ }
+ }
+}
+
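+/* Walk the device node list, decrementing the requested seq_file offset until the target node is reached. */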
+static IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va)
+{
+ off_t *pOff = va_arg(va, off_t*);
+ if (--(*pOff))
+ {
+ return IMG_NULL;
+ }
+ else
+ {
+ return psNode;
+ }
+}
+
+static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf( sfile,
+ "Registered nodes\n"
+ "Addr Type Class Index Ref pvDev Size Res\n");
+ return;
+ }
+
+ psDevNode = (PVRSRV_DEVICE_NODE*)el;
+
+ seq_printf( sfile,
+ "%p %-8s %-8s %4d %2u %p %3u %p\n",
+ psDevNode,
+ deviceTypeToString(psDevNode->sDevId.eDeviceType),
+ deviceClassToString(psDevNode->sDevId.eDeviceClass),
+ psDevNode->sDevId.eDeviceClass,
+ psDevNode->ui32RefCount,
+ psDevNode->pvDevice,
+ psDevNode->ui32pvDeviceSize,
+ psDevNode->hResManContext);
+}
+
+static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE*psDevNode = IMG_NULL;
+
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ psSysData = SysAcquireDataNoCheck();
+ if (psSysData != IMG_NULL)
+ {
+
+ psDevNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ DecOffPsDev_AnyVaCb,
+ &off);
+ }
+
+
+ return (void*)psDevNode;
+}
+
diff --git a/drivers/gpu/pvr/proc.h b/drivers/gpu/pvr/proc.h
new file mode 100644
index 0000000..2066d71
--- /dev/null
+++ b/drivers/gpu/pvr/proc.h
@@ -0,0 +1,108 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __SERVICES_PROC_H__
+#define __SERVICES_PROC_H__
+
+#include <asm/system.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#define END_OF_FILE (off_t) -1
+
+typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t);
+
+
+#define PVR_PROC_SEQ_START_TOKEN (void*)1
+typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
+typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
+typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
+typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start);
+
+typedef struct _PVR_PROC_SEQ_HANDLERS_ {
+ pvr_next_proc_seq_t *next;
+ pvr_show_proc_seq_t *show;
+ pvr_off2element_proc_seq_t *off2element;
+ pvr_startstop_proc_seq_t *startstop;
+ IMG_VOID *data;
+} PVR_PROC_SEQ_HANDLERS;
+
+
+void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
+
+void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
+
+off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
+ __attribute__((format(printf, 4, 5)));
+
+IMG_INT CreateProcEntries(IMG_VOID);
+
+IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler);
+
+IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
+
+IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
+
+IMG_VOID RemoveProcEntry(const IMG_CHAR * name);
+
+IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name);
+
+IMG_VOID RemoveProcEntries(IMG_VOID);
+
+struct proc_dir_entry* CreateProcReadEntrySeq (
+ const IMG_CHAR* name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler
+ );
+
+struct proc_dir_entry* CreateProcEntrySeq (
+ const IMG_CHAR* name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler,
+ write_proc_t whandler
+ );
+
+struct proc_dir_entry* CreatePerProcessProcEntrySeq (
+ const IMG_CHAR* name,
+ IMG_VOID* data,
+ pvr_next_proc_seq_t next_handler,
+ pvr_show_proc_seq_t show_handler,
+ pvr_off2element_proc_seq_t off2element_handler,
+ pvr_startstop_proc_seq_t startstop_handler,
+ write_proc_t whandler
+ );
+
+
+IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
+IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
+
+#endif
diff --git a/drivers/gpu/pvr/pvr_bridge.h b/drivers/gpu/pvr/pvr_bridge.h
new file mode 100644
index 0000000..443ad1e
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_bridge.h
@@ -0,0 +1,1786 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PVR_BRIDGE_H__
+#define __PVR_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "servicesint.h"
+
+#ifdef __linux__
+
+ #include <linux/ioctl.h>
+
+ #define PVRSRV_IOC_GID 'g'
+ #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
+ #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
+ #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
+ #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
+
+#else
+
+ #error Unknown platform: Cannot define ioctls
+
+ #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + (INDEX))
+ #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + (INDEX))
+ #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + (INDEX))
+ #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + (INDEX))
+
+ #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID
+#endif
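+
+/*
+ * Bridge command numbering: each functional group of ioctls below defines a
+ * <GROUP>_CMD_FIRST that continues from the previous group's <GROUP>_CMD_LAST,
+ * so the command indices chain one after another and optional groups that are
+ * compiled out (for example the overlay or PDUMP blocks) simply collapse onto
+ * the preceding group's last value.
+ */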
+
+
+#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0UL
+#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
+#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
+#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
+#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
+#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
+#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
+#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
+#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
+#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
+#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
+#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
+#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
+#define PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
+#define PVRSRV_BRIDGE_MAP_DEV_MEMORY_2 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+29)
+#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
+#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
+
+#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
+#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
+
+#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
+#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
+
+#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
+#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
+
+#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
+#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
+
+#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
+#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
+#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
+#else
+#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST PVRSRV_BRIDGE_MISC_CMD_LAST
+#endif
+
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_LAST+1)
+#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
+#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
+#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
+#else
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
+#endif
+
+#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
+#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
+
+#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
+#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
+
+#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
+#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
+#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
+#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2 PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
+#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)
+#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)
+
+#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
+#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
+
+#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
+#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
+
+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
+#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
+
+#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
+#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
+
+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
+#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+
+#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
+#define PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+6)
+#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_ALLOC_SYNC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+8)
+#define PVRSRV_BRIDGE_FREE_SYNC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)
+#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)
+
+#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
+
+
+#define PVRSRV_KERNEL_MODE_CLIENT 1
+
+typedef struct PVRSRV_BRIDGE_RETURN_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_VOID *pvData;
+
+}PVRSRV_BRIDGE_RETURN;
+
+
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+ IMG_UINT32 ui32BridgeID;
+ IMG_UINT32 ui32Size;
+ IMG_VOID *pvParamIn;
+ IMG_UINT32 ui32InBufferSize;
+ IMG_VOID *pvParamOut;
+ IMG_UINT32 ui32OutBufferSize;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelServices;
+#else
+ IMG_HANDLE hKernelServices;
+#endif
+}PVRSRV_BRIDGE_PACKAGE;
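+
+/*
+ * PVRSRV_BRIDGE_PACKAGE is the envelope passed with every bridge ioctl: it
+ * names the command (ui32BridgeID), carries the user-space input and output
+ * parameter buffers together with their sizes, and holds the per-process
+ * services handle that the kernel dispatcher validates before running the
+ * call (see PVRSRV_BridgeDispatchKM in pvr_bridge_k.c).
+ */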
+
+
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_SERVICES_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32Flags;
+} PVRSRV_BRIDGE_IN_CONNECT_SERVICES;
+
+typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 uiDevIndex;
+ PVRSRV_DEVICE_TYPE eDeviceType;
+
+} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_DEVICE_CLASS sDeviceClass;
+} PVRSRV_BRIDGE_IN_ENUMCLASS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_DEVICE_CLASS DeviceClass;
+ IMG_VOID* pvDevInfo;
+
+}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemContext;
+#endif
+
+}PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
+
+
+typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemContext;
+#endif
+
+}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemHeap;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemHeap;
+#endif
+ IMG_UINT32 ui32Attribs;
+ IMG_SIZE_T ui32Size;
+ IMG_SIZE_T ui32Alignment;
+ IMG_PVOID pvPrivData;
+ IMG_UINT32 ui32PrivDataLength;
+
+}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
+
+typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+
+}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ IMG_PVOID pvLinAddr;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMappingInfo;
+#else
+ IMG_HANDLE hMappingInfo;
+#endif
+
+}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
+
+typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hDevCookie;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+
+}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
+
+typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hDevCookie;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+
+}PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;
+
+typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32Flags;
+
+} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
+
+typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_SIZE_T ui32QueueSize;
+
+}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ PVRSRV_QUEUE_INFO *psQueueInfo;
+
+}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle;
+#else
+ IMG_HANDLE hMHandle;
+#endif
+} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;
+
+
+typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMHandle;
+#else
+ IMG_HANDLE hMHandle;
+#endif
+} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;
+
+
+typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap;
+#else
+ IMG_HANDLE hDevMemHeap;
+#endif
+ IMG_DEV_VIRTADDR *psDevVAddr;
+ IMG_SIZE_T ui32Size;
+ IMG_SIZE_T ui32Alignment;
+
+}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
+
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelServices;
+#else
+ IMG_HANDLE hKernelServices;
+#endif
+}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
+
+typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+ IMG_SID hKernelSyncInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
+
+
+typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
+
+
+typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+ IMG_SID hDstDevMemHeap;
+#else
+ IMG_HANDLE hKernelMemInfo;
+ IMG_HANDLE hDstDevMemHeap;
+#endif
+
+}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDstKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
+
+}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ IMG_SYS_PHYADDR *psSysPAddr;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
+
+typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceClassBuffer;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDeviceClassBuffer;
+ IMG_HANDLE hDevMemContext;
+#endif
+
+}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+ IMG_SID hMappingInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ IMG_HANDLE hMappingInfo;
+#endif
+
+}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+#endif
+ IMG_BOOL bIsRead;
+ IMG_BOOL bUseLastOpDumpVal;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+
+}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_PVOID pvLinAddr;
+ IMG_PVOID pvAltLinAddr;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Bytes;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_PVOID pvAltLinAddr;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+#endif
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Bytes;
+
+}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ PVRSRV_HWREG sHWReg;
+ IMG_UINT32 ui32Flags;
+ IMG_CHAR szRegRegion[32];
+
+}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ PVRSRV_HWREG sHWReg;
+ IMG_UINT32 ui32Mask;
+ IMG_UINT32 ui32Flags;
+ IMG_CHAR szRegRegion[32];
+}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_HWREG sHWReg;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hKernelMemInfo;
+#endif
+ IMG_DEV_PHYADDR *pPages;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32Length;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32Frame;
+
+}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32StrideInBytes;
+ IMG_DEV_VIRTADDR sDevBaseAddr;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_UINT32 ui32Size;
+ PDUMP_PIXEL_FORMAT ePixelFormat;
+ PDUMP_MEM_FORMAT eMemFormat;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
+
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32Address;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32Flags;
+ IMG_CHAR szRegRegion[32];
+
+}PVRSRV_BRIDGE_IN_PDUMP_READREG;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ IMG_BOOL bContinuous;
+
+}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hKernelMemInfo;
+#endif
+ IMG_UINT32 ui32Offset;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
+
+typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_UINT32 ui32RegOffset;
+ IMG_BOOL bLastFrame;
+}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
+
+typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumDevices;
+ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
+
+}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
+{
+
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumDevices;
+ IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
+
+}PVRSRV_BRIDGE_OUT_ENUMCLASS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32DeviceID;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
+
+typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+
+}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_VOID *pvLinAddr;
+ IMG_SIZE_T ui32ByteSize;
+ IMG_SIZE_T ui32PageOffset;
+ IMG_BOOL bPhysContig;
+ IMG_UINT32 ui32NumPageTableEntries;
+ IMG_SYS_PHYADDR *psSysPAddr;
+ IMG_UINT32 ui32Flags;
+
+}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
+
+typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
+
+typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
+
+
+#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
+#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
+#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
+#define PVRSRV_MAX_DC_CLIP_RECTS 32
+
+typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Count;
+ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
+
+}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+ DISPLAY_FORMAT sFormat;
+
+}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Count;
+ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
+
+}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
+{
+ PVRSRV_ERROR eError;
+ DISPLAY_INFO sDisplayInfo;
+
+}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hBuffer;
+#else
+ IMG_HANDLE hBuffer;
+#endif
+
+}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
+
+
+typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+ IMG_UINT32 ui32Flags;
+ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
+ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
+ IMG_UINT32 ui32BufferCount;
+ IMG_UINT32 ui32OEMFlags;
+ IMG_UINT32 ui32SwapChainID;
+
+} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hSwapChain;
+#endif
+ IMG_UINT32 ui32SwapChainID;
+
+} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
+
+
+typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+
+} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+ IMG_RECT sRect;
+
+} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+ IMG_UINT32 ui32CKColour;
+
+} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+
+} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BufferCount;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+#else
+ IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+#endif
+ IMG_SYS_PHYADDR asPhyAddr[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
+} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hBuffer;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hBuffer;
+#endif
+ IMG_UINT32 ui32SwapInterval;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hPrivateTag;
+#else
+ IMG_HANDLE hPrivateTag;
+#endif
+ IMG_UINT32 ui32ClipRectCount;
+ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
+
+} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
+
+typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+ IMG_UINT32 ui32SwapInterval;
+
+ IMG_UINT32 ui32NumMemInfos;
+ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos;
+ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfos;
+
+ IMG_UINT32 ui32PrivDataLength;
+ IMG_PVOID pvPrivData;
+
+} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2;
+
+typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+ IMG_SID hSwapChain;
+#else
+ IMG_HANDLE hDeviceKM;
+ IMG_HANDLE hSwapChain;
+#endif
+
+} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
+
+
+typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32DeviceID;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+
+} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
+{
+ PVRSRV_ERROR eError;
+ BUFFER_INFO sBufferInfo;
+
+} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+ IMG_UINT32 ui32BufferIndex;
+
+} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hBuffer;
+#else
+ IMG_HANDLE hBuffer;
+#endif
+
+} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32ClientHeapCount;
+ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+
+} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_UINT32 ui32ClientHeapCount;
+ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+
+} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap;
+#else
+ IMG_HANDLE hDevMemHeap;
+#endif
+
+} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+
+} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMemInfo;
+#else
+ IMG_HANDLE hMemInfo;
+#endif
+#if defined(SUPPORT_MEMINFO_IDS)
+ IMG_UINT64 ui64Stamp;
+#endif
+
+} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_PVOID pvLinAddr;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMappingInfo;
+#else
+ IMG_HANDLE hMappingInfo;
+#endif
+
+}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_SIZE_T ui32Total;
+ IMG_SIZE_T ui32Free;
+ IMG_SIZE_T ui32LargestBlock;
+
+} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
+
+
+#include "pvrmmap.h"
+typedef struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG
+{
+ PVRSRV_ERROR eError;
+
+
+ IMG_UINT32 ui32MMapOffset;
+
+
+ IMG_UINT32 ui32ByteOffset;
+
+
+ IMG_UINT32 ui32RealByteSize;
+
+
+ IMG_UINT32 ui32UserVAddr;
+
+} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA;
+
+typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG
+{
+ PVRSRV_ERROR eError;
+
+
+ IMG_BOOL bMUnmap;
+
+
+ IMG_UINT32 ui32UserVAddr;
+
+
+ IMG_UINT32 ui32RealByteSize;
+} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA;
+
+typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_MISC_INFO sMiscInfo;
+
+}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_MISC_INFO sMiscInfo;
+
+}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_MISC_INFO sMiscInfo;
+
+}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_MISC_INFO sMiscInfo;
+
+}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bIsCapturing;
+
+} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
+
+typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_SIZE_T ui32Total;
+ IMG_SIZE_T ui32Available;
+
+} PVRSRV_BRIDGE_IN_GET_FB_STATS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_SYS_PHYADDR sSysPhysAddr;
+ IMG_UINT32 uiSizeInBytes;
+
+} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
+{
+ IMG_PVOID pvUserAddr;
+ IMG_UINT32 uiActualSize;
+ IMG_PVOID pvProcess;
+
+} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
+
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_PVOID pvUserAddr;
+ IMG_PVOID pvProcess;
+
+} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
+{
+ IMG_PVOID *ppvTbl;
+ IMG_UINT32 uiTblSize;
+
+} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
+
+
+#if !defined (SUPPORT_SID_INTERFACE)
+typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_HANDLE hDevCookie;
+ IMG_PVOID pvProcess;
+
+} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
+{
+ IMG_SYS_PHYADDR sRegsPhysBase;
+ IMG_VOID *pvRegsBase;
+ IMG_PVOID pvProcess;
+ IMG_UINT32 ulNoOfEntries;
+ IMG_PVOID pvTblLinAddr;
+
+} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
+
+
+typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_HANDLE hDevCookie;
+ IMG_PVOID pvProcess;
+ IMG_VOID *pvRegsBase;
+
+} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
+
+typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_HANDLE hDevCookie;
+ IMG_UINT32 ui32StatusAndMask;
+ PVRSRV_ERROR eError;
+
+} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
+#endif
+
+typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_BOOL bInitSuccesful;
+} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32Flags;
+ IMG_SIZE_T ui32Size;
+}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
+
+typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
+
+typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+ IMG_SID hMappingInfo;
+#else
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
+
+typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
+{
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
+
+typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hKernelMemInfo;
+#endif
+}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
+
+typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
+{
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+#endif
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
+
+typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
+}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
+
+typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
+{
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
+
+typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hOSEventKM;
+#else
+ IMG_HANDLE hOSEventKM;
+#endif
+} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
+
+typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
+{
+ PVRSRV_EVENTOBJECT sEventObject;
+} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
+
+typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_UINT32 hOSEvent;
+#else
+ IMG_HANDLE hOSEvent;
+#endif
+ PVRSRV_ERROR eError;
+} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
+
+typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
+{
+ PVRSRV_EVENTOBJECT sEventObject;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hOSEventKM;
+#else
+ IMG_HANDLE hOSEventKM;
+#endif
+} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
+
+typedef struct PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ_TAG
+{
+ PVRSRV_ERROR eError;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj;
+#else
+ IMG_HANDLE hKernelSyncInfoModObj;
+#endif
+
+} PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ;
+
+typedef struct PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj;
+#else
+ IMG_HANDLE hKernelSyncInfoModObj;
+#endif
+} PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ;
+
+typedef struct PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj;
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfoModObj;
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+ IMG_UINT32 ui32ModifyFlags;
+
+} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS;
+
+typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj;
+#else
+ IMG_HANDLE hKernelSyncInfoModObj;
+#endif
+} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS;
+
+typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG
+{
+ PVRSRV_ERROR eError;
+
+
+ IMG_UINT32 ui32ReadOpsPending;
+ IMG_UINT32 ui32WriteOpsPending;
+ IMG_UINT32 ui32ReadOps2Pending;
+
+} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS;
+
+typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+
+} PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN;
+
+typedef struct PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN_TAG
+{
+ PVRSRV_ERROR eError;
+
+ IMG_UINT32 ui32ReadOpsPending;
+ IMG_UINT32 ui32WriteOpsPending;
+ IMG_UINT32 ui32ReadOps2Pending;
+
+} PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN;
+
+typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+ IMG_UINT32 ui32ReadOpsPendingSnapshot;
+ IMG_UINT32 ui32WriteOpsPendingSnapshot;
+ IMG_UINT32 ui32ReadOps2PendingSnapshot;
+} PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN;
+
+typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj;
+#else
+ IMG_HANDLE hKernelSyncInfoModObj;
+#endif
+} PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ;
+
+typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+ IMG_UINT32 ui32Delta;
+} PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA;
+
+typedef struct PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+} PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO;
+
+typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO_TAG
+{
+ PVRSRV_ERROR eError;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+} PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO;
+
+typedef struct PVRSRV_BRIDGE_IN_FREE_SYNC_INFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+} PVRSRV_BRIDGE_IN_FREE_SYNC_INFO;
+
+typedef struct PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS_TAG
+{
+ IMG_SID hKernelMemInfo;
+ IMG_UINT32 ui32Attribs;
+} PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pvr_bridge_k.c b/drivers/gpu/pvr/pvr_bridge_k.c
new file mode 100644
index 0000000..9185df4
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_bridge_k.c
@@ -0,0 +1,464 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_defs.h"
+#include "services.h"
+#include "pvr_bridge.h"
+#include "perproc.h"
+#include "mutex.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "proc.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pvr_bridge_km.h"
+#include "pvr_uaccess.h"
+#include "refcount.h"
+#include "buffer_manager.h"
+
+#if defined(SUPPORT_DRI_DRM)
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
+#include "env_perproc.h"
+#endif
+#endif
+
+#if defined(SUPPORT_VGX)
+#include "vgx_bridge.h"
+#endif
+
+#if defined(SUPPORT_SGX)
+#include "sgx_bridge.h"
+#endif
+
+#include "bridged_pvr_bridge.h"
+
+#if defined(SUPPORT_DRI_DRM)
+#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
+#else
+#define PRIVATE_DATA(pFile) ((pFile)->private_data)
+#endif
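+
+/* PRIVATE_DATA hides where the per-file driver state lives: the DRM build
+ * keeps it in drm_file::driver_priv, the plain character-device build in
+ * file::private_data. */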
+
+#if defined(DEBUG_BRIDGE_KM)
+
+static struct proc_dir_entry *g_ProcBridgeStats = 0;
+static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off);
+static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el);
+static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off);
+static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start);
+
+#endif
+
+extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
+
+#if defined(SUPPORT_MEMINFO_IDS)
+static IMG_UINT64 ui64Stamp;
+#endif
+
+PVRSRV_ERROR
+LinuxBridgeInit(IMG_VOID)
+{
+#if defined(DEBUG_BRIDGE_KM)
+ {
+ g_ProcBridgeStats = CreateProcReadEntrySeq(
+ "bridge_stats",
+ NULL,
+ ProcSeqNextBridgeStats,
+ ProcSeqShowBridgeStats,
+ ProcSeqOff2ElementBridgeStats,
+ ProcSeqStartstopBridgeStats
+ );
+ if(!g_ProcBridgeStats)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+#endif
+ return CommonBridgeInit();
+}
+
+IMG_VOID
+LinuxBridgeDeInit(IMG_VOID)
+{
+#if defined(DEBUG_BRIDGE_KM)
+ RemoveProcEntrySeq(g_ProcBridgeStats);
+#endif
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+
+static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
+{
+ if(start)
+ {
+ LinuxLockMutex(&gPVRSRVLock);
+ }
+ else
+ {
+ LinuxUnLockMutex(&gPVRSRVLock);
+ }
+}
+
+
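+/* seq_file offset convention for the bridge_stats entry: offset 0 yields the
+ * synthetic header token, offsets 1..BRIDGE_DISPATCH_TABLE_ENTRY_COUNT map to
+ * dispatch-table entries [0..N-1], and anything beyond that ends the walk. */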
+static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
+{
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ return (void*)0;
+ }
+
+
+ return (void*)&g_BridgeDispatchTable[off-1];
+}
+
+static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off)
+{
+ return ProcSeqOff2ElementBridgeStats(sfile,off);
+}
+
+
+static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)el;
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf(sfile,
+ "Total ioctl call count = %u\n"
+ "Total number of bytes copied via copy_from_user = %u\n"
+ "Total number of bytes copied via copy_to_user = %u\n"
+ "Total number of bytes copied via copy_*_user = %u\n\n"
+ "%-45s | %-40s | %10s | %20s | %10s\n",
+ g_BridgeGlobalStats.ui32IOCTLCount,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ "Bridge Name",
+ "Wrapper Function",
+ "Call Count",
+ "copy_from_user Bytes",
+ "copy_to_user Bytes"
+ );
+ return;
+ }
+
+ seq_printf(sfile,
+ "%-45s %-40s %-10u %-20u %-10u\n",
+ psEntry->pszIOCName,
+ psEntry->pszFunctionName,
+ psEntry->ui32CallCount,
+ psEntry->ui32CopyFromUserTotalBytes,
+ psEntry->ui32CopyToUserTotalBytes);
+}
+
+#endif
+
+
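+/*
+ * PVRSRV_BridgeDispatchKM: ioctl (or DRM) entry point for the services
+ * bridge.  On non-DRM builds it copies the PVRSRV_BRIDGE_PACKAGE in from
+ * user space, looks up the caller's per-process data (creating it for
+ * PVRSRV_BRIDGE_CONNECT_SERVICES), applies per-command checks on the file
+ * descriptor's exported MemInfo handle and then hands the package to
+ * BridgedDispatchKM.  The whole path runs under gPVRSRVLock.
+ */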
+#if defined(SUPPORT_DRI_DRM)
+int
+PVRSRV_BridgeDispatchKM(struct drm_device unref__ *dev, void *arg, struct drm_file *pFile)
+#else
+long
+PVRSRV_BridgeDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsigned long arg)
+#endif
+{
+ IMG_UINT32 cmd;
+#if !defined(SUPPORT_DRI_DRM)
+ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
+ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
+#endif
+ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
+ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+ IMG_INT err = -EFAULT;
+
+ LinuxLockMutex(&gPVRSRVLock);
+
+#if defined(SUPPORT_DRI_DRM)
+ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
+ PVR_ASSERT(psBridgePackageKM != IMG_NULL);
+#else
+ psBridgePackageKM = &sBridgePackageKM;
+
+ if(!OSAccessOK(PVR_VERIFY_WRITE,
+ psBridgePackageUM,
+ sizeof(PVRSRV_BRIDGE_PACKAGE)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
+ __FUNCTION__));
+
+ goto unlock_and_return;
+ }
+
+
+ if(OSCopyFromUser(IMG_NULL,
+ psBridgePackageKM,
+ psBridgePackageUM,
+ sizeof(PVRSRV_BRIDGE_PACKAGE))
+ != PVRSRV_OK)
+ {
+ goto unlock_and_return;
+ }
+#endif
+
+ cmd = psBridgePackageKM->ui32BridgeID;
+
+ if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_PVOID *)&psPerProc,
+ psBridgePackageKM->hKernelServices,
+ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
+ __FUNCTION__, eError));
+ goto unlock_and_return;
+ }
+
+ if(psPerProc->ui32PID != ui32PID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
+ "belonging to process %d", __FUNCTION__, ui32PID,
+ psPerProc->ui32PID));
+ goto unlock_and_return;
+ }
+ }
+ else
+ {
+
+ psPerProc = PVRSRVPerProcessData(ui32PID);
+ if(psPerProc == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
+ "Couldn't create per-process data area"));
+ goto unlock_and_return;
+ }
+ }
+
+ psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
+
+ switch(cmd)
+ {
+ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2:
+ {
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
+
+ if(psPrivateData->hKernelMemInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
+ "per file descriptor", __FUNCTION__));
+ err = -EINVAL;
+ goto unlock_and_return;
+ }
+ break;
+ }
+
+ case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2:
+ {
+ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
+ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
+
+ if(!psPrivateData->hKernelMemInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
+ "associated MemInfo handle", __FUNCTION__));
+ err = -EINVAL;
+ goto unlock_and_return;
+ }
+
+ if (pvr_put_user(psPrivateData->hKernelMemInfo, &psMapDevMemIN->hKernelMemInfo) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+ break;
+ }
+
+ default:
+ {
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
+
+ if(psPrivateData->hKernelMemInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
+ "to use privileged service", __FUNCTION__));
+ goto unlock_and_return;
+ }
+ break;
+ }
+ }
+
+#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
+ switch(cmd)
+ {
+ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
+ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
+ {
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
+ int authenticated = pFile->authenticated;
+ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
+
+ if (authenticated)
+ {
+ break;
+ }
+
+
+ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc);
+ if (psEnvPerProc == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__));
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+
+ list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem)
+ {
+ struct drm_file *psDRMFile = psPrivateData->psDRMFile;
+
+ if (pFile->master == psDRMFile->master)
+ {
+ authenticated |= psDRMFile->authenticated;
+ if (authenticated)
+ {
+ break;
+ }
+ }
+ }
+
+ if (!authenticated)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__));
+ err = -EPERM;
+ goto unlock_and_return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+#endif
+
+ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
+ if(err != PVRSRV_OK)
+ goto unlock_and_return;
+
+ switch(cmd)
+ {
+ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2:
+ {
+ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
+ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
+ IMG_HANDLE hMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+ if (pvr_get_user(hMemInfo, &psExportDeviceMemOUT->hMemInfo) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+
+
+ if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+ (IMG_PVOID *)&psKernelMemInfo,
+ hMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__));
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+
+
+ PVRSRVKernelMemInfoIncRef(psKernelMemInfo);
+
+
+ if (psKernelMemInfo->sShareMemWorkaround.bInUse)
+ {
+ BM_XProcIndexAcquire(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
+ }
+
+ psPrivateData->hKernelMemInfo = hMemInfo;
+#if defined(SUPPORT_MEMINFO_IDS)
+ psPrivateData->ui64Stamp = ++ui64Stamp;
+
+ psKernelMemInfo->ui64Stamp = psPrivateData->ui64Stamp;
+ if (pvr_put_user(psPrivateData->ui64Stamp, &psExportDeviceMemOUT->ui64Stamp) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+#endif
+ break;
+ }
+
+#if defined(SUPPORT_MEMINFO_IDS)
+ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
+ case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2:
+ {
+ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
+ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
+ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
+ if (pvr_put_user(psPrivateData->ui64Stamp, &psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+ break;
+ }
+
+ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
+ {
+ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
+ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
+ if (pvr_put_user(++ui64Stamp, &psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp) != 0)
+ {
+ err = -EFAULT;
+ goto unlock_and_return;
+ }
+ break;
+ }
+#endif
+
+ default:
+ break;
+ }
+
+unlock_and_return:
+ LinuxUnLockMutex(&gPVRSRVLock);
+ return err;
+}
diff --git a/drivers/gpu/pvr/pvr_bridge_km.h b/drivers/gpu/pvr/pvr_bridge_km.h
new file mode 100644
index 0000000..e14f2fd
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_bridge_km.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PVR_BRIDGE_KM_H_
+#define __PVR_BRIDGE_KM_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvr_bridge.h"
+#include "perproc.h"
+
+#if defined(__linux__)
+PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
+IMG_VOID LinuxBridgeDeInit(IMG_VOID);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
+ PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 uiDevIndex,
+ PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_HANDLE *phDevCookie);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
+ PVRSRV_QUEUE_INFO **ppsQueueInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo);
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE *phDevMemContext,
+ IMG_UINT32 *pui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo,
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo,
+#endif
+ IMG_BOOL *pbCreated,
+ IMG_BOOL *pbShared);
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL *pbDestroyed);
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_UINT32 *pui32ClientHeapCount,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *psHeapInfo,
+#else
+ PVRSRV_HEAP_INFO *psHeapInfo,
+#endif
+ IMG_BOOL *pbShared
+ );
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemHeap,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
+
+
+#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
+ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, memInfo, logStr) \
+ (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \
+ ", " #alignment "," #memInfo "): " logStr " (size = 0x%x)", size)),\
+ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, memInfo))
+#else
+ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, memInfo, logStr) \
+ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, memInfo)
+#endif
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
+ IMG_HANDLE hDstDevMemHeap,
+ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
+ PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemContext,
+ IMG_SIZE_T ui32ByteSize,
+ IMG_SIZE_T ui32PageOffset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_VOID *pvLinAddr,
+ IMG_UINT32 ui32Flags,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
+ IMG_UINT32 *pui32DevCount,
+ IMG_UINT32 *pui32DevID );
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE hDevCookie,
+ IMG_HANDLE *phDeviceKM);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_FORMAT *psFormat);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
+ DISPLAY_FORMAT *psFormat,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_DIMS *psDim);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE *phBuffer);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
+ DISPLAY_INFO *psDisplayInfo);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDeviceKM,
+ IMG_UINT32 ui32Flags,
+ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+ IMG_UINT32 ui32BufferCount,
+ IMG_UINT32 ui32OEMFlags,
+ IMG_HANDLE *phSwapChain,
+ IMG_UINT32 *pui32SwapChainID);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_RECT *psRect);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 ui32CKColour);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain,
+ IMG_UINT32 *pui32BufferCount,
+ IMG_HANDLE *phBuffer,
+ IMG_SYS_PHYADDR *psPhyAddr);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hBuffer,
+ IMG_UINT32 ui32SwapInterval,
+ IMG_HANDLE hPrivateTag,
+ IMG_UINT32 ui32ClipRectCount,
+ IMG_RECT *psClipRect);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSwapToDCBuffer2KM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hBuffer,
+ IMG_UINT32 ui32SwapInterval,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfos,
+ PVRSRV_KERNEL_SYNC_INFO **ppsSyncInfos,
+ IMG_UINT32 ui32NumMemSyncInfos,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
+ IMG_HANDLE hSwapChain);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE hDevCookie,
+ IMG_HANDLE *phDeviceKM);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM,
+ BUFFER_INFO *psBufferInfo);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM,
+ IMG_UINT32 ui32BufferIndex,
+ IMG_HANDLE *phBuffer);
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevMemContext,
+ IMG_HANDLE hDeviceClassBuffer,
+ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
+ IMG_HANDLE *phOSMapInfo);
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVChangeDeviceMemoryAttributesKM(IMG_HANDLE hKernelMemInfo,
+ IMG_UINT32 ui32Attribs);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
+ IMG_SIZE_T *pui32Total,
+ IMG_SIZE_T *pui32Free,
+ IMG_SIZE_T *pui32LargestBlock);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
+IMG_IMPORT
+IMG_VOID IMG_CALLCONV PVRSRVAcquireSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
+IMG_IMPORT
+IMG_VOID IMG_CALLCONV PVRSRVReleaseSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
+
+IMG_IMPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO_KM *psMiscInfo);
+#else
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
+#endif
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pvr_debug.c b/drivers/gpu/pvr/pvr_debug.c
new file mode 100644
index 0000000..9f0016f
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_debug.c
@@ -0,0 +1,453 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <stdarg.h>
+#include "img_types.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "proc.h"
+#include "mutex.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+
+#if !defined(CONFIG_PREEMPT)
+#define PVR_DEBUG_ALWAYS_USE_SPINLOCK
+#endif
+
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR* pszFormat, va_list VArgs)
+ IMG_FORMAT_PRINTF(3, 0);
+
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR *pszFormat, ...)
+ IMG_FORMAT_PRINTF(3, 4);
+
+IMG_UINT32 gPVRDebugLevel =
+ (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING);
+
+#endif
+
+#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+#endif
+
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+
+static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+#endif
+
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+#if !defined (USE_SPIN_LOCK)
+#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
+#endif
+#endif
+
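+/*
+ * Debug output is formatted into one of two static buffers: gszBufferIRQ,
+ * protected by an IRQ-safe spinlock, and gszBufferNonIRQ, protected by a
+ * mutex.  The spinlock/IRQ buffer is selected in interrupt or
+ * non-preemptible context, or unconditionally when
+ * PVR_DEBUG_ALWAYS_USE_SPINLOCK is defined.
+ */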
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ if (USE_SPIN_LOCK)
+#endif
+ {
+ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+ }
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ else
+ {
+ LinuxLockMutex(&gsDebugMutexNonIRQ);
+ }
+#endif
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ if (USE_SPIN_LOCK)
+#endif
+ {
+ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+ }
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ else
+ {
+ LinuxUnLockMutex(&gsDebugMutexNonIRQ);
+ }
+#endif
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ if (USE_SPIN_LOCK)
+#endif
+ {
+ *ppszBuf = gszBufferIRQ;
+ *pui32BufSiz = sizeof(gszBufferIRQ);
+ }
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ else
+ {
+ *ppszBuf = gszBufferNonIRQ;
+ *pui32BufSiz = sizeof(gszBufferNonIRQ);
+ }
+#endif
+}
+
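+/* Append a formatted message to pszBuf; returns IMG_TRUE if it was truncated. */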
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs)
+{
+ IMG_UINT32 ui32Used;
+ IMG_UINT32 ui32Space;
+ IMG_INT32 i32Len;
+
+ ui32Used = strlen(pszBuf);
+ BUG_ON(ui32Used >= ui32BufSiz);
+ ui32Space = ui32BufSiz - ui32Used;
+
+ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+ pszBuf[ui32BufSiz - 1] = 0;
+
+
+ return (i32Len < 0 || i32Len >= (IMG_INT32)ui32Space) ? IMG_TRUE : IMG_FALSE;
+}
+
+IMG_VOID PVRDPFInit(IMG_VOID)
+{
+#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
+ LinuxInitMutex(&gsDebugMutexNonIRQ);
+#endif
+}
+
+IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_INFO "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+ va_end(vaArgs);
+
+}
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+
+IMG_VOID PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
+{
+ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
+ BUG();
+}
+
+#endif
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
+{
+ va_list VArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(VArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ strncpy(pszBuf, "PVR: ", (ui32BufSiz -1));
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+ {
+ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_INFO "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end(VArgs);
+}
+
+#endif
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+ va_list VArgs;
+ IMG_BOOL bTrunc;
+
+ va_start (VArgs, pszFormat);
+
+ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+ va_end (VArgs);
+
+ return bTrunc;
+}
+
+IMG_VOID PVRSRVDebugPrintf (
+ IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR* pszFullFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR* pszFormat,
+ ...
+ )
+{
+ IMG_BOOL bTrace;
+ const IMG_CHAR *pszFileName = pszFullFileName;
+ IMG_CHAR *pszLeafName;
+
+
+	bTrace = (ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE;
+
+ if (gPVRDebugLevel & ui32DebugLevel)
+ {
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+
+ if (bTrace == IMG_FALSE)
+ {
+ switch(ui32DebugLevel)
+ {
+ case DBGPRIV_FATAL:
+ {
+ strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1));
+ break;
+ }
+ case DBGPRIV_ERROR:
+ {
+ strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1));
+ break;
+ }
+ case DBGPRIV_WARNING:
+ {
+ strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1));
+ break;
+ }
+ case DBGPRIV_MESSAGE:
+ {
+ strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1));
+ break;
+ }
+ case DBGPRIV_VERBOSE:
+ {
+ strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1));
+ break;
+ }
+ default:
+ {
+ strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1));
+ break;
+ }
+ }
+ }
+ else
+ {
+ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
+ }
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+
+ if (bTrace == IMG_FALSE)
+ {
+#ifdef DEBUG_LOG_PATH_TRUNCATE
+
+ static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
+
+ IMG_CHAR* pszTruncIter;
+ IMG_CHAR* pszTruncBackInter;
+
+
+ if (strlen(pszFullFileName) > strlen(DEBUG_LOG_PATH_TRUNCATE)+1)
+ pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1;
+
+
+ strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN);
+
+ if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) {
+		IMG_CHAR szTruncateMessage[] = "FILENAME TRUNCATED";
+		strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMessage)), szTruncateMessage);
+ }
+
+ pszTruncIter = szFileNameRewrite;
+ while(*pszTruncIter++ != 0)
+ {
+ IMG_CHAR* pszNextStartPoint;
+
+ if(
+ !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) &&
+ ( *(pszTruncIter-1) == '.') &&
+ ( *(pszTruncIter-2) == '.') &&
+ ( *(pszTruncIter-3) == '/') )
+ ) continue;
+
+
+ pszTruncBackInter = pszTruncIter - 3;
+ while(*(--pszTruncBackInter) != '/')
+ {
+ if(pszTruncBackInter <= szFileNameRewrite) break;
+ }
+ pszNextStartPoint = pszTruncBackInter;
+
+
+ while(*pszTruncIter != 0)
+ {
+ *pszTruncBackInter++ = *pszTruncIter++;
+ }
+ *pszTruncBackInter = 0;
+
+
+ pszTruncIter = pszNextStartPoint;
+ }
+
+ pszFileName = szFileNameRewrite;
+
+ if(*pszFileName == '/') pszFileName++;
+#endif
+
+#if !defined(__sh__)
+ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\');
+
+ if (pszLeafName)
+ {
+ pszFileName = pszLeafName;
+ }
+#endif
+
+ if (BAppend(pszBuf, ui32BufSiz, " [%u, %s]", ui32Line, pszFileName))
+ {
+ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_INFO "%s\n", pszBuf);
+ }
+ }
+ else
+ {
+ printk(KERN_INFO "%s\n", pszBuf);
+ }
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end (vaArgs);
+ }
+}
+
+#endif
+
+#if defined(DEBUG)
+
+IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
+{
+#define _PROC_SET_BUFFER_SZ 2
+ IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ];
+
+ if (count != _PROC_SET_BUFFER_SZ)
+ {
+ return -EINVAL;
+ }
+ else
+ {
+ if (pvr_copy_from_user(data_buffer, buffer, count))
+ return -EINVAL;
+ if (data_buffer[count - 1] != '\n')
+ return -EINVAL;
+ gPVRDebugLevel = data_buffer[0] - '0';
+ }
+ return (count);
+}
+
+void ProcSeqShowDebugLevel(struct seq_file *sfile, void* el)
+{
+ seq_printf(sfile, "%u\n", gPVRDebugLevel);
+}
+
+#endif
diff --git a/drivers/gpu/pvr/pvr_debug.h b/drivers/gpu/pvr/pvr_debug.h
new file mode 100644
index 0000000..e66fdab
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_debug.h
@@ -0,0 +1,184 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+******************************************************************************/
+
+#ifndef __PVR_DEBUG_H__
+#define __PVR_DEBUG_H__
+
+
+#include "img_types.h"
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN (512)
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL 0x01UL
+#define DBGPRIV_ERROR 0x02UL
+#define DBGPRIV_WARNING 0x04UL
+#define DBGPRIV_MESSAGE 0x08UL
+#define DBGPRIV_VERBOSE 0x10UL
+#define DBGPRIV_CALLTRACE 0x20UL
+#define DBGPRIV_ALLOC 0x40UL
+#define DBGPRIV_DBGDRV_MESSAGE 0x80UL
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+
+ #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+ IMG_UINT32 ui32Line);
+
+ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__)
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+ #define PVR_ASSERT(EXPR)
+ #define PVR_DBG_BREAK
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#if defined(PVRSRV_NEW_PVR_DPF)
+
+ /* New logging mechanism */
+ #define PVR_DBG_FATAL DBGPRIV_FATAL
+ #define PVR_DBG_ERROR DBGPRIV_ERROR
+ #define PVR_DBG_WARNING DBGPRIV_WARNING
+ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE
+ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE
+ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE
+ #define PVR_DBG_ALLOC DBGPRIV_ALLOC
+ #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE
+
+ /* These levels are always on with PVRSRV_NEED_PVR_DPF */
+ #define __PVR_DPF_0x01UL(x...) PVRSRVDebugPrintf(DBGPRIV_FATAL, x)
+ #define __PVR_DPF_0x02UL(x...) PVRSRVDebugPrintf(DBGPRIV_ERROR, x)
+
+ /* Some are compiled out completely in release builds */
+#if defined(DEBUG)
+ #define __PVR_DPF_0x04UL(x...) PVRSRVDebugPrintf(DBGPRIV_WARNING, x)
+ #define __PVR_DPF_0x08UL(x...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, x)
+ #define __PVR_DPF_0x10UL(x...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, x)
+ #define __PVR_DPF_0x20UL(x...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, x)
+ #define __PVR_DPF_0x40UL(x...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, x)
+ #define __PVR_DPF_0x80UL(x...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, x)
+#else
+ #define __PVR_DPF_0x04UL(x...)
+ #define __PVR_DPF_0x08UL(x...)
+ #define __PVR_DPF_0x10UL(x...)
+ #define __PVR_DPF_0x20UL(x...)
+ #define __PVR_DPF_0x40UL(x...)
+ #define __PVR_DPF_0x80UL(x...)
+#endif
+
+ /* Translate the different log levels to separate macros
+ * so they can each be compiled out.
+ */
+#if defined(DEBUG)
+ #define __PVR_DPF(lvl, x...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, x)
+#else
+ #define __PVR_DPF(lvl, x...) __PVR_DPF_ ## lvl ("", 0, x)
+#endif
+
+ /* Get rid of the double bracketing */
+ #define PVR_DPF(x) __PVR_DPF x
+
+#else /* defined(PVRSRV_NEW_PVR_DPF) */
+
+ /* Old logging mechanism */
+ #define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__
+ #define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__
+ #define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__
+ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__
+ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__
+ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__
+ #define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__
+ #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE, "", 0
+
+ #define PVR_DPF(X) PVRSRVDebugPrintf X
+
+#endif /* defined(PVRSRV_NEW_PVR_DPF) */
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszFormat,
+ ...) IMG_FORMAT_PRINTF(4, 5);
+
+#else /* defined(PVRSRV_NEED_PVR_DPF) */
+
+ #define PVR_DPF(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+ #define PVR_TRACE(X) PVRSRVTrace X
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+ IMG_FORMAT_PRINTF(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+ #define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_DEBUG_H__ */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/pvr/pvr_uaccess.h b/drivers/gpu/pvr/pvr_uaccess.h
new file mode 100644
index 0000000..6e7f1d3
--- /dev/null
+++ b/drivers/gpu/pvr/pvr_uaccess.h
@@ -0,0 +1,71 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/uaccess.h>
+
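+/*
+ * Thin wrappers around copy_to_user/copy_from_user.  On kernels >= 2.6.33
+ * the access_ok() check is made explicitly and the __copy_* variants are
+ * used; if the check fails the full byte count is returned (nothing
+ * copied), matching the copy_*_user convention.
+ */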
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+ if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+ {
+ return __copy_to_user(pvTo, pvFrom, ulBytes);
+ }
+ return ulBytes;
+#else
+ return copy_to_user(pvTo, pvFrom, ulBytes);
+#endif
+}
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+
+ if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+ {
+ return __copy_from_user(pvTo, pvFrom, ulBytes);
+ }
+ return ulBytes;
+#else
+ return copy_from_user(pvTo, pvFrom, ulBytes);
+#endif
+}
+
+#define pvr_put_user put_user
+#define pvr_get_user get_user
+
+#endif
+
diff --git a/drivers/gpu/pvr/pvrmmap.h b/drivers/gpu/pvr/pvrmmap.h
new file mode 100644
index 0000000..242d953
--- /dev/null
+++ b/drivers/gpu/pvr/pvrmmap.h
@@ -0,0 +1,44 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __PVRMMAP_H__
+#define __PVRMMAP_H__
+
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_SID *phMappingInfo, IMG_SID hMHandle);
+#else
+PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_HANDLE *phMappingInfo, IMG_HANDLE hMHandle);
+#endif
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_SID hMappingInfo, IMG_SID hMHandle);
+#else
+IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_HANDLE hMappingInfo, IMG_HANDLE hMHandle);
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/pvrmodule.h b/drivers/gpu/pvr/pvrmodule.h
new file mode 100644
index 0000000..3dd5845
--- /dev/null
+++ b/drivers/gpu/pvr/pvrmodule.h
@@ -0,0 +1,31 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _PVRMODULE_H_
+#define _PVRMODULE_H_
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/gpu/pvr/pvrsrv.c b/drivers/gpu/pvr/pvrsrv.c
new file mode 100644
index 0000000..af2b6eb
--- /dev/null
+++ b/drivers/gpu/pvr/pvrsrv.c
@@ -0,0 +1,1372 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "pvr_bridge_km.h"
+#include "handle.h"
+#include "perproc.h"
+#include "pdump_km.h"
+#include "deviceid.h"
+#include "ra.h"
+#if defined(TTRACE)
+#include "ttrace.h"
+#endif
+#include "perfkm.h"
+
+#include "pvrversion.h"
+
+#include "lists.h"
+
+IMG_UINT32 g_ui32InitFlags;
+
+#define INIT_DATA_ENABLE_PDUMPINIT 0x1U
+#define INIT_DATA_ENABLE_TTARCE 0x2U
+
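+/*
+ * AllocateDeviceID/FreeDeviceID linearly scan psSysData->sDeviceID[] for a
+ * free slot (allocate) or for the matching in-use entry (free).
+ */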
+PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
+{
+ SYS_DEVICE_ID* psDeviceWalker;
+ SYS_DEVICE_ID* psDeviceEnd;
+
+ psDeviceWalker = &psSysData->sDeviceID[0];
+ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
+
+
+ while (psDeviceWalker < psDeviceEnd)
+ {
+ if (!psDeviceWalker->bInUse)
+ {
+ psDeviceWalker->bInUse = IMG_TRUE;
+ *pui32DevID = psDeviceWalker->uiID;
+ return PVRSRV_OK;
+ }
+ psDeviceWalker++;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
+
+
+ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
+
+ return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE;
+}
+
+
+PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
+{
+ SYS_DEVICE_ID* psDeviceWalker;
+ SYS_DEVICE_ID* psDeviceEnd;
+
+ psDeviceWalker = &psSysData->sDeviceID[0];
+ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
+
+
+ while (psDeviceWalker < psDeviceEnd)
+ {
+
+ if (
+ (psDeviceWalker->uiID == ui32DevID) &&
+ (psDeviceWalker->bInUse)
+ )
+ {
+ psDeviceWalker->bInUse = IMG_FALSE;
+ return PVRSRV_OK;
+ }
+ psDeviceWalker++;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
+
+
+ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
+
+ return PVRSRV_ERROR_INVALID_DEVICEID;
+}
+
+
+#ifndef ReadHWReg
+IMG_EXPORT
+IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
+{
+ return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset);
+}
+#endif
+
+
+#ifndef WriteHWReg
+IMG_EXPORT
+IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",
+ (IMG_UINTPTR_T)pvLinRegBaseAddr,ui32Offset,ui32Value));
+
+ *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value;
+}
+#endif
+
+
+#ifndef WriteHWRegs
+IMG_EXPORT
+IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
+{
+ while (ui32Count)
+ {
+ WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
+ psHWRegs++;
+ ui32Count--;
+ }
+}
+#endif
+
+static IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+ IMG_UINT *pui32DevCount;
+ PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList;
+
+ pui32DevCount = va_arg(va, IMG_UINT*);
+ ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**);
+
+ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
+ {
+ *(*ppsDevIdList) = psDeviceNode->sDevId;
+ (*ppsDevIdList)++;
+ (*pui32DevCount)++;
+ }
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
+ PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
+{
+ SYS_DATA *psSysData;
+ IMG_UINT32 i;
+
+ if (!pui32NumDevices || !psDevIdList)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ SysAcquireData(&psSysData);
+
+
+
+ for (i=0; i<PVRSRV_MAX_DEVICES; i++)
+ {
+ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
+ }
+
+
+ *pui32NumDevices = 0;
+
+
+
+
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
+ &PVRSRVEnumerateDevicesKM_ForEachVaCb,
+ pui32NumDevices,
+ &psDevIdList);
+
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError = ResManInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = PVRSRVPerProcessDataInit();
+ if(eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+
+ eError = PVRSRVHandleInit();
+ if(eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+
+ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+
+ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
+ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
+
+
+ if(OSAllocMem( PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_EVENTOBJECT) ,
+ (IMG_VOID **)&psSysData->psGlobalEventObject, 0,
+ "Event Object") != PVRSRV_OK)
+ {
+
+ goto Error;
+ }
+
+ if(OSEventObjectCreateKM("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+
+ psSysData->pfnHighResTimerCreate = OSFuncHighResTimerCreate;
+ psSysData->pfnHighResTimerGetus = OSFuncHighResTimerGetus;
+ psSysData->pfnHighResTimerDestroy = OSFuncHighResTimerDestroy;
+
+#if defined(TTRACE)
+ eError = PVRSRVTimeTraceInit();
+ if (eError != PVRSRV_OK)
+ goto Error;
+ g_ui32InitFlags |= INIT_DATA_ENABLE_TTARCE;
+#endif
+
+
+ PDUMPINIT();
+ g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+ PERFINIT();
+ return eError;
+
+Error:
+ PVRSRVDeInit(psSysData);
+ return eError;
+}
+
+
+
+IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+ if (psSysData == IMG_NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: invalid parameter - psSysData is NULL"));
+ return;
+ }
+
+ PERFDEINIT();
+
+#if defined(TTRACE)
+
+ if ((g_ui32InitFlags & INIT_DATA_ENABLE_TTARCE) > 0)
+ {
+ PVRSRVTimeTraceDeinit();
+ }
+#endif
+
+ if( (g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+ {
+ PDUMPDEINIT();
+ }
+
+
+ if(psSysData->psGlobalEventObject)
+ {
+ OSEventObjectDestroyKM(psSysData->psGlobalEventObject);
+ OSFreeMem( PVRSRV_PAGEABLE_SELECT,
+ sizeof(PVRSRV_EVENTOBJECT),
+ psSysData->psGlobalEventObject,
+ 0);
+ psSysData->psGlobalEventObject = IMG_NULL;
+ }
+
+ eError = PVRSRVHandleDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
+ }
+
+ eError = PVRSRVPerProcessDataDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
+ }
+
+ ResManDeInit();
+}
+
+
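+/*
+ * PVRSRVRegisterDevice: allocates and zeroes a PVRSRV_DEVICE_NODE, lets the
+ * device-specific pfnRegisterDevice callback fill it in, assigns a device
+ * ID and links the node into psSysData->psDeviceNodeList.
+ */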
+PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
+ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+ IMG_UINT32 ui32SOCInterruptBit,
+ IMG_UINT32 *pui32DeviceIndex)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+
+ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DEVICE_NODE),
+ (IMG_VOID **)&psDeviceNode, IMG_NULL,
+ "Device Node") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
+
+ eError = pfnRegisterDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
+ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
+ }
+
+
+
+
+
+
+ psDeviceNode->ui32RefCount = 1;
+ psDeviceNode->psSysData = psSysData;
+ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
+
+
+ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
+
+
+ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
+
+
+ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
+
+ SysAcquireData(&psSysData);
+
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DevIndex,
+ IMG_TRUE);
+ if(!psDeviceNode)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
+
+
+
+ eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
+ return eError;
+ }
+
+
+ if(psDeviceNode->pfnInitDevice != IMG_NULL)
+ {
+ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE_DEFAULT,
+ KERNEL_ID, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
+ }
+ return eError;
+}
+
+static PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
+ }
+ return eError;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
+
+ SysAcquireData(&psSysData);
+
+ if (bInitSuccessful)
+ {
+ eError = SysFinalise();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
+ return eError;
+ }
+
+
+ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
+ &PVRSRVFinaliseSystem_SetPowerState_AnyCb);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+
+ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
+ &PVRSRVFinaliseSystem_CompatCheck_AnyCb);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+
+
+
+
+ PDUMPENDINITPHASE();
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+
+ if (psDeviceNode->pfnInitDeviceCompatCheck)
+ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+ else
+ return PVRSRV_OK;
+}
+
+static IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+ PVRSRV_DEVICE_TYPE eDeviceType;
+ IMG_UINT32 ui32DevIndex;
+
+ eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
+ ui32DevIndex = va_arg(va, IMG_UINT32);
+
+ if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
+ psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
+ (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
+ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex))
+ {
+ return psDeviceNode;
+ }
+ else
+ {
+ return IMG_NULL;
+ }
+}
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex,
+ PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_HANDLE *phDevCookie)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
+
+ SysAcquireData(&psSysData);
+
+
+ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
+ eDeviceType,
+ ui32DevIndex);
+
+
+ if (!psDeviceNode)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
+
+
+ if (phDevCookie)
+ {
+ *phDevCookie = (IMG_HANDLE)psDeviceNode;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+
+ SysAcquireData(&psSysData);
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ &MatchDeviceKM_AnyVaCb,
+ ui32DevIndex,
+ IMG_TRUE);
+
+ if (!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
+ return PVRSRV_ERROR_DEVICEID_NOT_FOUND;
+ }
+
+
+
+ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ KERNEL_ID,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
+ return eError;
+ }
+
+
+
+ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
+ RESMAN_CRITERIA_RESTYPE,
+ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
+ IMG_NULL, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
+ return eError;
+ }
+
+
+
+ if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
+ {
+ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
+ return eError;
+ }
+ }
+
+
+
+ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
+ psDeviceNode->hResManContext = IMG_NULL;
+
+
+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+
+ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
+
+
+ return (PVRSRV_OK);
+}
+
+
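+/*
+ * PollForValueKM: repeatedly reads *pui32LinMemAddr, masks it with ui32Mask
+ * and compares against ui32Value until it matches or (in non-EMULATOR
+ * builds) ui32Timeoutus expires.  With bAllowPreemption set the wait sleeps
+ * via OSSleepms(); otherwise it busy-waits with OSWaitus().
+ */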
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Timeoutus,
+ IMG_UINT32 ui32PollPeriodus,
+ IMG_BOOL bAllowPreemption)
+{
+#if defined (EMULATOR)
+ {
+ PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+ #if !defined(__linux__)
+ PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+ #endif
+
+
+
+ do
+ {
+ if((*pui32LinMemAddr & ui32Mask) == ui32Value)
+ {
+ return PVRSRV_OK;
+ }
+
+ #if defined(__linux__)
+ OSWaitus(ui32PollPeriodus);
+ #else
+ OSReleaseThreadQuanta();
+ #endif
+
+ } while (ui32Timeoutus);
+ }
+#else
+ {
+ IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU;
+
+ if (bAllowPreemption)
+ {
+ PVR_ASSERT(ui32PollPeriodus >= 1000);
+ }
+
+
+ LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+ {
+ ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
+ if(ui32ActualValue == ui32Value)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (bAllowPreemption)
+ {
+ OSSleepms(ui32PollPeriodus / 1000);
+ }
+ else
+ {
+ OSWaitus(ui32PollPeriodus);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+ ui32Value, ui32ActualValue, ui32Mask));
+ }
+#endif
+
+ return PVRSRV_ERROR_TIMEOUT;
+}
+
+
+static IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
+{
+ IMG_CHAR **ppszStr;
+ IMG_UINT32 *pui32StrLen;
+ IMG_UINT32 ui32Mode;
+ PVRSRV_ERROR (*pfnGetStats)(RA_ARENA *, IMG_CHAR **, IMG_UINT32 *);
+
+ ppszStr = va_arg(va, IMG_CHAR**);
+ pui32StrLen = va_arg(va, IMG_UINT32*);
+ ui32Mode = va_arg(va, IMG_UINT32);
+
+
+ switch(ui32Mode)
+ {
+ case PVRSRV_MISC_INFO_MEMSTATS_PRESENT:
+ pfnGetStats = &RA_GetStats;
+ break;
+ case PVRSRV_MISC_INFO_FREEMEM_PRESENT:
+ pfnGetStats = &RA_GetStatsFreeMem;
+ break;
+ default:
+ return;
+ }
+
+ if(psBMHeap->pImportArena)
+ {
+ pfnGetStats(psBMHeap->pImportArena,
+ ppszStr,
+ pui32StrLen);
+ }
+
+ if(psBMHeap->pVMArena)
+ {
+ pfnGetStats(psBMHeap->pVMArena,
+ ppszStr,
+ pui32StrLen);
+ }
+}
+
+static PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va)
+{
+
+ IMG_UINT32 *pui32StrLen;
+ IMG_INT32 *pi32Count;
+ IMG_CHAR **ppszStr;
+ IMG_UINT32 ui32Mode;
+
+ pui32StrLen = va_arg(va, IMG_UINT32*);
+ pi32Count = va_arg(va, IMG_INT32*);
+ ppszStr = va_arg(va, IMG_CHAR**);
+ ui32Mode = va_arg(va, IMG_UINT32);
+
+ CHECK_SPACE(*pui32StrLen);
+ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) %p:\n",
+ (IMG_HANDLE)psBMContext);
+ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
+
+ List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
+ &PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
+ ppszStr,
+ pui32StrLen,
+ ui32Mode);
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+ IMG_UINT32 *pui32StrLen;
+ IMG_INT32 *pi32Count;
+ IMG_CHAR **ppszStr;
+ IMG_UINT32 ui32Mode;
+
+ pui32StrLen = va_arg(va, IMG_UINT32*);
+ pi32Count = va_arg(va, IMG_INT32*);
+ ppszStr = va_arg(va, IMG_CHAR**);
+ ui32Mode = va_arg(va, IMG_UINT32);
+
+ CHECK_SPACE(*pui32StrLen);
+ *pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
+ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
+
+
+ if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
+ {
+ CHECK_SPACE(*pui32StrLen);
+ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n");
+ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
+
+ List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap,
+ &PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
+ ppszStr,
+ pui32StrLen,
+ ui32Mode);
+ }
+
+
+ return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext,
+ &PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
+ pui32StrLen,
+ pi32Count,
+ ppszStr,
+ ui32Mode);
+}
+
+
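+/* Fill out the psMiscInfo fields selected by ui32StateRequest and record each
+ * item actually supplied in ui32StatePresent. */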
+IMG_EXPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO_KM *psMiscInfo)
+#else
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
+#endif
+{
+ SYS_DATA *psSysData;
+
+ if(!psMiscInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psMiscInfo->ui32StatePresent = 0;
+
+
+ if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
+ |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
+ |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
+ |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
+ |PVRSRV_MISC_INFO_DDKVERSION_PRESENT
+ |PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
+ |PVRSRV_MISC_INFO_RESET_PRESENT
+ |PVRSRV_MISC_INFO_FREEMEM_PRESENT
+ |PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT
+ |PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT
+ |PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ SysAcquireData(&psSysData);
+
+
+ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
+ (psSysData->pvSOCTimerRegisterKM != IMG_NULL))
+ {
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
+ psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
+ psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
+ }
+ else
+ {
+ psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
+ psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
+ }
+
+
+ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
+ (psSysData->pvSOCClockGateRegsBase != IMG_NULL))
+ {
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
+ psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
+ psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
+ }
+
+
+ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
+ (psMiscInfo->pszMemoryStr != IMG_NULL))
+ {
+ RA_ARENA **ppArena;
+ IMG_CHAR *pszStr;
+ IMG_UINT32 ui32StrLen;
+ IMG_INT32 i32Count;
+
+ pszStr = psMiscInfo->pszMemoryStr;
+ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
+
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
+
+
+ ppArena = &psSysData->apsLocalDevMemArena[0];
+ while(*ppArena)
+ {
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ RA_GetStats(*ppArena,
+ &pszStr,
+ &ui32StrLen);
+
+ ppArena++;
+ }
+
+
+
+ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
+ &PVRSRVGetMiscInfoKM_Device_AnyVaCb,
+ &ui32StrLen,
+ &i32Count,
+ &pszStr,
+ PVRSRV_MISC_INFO_MEMSTATS_PRESENT);
+
+
+ i32Count = OSSNPrintf(pszStr, 100, "\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+
+
+ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)
+ && psMiscInfo->pszMemoryStr)
+ {
+ IMG_CHAR *pszStr;
+ IMG_UINT32 ui32StrLen;
+ IMG_INT32 i32Count;
+
+ pszStr = psMiscInfo->pszMemoryStr;
+ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
+
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;
+
+
+ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
+ &PVRSRVGetMiscInfoKM_Device_AnyVaCb,
+ &ui32StrLen,
+ &i32Count,
+ &pszStr,
+ PVRSRV_MISC_INFO_FREEMEM_PRESENT);
+
+ i32Count = OSSNPrintf(pszStr, 100, "\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+
+ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
+ (psSysData->psGlobalEventObject != IMG_NULL))
+ {
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
+ psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
+ }
+
+
+
+ if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
+ && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
+ && (psMiscInfo->pszMemoryStr != IMG_NULL))
+ {
+ IMG_CHAR *pszStr;
+ IMG_UINT32 ui32StrLen;
+ IMG_UINT32 ui32LenStrPerNum = 12;
+ IMG_INT32 i32Count;
+ IMG_INT i;
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
+
+
+ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
+ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
+ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI;
+ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO;
+
+ pszStr = psMiscInfo->pszMemoryStr;
+ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
+
+ for (i=0; i<4; i++)
+ {
+ if (ui32StrLen < ui32LenStrPerNum)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ if (i != 3)
+ {
+ i32Count = OSSNPrintf(pszStr, 2, ".");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+ }
+ }
+
+ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
+ {
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;
+
+ if(psMiscInfo->sCacheOpCtl.bDeferOp)
+ {
+
+ psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
+ }
+ else
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = psMiscInfo->sCacheOpCtl.psKernelMemInfo;
+
+ if(!psMiscInfo->sCacheOpCtl.psKernelMemInfo)
+#else
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+
+ if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
+#endif
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
+ "Ignoring non-deferred cache op with no meminfo"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
+ "Deferred cache op is pending. It is unlikely you want "
+ "to combine deferred cache ops with immediate ones"));
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#else
+
+ psPerProc = PVRSRVFindPerProcessData();
+
+ if(PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelMemInfo,
+ psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
+ "Can't find kernel meminfo"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif
+
+ if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
+ {
+ if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
+ psMiscInfo->sCacheOpCtl.pvBaseVAddr,
+ psMiscInfo->sCacheOpCtl.ui32Length))
+ {
+ return PVRSRV_ERROR_CACHEOP_FAILED;
+ }
+ }
+ else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+ {
+ if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
+ psMiscInfo->sCacheOpCtl.pvBaseVAddr,
+ psMiscInfo->sCacheOpCtl.ui32Length))
+ {
+ return PVRSRV_ERROR_CACHEOP_FAILED;
+ }
+ }
+ }
+ }
+
+ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL)
+ {
+#if !defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+#endif
+
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ PVR_DBG_BREAK
+#else
+
+ psPerProc = PVRSRVFindPerProcessData();
+
+ if(PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_PVOID *)&psKernelMemInfo,
+ psMiscInfo->sGetRefCountCtl.u.psKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
+ "Can't find kernel meminfo"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount;
+#endif
+ }
+
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
+ {
+ PVR_LOG(("User requested OS reset"));
+ OSPanic();
+ }
+#endif
+
+ if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT) != 0UL)
+ {
+ PVRSRVSetDCState(DC_STATE_FORCE_SWAP_TO_SYSTEM);
+ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT;
+ }
+
+ return PVRSRV_OK;
+}
+
+
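+/* Per-device low-level ISR: if this device's interrupt bit is asserted, call
+ * its registered ISR and then clear the interrupt source. */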
+IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ SYS_DATA *psSysData;
+ IMG_BOOL bStatus = IMG_FALSE;
+ IMG_UINT32 ui32InterruptSource;
+
+ if(!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
+ goto out;
+ }
+ psSysData = psDeviceNode->psSysData;
+
+
+ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
+ if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
+ {
+ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
+ {
+ bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
+ }
+
+ SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
+ }
+
+out:
+ return bStatus;
+}
+
+static IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+
+ IMG_BOOL *pbStatus;
+ IMG_UINT32 *pui32InterruptSource;
+ IMG_UINT32 *pui32ClearInterrupts;
+
+ pbStatus = va_arg(va, IMG_BOOL*);
+ pui32InterruptSource = va_arg(va, IMG_UINT32*);
+ pui32ClearInterrupts = va_arg(va, IMG_UINT32*);
+
+
+ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
+ {
+ if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
+ {
+ if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
+ {
+
+ *pbStatus = IMG_TRUE;
+ }
+
+ *pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
+ }
+ }
+}
+
+IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = pvSysData;
+ IMG_BOOL bStatus = IMG_FALSE;
+ IMG_UINT32 ui32InterruptSource;
+ IMG_UINT32 ui32ClearInterrupts = 0;
+ if(!psSysData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
+ }
+ else
+ {
+
+ ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);
+
+
+ if(ui32InterruptSource)
+ {
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
+ &PVRSRVSystemLISR_ForEachVaCb,
+ &bStatus,
+ &ui32InterruptSource,
+ &ui32ClearInterrupts);
+
+ SysClearInterrupts(psSysData, ui32ClearInterrupts);
+ }
+ }
+ return bStatus;
+}
+
+
+static IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
+ {
+ (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
+ }
+}
+
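+/* Mid-level interrupt work: run each device's MISR, retry queue processing
+ * once if it reported PVRSRV_ERROR_PROCESSING_BLOCKED, then signal the global
+ * event object so waiters can re-check their sync state. */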
+IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
+{
+ SYS_DATA *psSysData = pvSysData;
+ if(!psSysData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
+ return;
+ }
+
+
+ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
+ &PVRSRVMISR_ForEachCb);
+
+
+ if (PVRSRVProcessQueues(IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
+ {
+ PVRSRVProcessQueues(IMG_FALSE);
+ }
+
+
+ if (psSysData->psGlobalEventObject)
+ {
+ IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
+ if(hOSEventKM)
+ {
+ OSEventObjectSignalKM(hOSEventKM);
+ }
+ }
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags)
+{
+ return PVRSRVPerProcessDataConnect(ui32PID, ui32Flags);
+}
+
+
+IMG_EXPORT
+IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID)
+{
+ PVRSRVPerProcessDataDisconnect(ui32PID);
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer,
+ IMG_SIZE_T *puiBufSize, IMG_BOOL bSave)
+{
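+	/* Walk every live segment in the arena.  With pbyBuffer == IMG_NULL only
+	 * the space required is accumulated and returned via *puiBufSize;
+	 * otherwise each segment is mapped and either saved to or restored from
+	 * the caller's buffer, depending on bSave. */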
+ IMG_SIZE_T uiBytesSaved = 0;
+ IMG_PVOID pvLocalMemCPUVAddr;
+ RA_SEGMENT_DETAILS sSegDetails;
+
+ if (hArena == IMG_NULL)
+ {
+ return (PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ sSegDetails.uiSize = 0;
+ sSegDetails.sCpuPhyAddr.uiAddr = 0;
+ sSegDetails.hSegment = 0;
+
+
+ while (RA_GetNextLiveSegment(hArena, &sSegDetails))
+ {
+ if (pbyBuffer == IMG_NULL)
+ {
+
+ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
+ }
+ else
+ {
+ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
+ {
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
+
+
+ pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
+ sSegDetails.uiSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ if (pvLocalMemCPUVAddr == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+
+ if (bSave)
+ {
+
+ OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
+ pbyBuffer += sizeof(sSegDetails.uiSize);
+
+ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
+ pbyBuffer += sSegDetails.uiSize;
+ }
+ else
+ {
+ IMG_UINT32 uiSize;
+
+ OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
+
+ if (uiSize != sSegDetails.uiSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
+ }
+ else
+ {
+ pbyBuffer += sizeof(sSegDetails.uiSize);
+
+ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
+ pbyBuffer += sSegDetails.uiSize;
+ }
+ }
+
+
+ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
+
+ OSUnMapPhysToLin(pvLocalMemCPUVAddr,
+ sSegDetails.uiSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ }
+ }
+
+ if (pbyBuffer == IMG_NULL)
+ {
+ *puiBufSize = uiBytesSaved;
+ }
+
+ return (PVRSRV_OK);
+}
+
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError)
+{
+
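+	/* pvrsrv_errors.h expands to a switch (eError) statement mapping each
+	 * PVRSRV_ERROR code to a printable string. */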
+#include "pvrsrv_errors.h"
+}
+
+static IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
+ {
+
+ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
+ }
+}
+
+IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID)
+{
+ SYS_DATA *psSysData;
+ SysAcquireData(&psSysData);
+
+
+ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
+ &PVRSRVCommandCompleteCallbacks_ForEachCb);
+}
+
+IMG_EXPORT
+IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID)
+{
+ PVRSRVScheduleDeviceCallbacks();
+}
+
diff --git a/drivers/gpu/pvr/pvrsrv_errors.h b/drivers/gpu/pvr/pvrsrv_errors.h
new file mode 100644
index 0000000..5474984
--- /dev/null
+++ b/drivers/gpu/pvr/pvrsrv_errors.h
@@ -0,0 +1,266 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__PVRSRV_ERRORS_H__)
+#define __PVRSRV_ERRORS_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+ switch (eError)
+ {
+ case PVRSRV_OK: return "No Errors";
+ case PVRSRV_ERROR_OUT_OF_MEMORY: return "PVRSRV_ERROR_OUT_OF_MEMORY - Unable to allocate required memory";
+ case PVRSRV_ERROR_TOO_FEW_BUFFERS: return "PVRSRV_ERROR_TOO_FEW_BUFFERS";
+ case PVRSRV_ERROR_INVALID_PARAMS: return "PVRSRV_ERROR_INVALID_PARAMS";
+ case PVRSRV_ERROR_INIT_FAILURE: return "PVRSRV_ERROR_INIT_FAILURE";
+ case PVRSRV_ERROR_CANT_REGISTER_CALLBACK: return "PVRSRV_ERROR_CANT_REGISTER_CALLBACK";
+ case PVRSRV_ERROR_INVALID_DEVICE: return "PVRSRV_ERROR_INVALID_DEVICE";
+ case PVRSRV_ERROR_NOT_OWNER: return "PVRSRV_ERROR_NOT_OWNER";
+ case PVRSRV_ERROR_BAD_MAPPING: return "PVRSRV_ERROR_BAD_MAPPING";
+ case PVRSRV_ERROR_TIMEOUT: return "PVRSRV_ERROR_TIMEOUT";
+ case PVRSRV_ERROR_FLIP_CHAIN_EXISTS: return "PVRSRV_ERROR_FLIP_CHAIN_EXISTS";
+ case PVRSRV_ERROR_INVALID_SWAPINTERVAL: return "PVRSRV_ERROR_INVALID_SWAPINTERVAL";
+ case PVRSRV_ERROR_SCENE_INVALID: return "PVRSRV_ERROR_SCENE_INVALID";
+ case PVRSRV_ERROR_STREAM_ERROR: return "PVRSRV_ERROR_STREAM_ERROR";
+ case PVRSRV_ERROR_FAILED_DEPENDENCIES: return "PVRSRV_ERROR_FAILED_DEPENDENCIES";
+ case PVRSRV_ERROR_CMD_NOT_PROCESSED: return "PVRSRV_ERROR_CMD_NOT_PROCESSED";
+ case PVRSRV_ERROR_CMD_TOO_BIG: return "PVRSRV_ERROR_CMD_TOO_BIG";
+ case PVRSRV_ERROR_DEVICE_REGISTER_FAILED: return "PVRSRV_ERROR_DEVICE_REGISTER_FAILED";
+ case PVRSRV_ERROR_TOOMANYBUFFERS: return "PVRSRV_ERROR_TOOMANYBUFFERS";
+ case PVRSRV_ERROR_NOT_SUPPORTED: return "PVRSRV_ERROR_NOT_SUPPORTED - fix";
+ case PVRSRV_ERROR_PROCESSING_BLOCKED: return "PVRSRV_ERROR_PROCESSING_BLOCKED";
+
+ case PVRSRV_ERROR_CANNOT_FLUSH_QUEUE: return "PVRSRV_ERROR_CANNOT_FLUSH_QUEUE";
+ case PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE: return "PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE";
+ case PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS: return "PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS";
+ case PVRSRV_ERROR_RETRY: return "PVRSRV_ERROR_RETRY";
+
+ case PVRSRV_ERROR_DDK_VERSION_MISMATCH: return "PVRSRV_ERROR_DDK_VERSION_MISMATCH";
+ case PVRSRV_ERROR_BUILD_MISMATCH: return "PVRSRV_ERROR_BUILD_MISMATCH";
+ case PVRSRV_ERROR_CORE_REVISION_MISMATCH: return "PVRSRV_ERROR_CORE_REVISION_MISMATCH";
+
+ case PVRSRV_ERROR_UPLOAD_TOO_BIG: return "PVRSRV_ERROR_UPLOAD_TOO_BIG";
+
+ case PVRSRV_ERROR_INVALID_FLAGS: return "PVRSRV_ERROR_INVALID_FLAGS";
+ case PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS: return "PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS";
+
+ case PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY: return "PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY";
+ case PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR: return "PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR";
+ case PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED: return "PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED";
+
+ case PVRSRV_ERROR_BRIDGE_CALL_FAILED: return "PVRSRV_ERROR_BRIDGE_CALL_FAILED";
+ case PVRSRV_ERROR_IOCTL_CALL_FAILED: return "PVRSRV_ERROR_IOCTL_CALL_FAILED";
+
+ case PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND: return "PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND";
+ case PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND: return "PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND";
+ case PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT:return "PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT";
+
+ case PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND: return "PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND";
+ case PVRSRV_ERROR_PCI_CALL_FAILED: return "PVRSRV_ERROR_PCI_CALL_FAILED";
+ case PVRSRV_ERROR_PCI_REGION_TOO_SMALL: return "PVRSRV_ERROR_PCI_REGION_TOO_SMALL";
+ case PVRSRV_ERROR_PCI_REGION_UNAVAILABLE: return "PVRSRV_ERROR_PCI_REGION_UNAVAILABLE";
+ case PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH: return "PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH";
+
+ case PVRSRV_ERROR_REGISTER_BASE_NOT_SET: return "PVRSRV_ERROR_REGISTER_BASE_NOT_SET";
+
+ case PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE: return "PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE";
+
+ case PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM: return "PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM";
+ case PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY: return "PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY";
+ case PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC: return "PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC";
+ case PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR: return "PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR";
+
+ case PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY: return "PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY";
+ case PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY: return "PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY";
+
+ case PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES: return "PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES";
+ case PVRSRV_ERROR_FAILED_TO_FREE_PAGES: return "PVRSRV_ERROR_FAILED_TO_FREE_PAGES";
+ case PVRSRV_ERROR_FAILED_TO_COPY_PAGES: return "PVRSRV_ERROR_FAILED_TO_COPY_PAGES";
+ case PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES: return "PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES";
+ case PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES: return "PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES";
+ case PVRSRV_ERROR_STILL_MAPPED: return "PVRSRV_ERROR_STILL_MAPPED";
+ case PVRSRV_ERROR_MAPPING_NOT_FOUND: return "PVRSRV_ERROR_MAPPING_NOT_FOUND";
+ case PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT: return "PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT";
+ case PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE: return "PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE";
+
+ case PVRSRV_ERROR_INVALID_SEGMENT_BLOCK: return "PVRSRV_ERROR_INVALID_SEGMENT_BLOCK";
+ case PVRSRV_ERROR_INVALID_SGXDEVDATA: return "PVRSRV_ERROR_INVALID_SGXDEVDATA";
+ case PVRSRV_ERROR_INVALID_DEVINFO: return "PVRSRV_ERROR_INVALID_DEVINFO";
+ case PVRSRV_ERROR_INVALID_MEMINFO: return "PVRSRV_ERROR_INVALID_MEMINFO";
+ case PVRSRV_ERROR_INVALID_MISCINFO: return "PVRSRV_ERROR_INVALID_MISCINFO";
+ case PVRSRV_ERROR_UNKNOWN_IOCTL: return "PVRSRV_ERROR_UNKNOWN_IOCTL";
+ case PVRSRV_ERROR_INVALID_CONTEXT: return "PVRSRV_ERROR_INVALID_CONTEXT";
+ case PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT";
+ case PVRSRV_ERROR_INVALID_HEAP: return "PVRSRV_ERROR_INVALID_HEAP";
+ case PVRSRV_ERROR_INVALID_KERNELINFO: return "PVRSRV_ERROR_INVALID_KERNELINFO";
+ case PVRSRV_ERROR_UNKNOWN_POWER_STATE: return "PVRSRV_ERROR_UNKNOWN_POWER_STATE";
+ case PVRSRV_ERROR_INVALID_HANDLE_TYPE: return "PVRSRV_ERROR_INVALID_HANDLE_TYPE";
+ case PVRSRV_ERROR_INVALID_WRAP_TYPE: return "PVRSRV_ERROR_INVALID_WRAP_TYPE";
+ case PVRSRV_ERROR_INVALID_PHYS_ADDR: return "PVRSRV_ERROR_INVALID_PHYS_ADDR";
+ case PVRSRV_ERROR_INVALID_CPU_ADDR: return "PVRSRV_ERROR_INVALID_CPU_ADDR";
+ case PVRSRV_ERROR_INVALID_HEAPINFO: return "PVRSRV_ERROR_INVALID_HEAPINFO";
+ case PVRSRV_ERROR_INVALID_PERPROC: return "PVRSRV_ERROR_INVALID_PERPROC";
+ case PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO: return "PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO";
+ case PVRSRV_ERROR_INVALID_MAP_REQUEST: return "PVRSRV_ERROR_INVALID_MAP_REQUEST";
+ case PVRSRV_ERROR_INVALID_UNMAP_REQUEST: return "PVRSRV_ERROR_INVALID_UNMAP_REQUEST";
+ case PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP: return "PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP";
+ case PVRSRV_ERROR_MAPPING_STILL_IN_USE: return "PVRSRV_ERROR_MAPPING_STILL_IN_USE";
+
+ case PVRSRV_ERROR_EXCEEDED_HW_LIMITS: return "PVRSRV_ERROR_EXCEEDED_HW_LIMITS";
+ case PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED: return "PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED";
+
+ case PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA:return "PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA";
+ case PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT: return "PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT";
+ case PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT";
+ case PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT";
+ case PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT";
+ case PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD: return "PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD";
+ case PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD: return "PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD";
+ case PVRSRV_ERROR_THREAD_READ_ERROR: return "PVRSRV_ERROR_THREAD_READ_ERROR";
+ case PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER:return "PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER";
+ case PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR: return "PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR";
+ case PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR: return "PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR";
+ case PVRSRV_ERROR_ISR_ALREADY_INSTALLED: return "PVRSRV_ERROR_ISR_ALREADY_INSTALLED";
+ case PVRSRV_ERROR_ISR_NOT_INSTALLED: return "PVRSRV_ERROR_ISR_NOT_INSTALLED";
+ case PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT:return "PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT";
+ case PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO: return "PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO";
+ case PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT: return "PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT";
+ case PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES: return "PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES";
+ case PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT";
+ case PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE";
+
+ case PVRSRV_ERROR_INVALID_CCB_COMMAND: return "PVRSRV_ERROR_INVALID_CCB_COMMAND";
+
+ case PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE: return "PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE";
+ case PVRSRV_ERROR_INVALID_LOCK_ID: return "PVRSRV_ERROR_INVALID_LOCK_ID";
+ case PVRSRV_ERROR_RESOURCE_NOT_LOCKED: return "PVRSRV_ERROR_RESOURCE_NOT_LOCKED";
+
+ case PVRSRV_ERROR_FLIP_FAILED: return "PVRSRV_ERROR_FLIP_FAILED";
+ case PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED: return "PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED";
+
+ case PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE: return "PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE";
+
+ case PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED: return "PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED";
+ case PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG: return "PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG";
+ case PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG: return "PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG";
+ case PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG: return "PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG";
+
+ case PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID: return "PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID";
+
+ case PVRSRV_ERROR_BLIT_SETUP_FAILED: return "PVRSRV_ERROR_BLIT_SETUP_FAILED";
+
+ case PVRSRV_ERROR_PDUMP_NOT_AVAILABLE: return "PVRSRV_ERROR_PDUMP_NOT_AVAILABLE";
+ case PVRSRV_ERROR_PDUMP_BUFFER_FULL: return "PVRSRV_ERROR_PDUMP_BUFFER_FULL";
+ case PVRSRV_ERROR_PDUMP_BUF_OVERFLOW: return "PVRSRV_ERROR_PDUMP_BUF_OVERFLOW";
+ case PVRSRV_ERROR_PDUMP_NOT_ACTIVE: return "PVRSRV_ERROR_PDUMP_NOT_ACTIVE";
+ case PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES:return "PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES";
+
+ case PVRSRV_ERROR_MUTEX_DESTROY_FAILED: return "PVRSRV_ERROR_MUTEX_DESTROY_FAILED";
+ case PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR: return "PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR";
+
+ case PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE: return "PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE";
+ case PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND:return "PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND";
+
+ case PVRSRV_ERROR_PROCESS_NOT_INITIALISED: return "PVRSRV_ERROR_PROCESS_NOT_INITIALISED";
+ case PVRSRV_ERROR_PROCESS_NOT_FOUND: return "PVRSRV_ERROR_PROCESS_NOT_FOUND";
+ case PVRSRV_ERROR_SRV_CONNECT_FAILED: return "PVRSRV_ERROR_SRV_CONNECT_FAILED";
+ case PVRSRV_ERROR_SRV_DISCONNECT_FAILED: return "PVRSRV_ERROR_SRV_DISCONNECT_FAILED";
+ case PVRSRV_ERROR_DEINT_PHASE_FAILED: return "PVRSRV_ERROR_DEINT_PHASE_FAILED";
+ case PVRSRV_ERROR_INIT2_PHASE_FAILED: return "PVRSRV_ERROR_INIT2_PHASE_FAILED";
+
+ case PVRSRV_ERROR_NO_DC_DEVICES_FOUND: return "PVRSRV_ERROR_NO_DC_DEVICES_FOUND";
+ case PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE: return "PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE";
+ case PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE: return "PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE";
+ case PVRSRV_ERROR_NO_DEVICEDATA_FOUND: return "PVRSRV_ERROR_NO_DEVICEDATA_FOUND";
+ case PVRSRV_ERROR_NO_DEVICENODE_FOUND: return "PVRSRV_ERROR_NO_DEVICENODE_FOUND";
+ case PVRSRV_ERROR_NO_CLIENTNODE_FOUND: return "PVRSRV_ERROR_NO_CLIENTNODE_FOUND";
+ case PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE: return "PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE";
+
+ case PVRSRV_ERROR_UNABLE_TO_INIT_TASK: return "PVRSRV_ERROR_UNABLE_TO_INIT_TASK";
+ case PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK: return "PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK";
+ case PVRSRV_ERROR_UNABLE_TO_KILL_TASK: return "PVRSRV_ERROR_UNABLE_TO_KILL_TASK";
+
+ case PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER";
+ case PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER";
+ case PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER";
+
+ case PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT: return "PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT";
+ case PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION: return "PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION";
+
+ case PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE: return "PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE";
+ case PVRSRV_ERROR_HANDLE_NOT_ALLOCATED: return "PVRSRV_ERROR_HANDLE_NOT_ALLOCATED";
+ case PVRSRV_ERROR_HANDLE_TYPE_MISMATCH: return "PVRSRV_ERROR_HANDLE_TYPE_MISMATCH";
+ case PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE: return "PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE";
+ case PVRSRV_ERROR_HANDLE_NOT_SHAREABLE: return "PVRSRV_ERROR_HANDLE_NOT_SHAREABLE";
+ case PVRSRV_ERROR_HANDLE_NOT_FOUND: return "PVRSRV_ERROR_HANDLE_NOT_FOUND";
+ case PVRSRV_ERROR_INVALID_SUBHANDLE: return "PVRSRV_ERROR_INVALID_SUBHANDLE";
+ case PVRSRV_ERROR_HANDLE_BATCH_IN_USE: return "PVRSRV_ERROR_HANDLE_BATCH_IN_USE";
+ case PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE: return "PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE";
+
+ case PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE: return "PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE";
+ case PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED:return "PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED";
+
+ case PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE: return "PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE";
+ case PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP";
+
+ case PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE: return "PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE";
+
+ case PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE: return "PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE";
+ case PVRSRV_ERROR_INVALID_DEVICEID: return "PVRSRV_ERROR_INVALID_DEVICEID";
+ case PVRSRV_ERROR_DEVICEID_NOT_FOUND: return "PVRSRV_ERROR_DEVICEID_NOT_FOUND";
+
+ case PVRSRV_ERROR_MEMORY_TEST_FAILED: return "PVRSRV_ERROR_MEMORY_TEST_FAILED";
+ case PVRSRV_ERROR_CPUPADDR_TEST_FAILED: return "PVRSRV_ERROR_CPUPADDR_TEST_FAILED";
+ case PVRSRV_ERROR_COPY_TEST_FAILED: return "PVRSRV_ERROR_COPY_TEST_FAILED";
+
+ case PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED: return "PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED";
+
+ case PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK";
+ case PVRSRV_ERROR_CLOCK_REQUEST_FAILED: return "PVRSRV_ERROR_CLOCK_REQUEST_FAILED";
+ case PVRSRV_ERROR_DISABLE_CLOCK_FAILURE: return "PVRSRV_ERROR_DISABLE_CLOCK_FAILURE";
+ case PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE: return "PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE";
+ case PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE: return "PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE";
+ case PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK";
+ case PVRSRV_ERROR_UNABLE_TO_GET_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_CLOCK";
+ case PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK";
+ case PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK";
+
+ case PVRSRV_ERROR_UNKNOWN_SGL_ERROR: return "PVRSRV_ERROR_UNKNOWN_SGL_ERROR";
+ case PVRSRV_ERROR_BAD_SYNC_STATE: return "PVRSRV_ERROR_BAD_SYNC_STATE";
+
+ case PVRSRV_ERROR_FORCE_I32: return "PVRSRV_ERROR_FORCE_I32";
+
+ default:
+ return "Unknown PVRSRV error number";
+ }
+
+#if defined (__cplusplus)
+}
+#endif
+#endif
+
diff --git a/drivers/gpu/pvr/pvrversion.h b/drivers/gpu/pvr/pvrversion.h
new file mode 100644
index 0000000..99a55f5
--- /dev/null
+++ b/drivers/gpu/pvr/pvrversion.h
@@ -0,0 +1,51 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*/ /**************************************************************************/
+
+#ifndef _PVRVERSION_H_
+#define _PVRVERSION_H_
+
+#define PVR_STR(X) #X
+#define PVR_STR2(X) PVR_STR(X)
+
+#define PVRVERSION_MAJ 1
+#define PVRVERSION_MIN 8
+#define PVRVERSION_BRANCH 18
+
+#define PVRVERSION_FAMILY "sgxddk"
+#define PVRVERSION_BRANCHNAME "1.8.GOOGLENEXUS.ED945322"
+#define PVRVERSION_BUILD 2198402
+#define PVRVERSION_BSCONTROL "CustomerGoogle_Android_ogles1_ogles2_GPL"
+
+#define PVRVERSION_STRING "CustomerGoogle_Android_ogles1_ogles2_GPL sgxddk 18 1.8.GOOGLENEXUS.ED945322@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING_SHORT "1.8.GOOGLENEXUS.ED945322@" PVR_STR2(PVRVERSION_BUILD)
+
+#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI 219
+#define PVRVERSION_BUILD_LO 8402
+#define PVRVERSION_STRING_NUMERIC PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
+
+#endif /* _PVRVERSION_H_ */
diff --git a/drivers/gpu/pvr/queue.c b/drivers/gpu/pvr/queue.c
new file mode 100644
index 0000000..374bf7b
--- /dev/null
+++ b/drivers/gpu/pvr/queue.c
@@ -0,0 +1,1213 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "pvr_bridge_km.h"
+
+#include "lists.h"
+#include "ttrace.h"
+
+#if defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
+#define DC_NUM_COMMANDS_PER_TYPE 2
+#else
+#define DC_NUM_COMMANDS_PER_TYPE 1
+#endif
+
+typedef struct _DEVICE_COMMAND_DATA_
+{
+ PFN_CMD_PROC pfnCmdProc;
+ PCOMMAND_COMPLETE_DATA apsCmdCompleteData[DC_NUM_COMMANDS_PER_TYPE];
+ IMG_UINT32 ui32CCBOffset;
+ IMG_UINT32 ui32MaxDstSyncCount;
+ IMG_UINT32 ui32MaxSrcSyncCount;
+} DEVICE_COMMAND_DATA;
+
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include "proc.h"
+
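+/* /proc seq_file handlers: dump every outstanding command in each queue along
+ * with its source sync read/write op counters. */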
+void ProcSeqShowQueue(struct seq_file *sfile,void* el)
+{
+ PVRSRV_QUEUE_INFO *psQueue = (PVRSRV_QUEUE_INFO*)el;
+ IMG_INT cmds = 0;
+ IMG_SIZE_T ui32ReadOffset;
+ IMG_SIZE_T ui32WriteOffset;
+ PVRSRV_COMMAND *psCmd;
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf( sfile,
+ "Command Queues\n"
+ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
+ return;
+ }
+
+ ui32ReadOffset = psQueue->ui32ReadOffset;
+ ui32WriteOffset = psQueue->ui32WriteOffset;
+
+ while (ui32ReadOffset != ui32WriteOffset)
+ {
+ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
+
+ seq_printf(sfile, "%x %x %5u %6u %3u %5u %2u %2u %3u \n",
+ (IMG_UINTPTR_T)psQueue,
+ (IMG_UINTPTR_T)psCmd,
+ psCmd->ui32ProcessID,
+ psCmd->CommandType,
+ psCmd->uCmdSize,
+ psCmd->ui32DevIndex,
+ psCmd->ui32DstSyncCount,
+ psCmd->ui32SrcSyncCount,
+ psCmd->uDataSize);
+ {
+ IMG_UINT32 i;
+ for (i = 0; i < psCmd->ui32SrcSyncCount; i++)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psCmd->psSrcSync[i].psKernelSyncInfoKM->psSyncData;
+ seq_printf(sfile, " Sync %u: ROP/ROC: 0x%x/0x%x WOP/WOC: 0x%x/0x%x ROC-VA: 0x%x WOC-VA: 0x%x\n",
+ i,
+ psCmd->psSrcSync[i].ui32ReadOps2Pending,
+ psSyncData->ui32ReadOps2Complete,
+ psCmd->psSrcSync[i].ui32WriteOpsPending,
+ psSyncData->ui32WriteOpsComplete,
+ psCmd->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCmd->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr);
+ }
+ }
+
+
+ ui32ReadOffset += psCmd->uCmdSize;
+ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
+ cmds++;
+ }
+
+ if (cmds == 0)
+ {
+ seq_printf(sfile, "%x <empty>\n", (IMG_UINTPTR_T)psQueue);
+ }
+}
+
+void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off)
+{
+ PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
+ SYS_DATA *psSysData;
+
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+
+ psSysData = SysAcquireDataNoCheck();
+ if (psSysData != IMG_NULL)
+ {
+ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM);
+ }
+
+ return psQueue;
+}
+#endif
+
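+/* The command queue is a ring buffer whose size is a power of two, so free
+ * space and the wrapped write offset can be computed with mask arithmetic. */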
+#define GET_SPACE_IN_CMDQ(psQueue) \
+ ((((psQueue)->ui32ReadOffset - (psQueue)->ui32WriteOffset) \
+ + ((psQueue)->ui32QueueSize - 1)) & ((psQueue)->ui32QueueSize - 1))
+
+#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
+ (psQueue)->ui32WriteOffset = ((psQueue)->ui32WriteOffset + (ui32Size)) \
+ & ((psQueue)->ui32QueueSize - 1);
+
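+/* A sync op is "stale" once the completed count has caught up with (or passed)
+ * the pending count sampled when the command was queued. */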
+#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
+ ((ui32OpsComplete) >= (ui32OpsPending))
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetWriteOpsPending)
+#endif
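+/* Sample a sync object's pending write-op counter.  For a write operation the
+ * counter is post-incremented so the returned value is the slot this op will
+ * complete; for a read operation it is only sampled. */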
+static INLINE
+IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
+{
+ IMG_UINT32 ui32WriteOpsPending;
+
+ if(bIsReadOp)
+ {
+ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ else
+ {
+
+
+
+ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+ return ui32WriteOpsPending;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetReadOpsPending)
+#endif
+static INLINE
+IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
+{
+ IMG_UINT32 ui32ReadOpsPending;
+
+ if(bIsReadOp)
+ {
+ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOps2Pending++;
+ }
+ else
+ {
+ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+ }
+
+ return ui32ReadOpsPending;
+}
+
+static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData,
+ IMG_UINT32 i,
+ IMG_BOOL bIsSrc)
+{
+ PVRSRV_SYNC_OBJECT *psSyncObject;
+
+ psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync;
+
+ if (psCmdCompleteData->bInUse)
+ {
+ PVR_LOG(("\t%s %u: ROC DevVAddr:0x%X ROP:0x%x ROC:0x%x, WOC DevVAddr:0x%X WOP:0x%x WOC:0x%x",
+ bIsSrc ? "SRC" : "DEST", i,
+ psSyncObject[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Pending,
+ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete,
+ psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending,
+ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete))
+ }
+ else
+ {
+ PVR_LOG(("\t%s %u: (Not in use)", bIsSrc ? "SRC" : "DEST", i))
+ }
+}
+
+
+static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
+ {
+ IMG_UINT32 ui32CmdCounter, ui32SyncCounter;
+ SYS_DATA *psSysData;
+ DEVICE_COMMAND_DATA *psDeviceCommandData;
+ PCOMMAND_COMPLETE_DATA psCmdCompleteData;
+
+ SysAcquireData(&psSysData);
+
+ psDeviceCommandData = psSysData->apsDeviceCommandData[psDeviceNode->sDevId.ui32DeviceIndex];
+
+ if (psDeviceCommandData != IMG_NULL)
+ {
+ for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
+ {
+ psCmdCompleteData = psDeviceCommandData[DC_FLIP_COMMAND].apsCmdCompleteData[ui32CmdCounter];
+
+ PVR_LOG(("Flip Command Complete Data %u for display device %u:",
+ ui32CmdCounter, psDeviceNode->sDevId.ui32DeviceIndex))
+
+ for (ui32SyncCounter = 0;
+ ui32SyncCounter < psCmdCompleteData->ui32SrcSyncCount;
+ ui32SyncCounter++)
+ {
+ QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_TRUE);
+ }
+
+ for (ui32SyncCounter = 0;
+ ui32SyncCounter < psCmdCompleteData->ui32DstSyncCount;
+ ui32SyncCounter++)
+ {
+ QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_FALSE);
+ }
+ }
+ }
+ else
+ {
+ PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex))
+ }
+ }
+}
+
+
+IMG_VOID QueueDumpDebugInfo(IMG_VOID)
+{
+ SYS_DATA *psSysData;
+ SysAcquireData(&psSysData);
+ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, &QueueDumpDebugInfo_ForEachCb);
+}
+
+
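+/* Round ui32Value up to a power of two (values already a power of two are
+ * returned unchanged; 0 yields 0); used to size the command queue ring. */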
+static IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value)
+{
+ IMG_SIZE_T ui32Temp, ui32Result = 1;
+
+ if(!ui32Value)
+ return 0;
+
+ ui32Temp = ui32Value - 1;
+ while(ui32Temp)
+ {
+ ui32Result <<= 1;
+ ui32Temp >>= 1;
+ }
+
+ return ui32Result;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
+ PVRSRV_QUEUE_INFO **ppsQueueInfo)
+{
+ PVRSRV_QUEUE_INFO *psQueueInfo;
+ IMG_SIZE_T ui32Power2QueueSize = NearestPower2(ui32QueueSize);
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hMemBlock;
+
+ SysAcquireData(&psSysData);
+
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_QUEUE_INFO),
+ (IMG_VOID **)&psQueueInfo, &hMemBlock,
+ "Queue Info");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
+ goto ErrorExit;
+ }
+ OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
+
+ psQueueInfo->hMemBlock[0] = hMemBlock;
+ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+
+
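+	/* The ring buffer is over-allocated by PVRSRV_MAX_CMD_SIZE so a command
+	 * written near the end of the queue can be stored contiguously before
+	 * the write offset wraps. */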
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
+ &psQueueInfo->pvLinQueueKM, &hMemBlock,
+ "Command Queue");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
+ goto ErrorExit;
+ }
+
+ psQueueInfo->hMemBlock[1] = hMemBlock;
+ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
+
+
+ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
+ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
+
+ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
+
+
+ if (psSysData->psQueueList == IMG_NULL)
+ {
+ eError = OSCreateResource(&psSysData->sQProcessResource);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ }
+
+
+ eError = OSLockResource(&psSysData->sQProcessResource,
+ KERNEL_ID);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ psQueueInfo->psNextKM = psSysData->psQueueList;
+ psSysData->psQueueList = psQueueInfo;
+
+ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ *ppsQueueInfo = psQueueInfo;
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ if(psQueueInfo)
+ {
+ if(psQueueInfo->pvLinQueueKM)
+ {
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ psQueueInfo->ui32QueueSize,
+ psQueueInfo->pvLinQueueKM,
+ psQueueInfo->hMemBlock[1]);
+ psQueueInfo->pvLinQueueKM = IMG_NULL;
+ }
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_QUEUE_INFO),
+ psQueueInfo,
+ psQueueInfo->hMemBlock[0]);
+
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
+{
+ PVRSRV_QUEUE_INFO *psQueue;
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bTimeout = IMG_TRUE;
+
+ SysAcquireData(&psSysData);
+
+ psQueue = psSysData->psQueueList;
+
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+ {
+ bTimeout = IMG_FALSE;
+ break;
+ }
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (bTimeout)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
+ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
+ goto ErrorExit;
+ }
+
+
+ eError = OSLockResource(&psSysData->sQProcessResource,
+ KERNEL_ID);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ if(psQueue == psQueueInfo)
+ {
+ psSysData->psQueueList = psQueueInfo->psNextKM;
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
+ psQueueInfo->pvLinQueueKM,
+ psQueueInfo->hMemBlock[1]);
+ psQueueInfo->pvLinQueueKM = IMG_NULL;
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_QUEUE_INFO),
+ psQueueInfo,
+ psQueueInfo->hMemBlock[0]);
+
+ psQueueInfo = IMG_NULL;
+ }
+ else
+ {
+ while(psQueue)
+ {
+ if(psQueue->psNextKM == psQueueInfo)
+ {
+ psQueue->psNextKM = psQueueInfo->psNextKM;
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ psQueueInfo->ui32QueueSize,
+ psQueueInfo->pvLinQueueKM,
+ psQueueInfo->hMemBlock[1]);
+ psQueueInfo->pvLinQueueKM = IMG_NULL;
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_QUEUE_INFO),
+ psQueueInfo,
+ psQueueInfo->hMemBlock[0]);
+
+ psQueueInfo = IMG_NULL;
+ break;
+ }
+ psQueue = psQueue->psNextKM;
+ }
+
+ if(!psQueue)
+ {
+ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrorExit;
+ }
+ }
+
+
+ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+
+ if (psSysData->psQueueList == IMG_NULL)
+ {
+ eError = OSDestroyResource(&psSysData->sQProcessResource);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ }
+
+ErrorExit:
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
+ IMG_SIZE_T ui32ParamSize,
+ IMG_VOID **ppvSpace)
+{
+ IMG_BOOL bTimeout = IMG_TRUE;
+
+
+ ui32ParamSize = (ui32ParamSize+3) & 0xFFFFFFFC;
+
+ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
+ return PVRSRV_ERROR_CMD_TOO_BIG;
+ }
+
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
+ {
+ bTimeout = IMG_FALSE;
+ break;
+ }
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (bTimeout == IMG_TRUE)
+ {
+ *ppvSpace = IMG_NULL;
+
+ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
+ }
+ else
+ {
+ *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
+ PVRSRV_COMMAND **ppsCommand,
+ IMG_UINT32 ui32DevIndex,
+ IMG_UINT16 CommandType,
+ IMG_UINT32 ui32DstSyncCount,
+ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
+ IMG_UINT32 ui32SrcSyncCount,
+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
+ IMG_SIZE_T ui32DataByteSize,
+ PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete,
+ IMG_HANDLE hCallbackData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_COMMAND *psCommand;
+ IMG_SIZE_T ui32CommandSize;
+ IMG_UINT32 i;
+ SYS_DATA *psSysData;
+ DEVICE_COMMAND_DATA *psDeviceCommandData;
+
+
+ SysAcquireData(&psSysData);
+ psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex];
+
+ if ((psDeviceCommandData[CommandType].ui32MaxDstSyncCount < ui32DstSyncCount) ||
+ (psDeviceCommandData[CommandType].ui32MaxSrcSyncCount < ui32SrcSyncCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: Too many syncs"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;
+
+
+ ui32CommandSize = sizeof(PVRSRV_COMMAND)
+ + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
+ + ui32DataByteSize;
+
+
+ eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
+
+
+ psCommand->uCmdSize = ui32CommandSize;
+ psCommand->ui32DevIndex = ui32DevIndex;
+ psCommand->CommandType = CommandType;
+ psCommand->ui32DstSyncCount = ui32DstSyncCount;
+ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
+
+
+ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND));
+
+
+ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync)
+ + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+
+ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync)
+ + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+ psCommand->uDataSize = ui32DataByteSize;
+
+ psCommand->pfnCommandComplete = pfnCommandComplete;
+ psCommand->hCallbackData = hCallbackData;
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_START, QUEUE_TOKEN_INSERTKM);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_NONE,
+ QUEUE_TOKEN_COMMAND_TYPE, CommandType);
+
+
+ for (i=0; i<ui32DstSyncCount; i++)
+ {
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC,
+ apsDstSync[i], PVRSRV_SYNCOP_SAMPLE);
+
+ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
+ psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
+ psCommand->psDstSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
+
+ PVRSRVKernelSyncInfoIncRef(apsDstSync[i], IMG_NULL);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
+ i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCommand->psDstSync[i].ui32ReadOps2Pending,
+ psCommand->psDstSync[i].ui32WriteOpsPending));
+ }
+
+
+ for (i=0; i<ui32SrcSyncCount; i++)
+ {
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC,
+ apsSrcSync[i], PVRSRV_SYNCOP_SAMPLE);
+
+ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
+ psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
+ psCommand->psSrcSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);
+
+ PVRSRVKernelSyncInfoIncRef(apsSrcSync[i], IMG_NULL);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
+ i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCommand->psSrcSync[i].ui32ReadOps2Pending,
+ psCommand->psSrcSync[i].ui32WriteOpsPending));
+ }
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_END, QUEUE_TOKEN_INSERTKM);
+
+
+ *ppsCommand = psCommand;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
+ PVRSRV_COMMAND *psCommand)
+{
+
+
+
+ if (psCommand->ui32DstSyncCount > 0)
+ {
+ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
+ }
+
+ if (psCommand->ui32SrcSyncCount > 0)
+ {
+ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
+ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+ }
+
+ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
+ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
+ + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+
+
+ UPDATE_QUEUE_WOFF(psQueue, psCommand->uCmdSize);
+
+ return PVRSRV_OK;
+}
+
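+/* Returns PVRSRV_OK when the sync object appears in the given in-use
+ * command-complete slot and its write ops have completed (or gone stale);
+ * otherwise PVRSRV_ERROR_FAILED_DEPENDENCIES. */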
+static
+PVRSRV_ERROR CheckIfSyncIsQueued(PVRSRV_SYNC_OBJECT *psSync, COMMAND_COMPLETE_DATA *psCmdData)
+{
+ IMG_UINT32 k;
+
+ if (psCmdData->bInUse)
+ {
+ for (k=0;k<psCmdData->ui32SrcSyncCount;k++)
+ {
+ if (psSync->psKernelSyncInfoKM == psCmdData->psSrcSync[k].psKernelSyncInfoKM)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psSync->psKernelSyncInfoKM->psSyncData;
+ IMG_UINT32 ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
+
+
+
+
+ if (ui32WriteOpsComplete == psSync->ui32WriteOpsPending)
+ {
+ return PVRSRV_OK;
+ }
+ else
+ {
+ if (SYNCOPS_STALE(ui32WriteOpsComplete, psSync->ui32WriteOpsPending))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "CheckIfSyncIsQueued: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
+ (IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psSync->ui32WriteOpsPending));
+ return PVRSRV_OK;
+ }
+ }
+ }
+ }
+ }
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+}
+
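+/* Check a queued command's destination and source sync dependencies, claim a
+ * free COMMAND_COMPLETE_DATA slot for it and hand it to the device's
+ * registered command processor; the per-type CCB offset is advanced whether
+ * or not the processor accepted the command. */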
+static
+PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData,
+ PVRSRV_COMMAND *psCommand,
+ IMG_BOOL bFlush)
+{
+ PVRSRV_SYNC_OBJECT *psWalkerObj;
+ PVRSRV_SYNC_OBJECT *psEndObj;
+ IMG_UINT32 i;
+ COMMAND_COMPLETE_DATA *psCmdCompleteData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32WriteOpsComplete;
+ IMG_UINT32 ui32ReadOpsComplete;
+ DEVICE_COMMAND_DATA *psDeviceCommandData;
+ IMG_UINT32 ui32CCBOffset;
+
+
+ psWalkerObj = psCommand->psDstSync;
+ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
+ while (psWalkerObj < psEndObj)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
+
+ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
+ ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete;
+
+ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
+ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending))
+ {
+ if (!bFlush ||
+ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
+ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
+ {
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+ }
+ }
+
+ psWalkerObj++;
+ }
+
+
+ psWalkerObj = psCommand->psSrcSync;
+ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
+ while (psWalkerObj < psEndObj)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
+
+ ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete;
+ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
+
+ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
+ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending))
+ {
+ if (!bFlush &&
+ SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
+ SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
+ (IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
+ }
+
+ if (!bFlush ||
+ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
+ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
+ {
+ IMG_UINT32 j;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bFound = IMG_FALSE;
+
+ psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex];
+ for (j=0;j<DC_NUM_COMMANDS_PER_TYPE;j++)
+ {
+ eError = CheckIfSyncIsQueued(psWalkerObj, psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[j]);
+
+ if (eError == PVRSRV_OK)
+ {
+ bFound = IMG_TRUE;
+ }
+ }
+ if (!bFound)
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+ }
+ }
+ psWalkerObj++;
+ }
+
+
+ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
+ psCommand->ui32DevIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex];
+ ui32CCBOffset = psDeviceCommandData[psCommand->CommandType].ui32CCBOffset;
+ psCmdCompleteData = psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[ui32CCBOffset];
+ if (psCmdCompleteData->bInUse)
+ {
+
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+ }
+
+
+ psCmdCompleteData->bInUse = IMG_TRUE;
+
+
+ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
+ for (i=0; i<psCommand->ui32DstSyncCount; i++)
+ {
+ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)",
+ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending,
+ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending,
+ ui32CCBOffset));
+ }
+
+ psCmdCompleteData->pfnCommandComplete = psCommand->pfnCommandComplete;
+ psCmdCompleteData->hCallbackData = psCommand->hCallbackData;
+
+
+ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
+ for (i=0; i<psCommand->ui32SrcSyncCount; i++)
+ {
+ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)",
+ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending,
+ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending,
+ ui32CCBOffset));
+ }
+
+
+
+
+
+
+
+
+
+
+
+ if (psDeviceCommandData[psCommand->CommandType].pfnCmdProc((IMG_HANDLE)psCmdCompleteData,
+ (IMG_UINT32)psCommand->uDataSize,
+ psCommand->pvData) == IMG_FALSE)
+ {
+
+
+
+ psCmdCompleteData->bInUse = IMG_FALSE;
+ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+
+
+ psDeviceCommandData[psCommand->CommandType].ui32CCBOffset = (ui32CCBOffset + 1) % DC_NUM_COMMANDS_PER_TYPE;
+
+ return eError;
+}
+
+
+static IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if (psDeviceNode->bReProcessDeviceCommandComplete &&
+ psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
+ {
+ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
+ }
+}
+
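+/* Walk every installed command queue and process commands in order until one fails its sync dependencies; with bFlush set, stale dependencies are tolerated. Finish by re-running the command-complete callback on any device flagged for reprocessing. */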
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVProcessQueues(IMG_BOOL bFlush)
+{
+ PVRSRV_QUEUE_INFO *psQueue;
+ SYS_DATA *psSysData;
+ PVRSRV_COMMAND *psCommand;
+ SysAcquireData(&psSysData);
+
+
+
+ while (OSLockResource(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK)
+ {
+ OSWaitus(1);
+	}
+
+ psQueue = psSysData->psQueueList;
+
+ if(!psQueue)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
+ }
+
+ if (bFlush)
+ {
+ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
+ }
+
+ while (psQueue)
+ {
+ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
+ {
+ psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
+
+ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
+ {
+
+ UPDATE_QUEUE_ROFF(psQueue, psCommand->uCmdSize)
+ continue;
+ }
+
+ break;
+ }
+ psQueue = psQueue->psNextKM;
+ }
+
+ if (bFlush)
+ {
+ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
+ }
+
+
+ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
+ &PVRSRVProcessQueues_ForEachCb);
+
+ OSUnlockResource(&psSysData->sQProcessResource, ISR_ID);
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
+IMG_INTERNAL
+IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
+ IMG_BOOL bScheduleMISR)
+{
+ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+
+ psCmdCompleteData->bInUse = IMG_FALSE;
+
+
+ PVRSRVScheduleDeviceCallbacks();
+
+ if(bScheduleMISR)
+ {
+ OSScheduleMISR(psSysData);
+ }
+}
+
+#endif
+
+
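+/* Complete a previously processed command: advance the write/read completion counters on its destination and source sync objects, call the optional completion callback and release the command-complete slot. */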
+IMG_EXPORT
+IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie,
+ IMG_BOOL bScheduleMISR)
+{
+ IMG_UINT32 i;
+ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_START,
+ QUEUE_TOKEN_COMMAND_COMPLETE);
+
+
+ for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
+ {
+ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
+
+ PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM, IMG_NULL);
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_DST,
+ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM,
+ PVRSRV_SYNCOP_COMPLETE);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
+ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending,
+ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
+ }
+
+
+ for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
+ {
+ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete++;
+
+ PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM, IMG_NULL);
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_SRC,
+ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM,
+ PVRSRV_SYNCOP_COMPLETE);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
+ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
+ psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending,
+ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
+ }
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_END,
+ QUEUE_TOKEN_COMMAND_COMPLETE);
+
+ if (psCmdCompleteData->pfnCommandComplete)
+ {
+ psCmdCompleteData->pfnCommandComplete(psCmdCompleteData->hCallbackData);
+ }
+
+
+ psCmdCompleteData->bInUse = IMG_FALSE;
+
+
+ PVRSRVScheduleDeviceCallbacks();
+
+ if(bScheduleMISR)
+ {
+ OSScheduleMISR(psSysData);
+ }
+}
+
+
+
+
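+/* Register the command handlers for a device: allocate per-command-type data and a fixed ring of DC_NUM_COMMANDS_PER_TYPE command-complete slots, each sized for the worst-case number of dst/src sync objects. */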
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
+ PFN_CMD_PROC *ppfnCmdProcList,
+ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
+ IMG_UINT32 ui32CmdCount)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CmdCounter, ui32CmdTypeCounter;
+ IMG_SIZE_T ui32AllocSize;
+ DEVICE_COMMAND_DATA *psDeviceCommandData;
+ COMMAND_COMPLETE_DATA *psCmdCompleteData;
+
+
+ if(ui32DevIndex >= SYS_DEVICE_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
+ ui32DevIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ SysAcquireData(&psSysData);
+
+
+ ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData);
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ ui32AllocSize,
+ (IMG_VOID **)&psDeviceCommandData, IMG_NULL,
+ "Array of Pointers for Command Store");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
+ goto ErrorExit;
+ }
+
+ psSysData->apsDeviceCommandData[ui32DevIndex] = psDeviceCommandData;
+
+ for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++)
+ {
+ psDeviceCommandData[ui32CmdTypeCounter].pfnCmdProc = ppfnCmdProcList[ui32CmdTypeCounter];
+ psDeviceCommandData[ui32CmdTypeCounter].ui32CCBOffset = 0;
+ psDeviceCommandData[ui32CmdTypeCounter].ui32MaxDstSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0];
+ psDeviceCommandData[ui32CmdTypeCounter].ui32MaxSrcSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1];
+ for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
+ {
+
+
+ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
+ + ((ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]
+ + ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1])
+ * sizeof(PVRSRV_SYNC_OBJECT));
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ ui32AllocSize,
+ (IMG_VOID **)&psCmdCompleteData,
+ IMG_NULL,
+ "Command Complete Data");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d", ui32CmdTypeCounter));
+ goto ErrorExit;
+ }
+
+ psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = psCmdCompleteData;
+
+
+ OSMemSet(psCmdCompleteData, 0x00, ui32AllocSize);
+
+
+ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
+ (((IMG_UINTPTR_T)psCmdCompleteData)
+ + sizeof(COMMAND_COMPLETE_DATA));
+ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
+ (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync)
+ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]));
+
+ psCmdCompleteData->ui32AllocSize = (IMG_UINT32)ui32AllocSize;
+ }
+ }
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+
+ if (PVRSRVRemoveCmdProcListKM(ui32DevIndex, ui32CmdCount) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRegisterCmdProcListKM: Failed to clean up after error, device 0x%x",
+ ui32DevIndex));
+ }
+
+ return eError;
+}
+
+
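+/* Undo PVRSRVRegisterCmdProcListKM: free every command-complete slot and the per-device command data. */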
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
+ IMG_UINT32 ui32CmdCount)
+{
+ SYS_DATA *psSysData;
+ IMG_UINT32 ui32CmdTypeCounter, ui32CmdCounter;
+ DEVICE_COMMAND_DATA *psDeviceCommandData;
+ COMMAND_COMPLETE_DATA *psCmdCompleteData;
+ IMG_SIZE_T ui32AllocSize;
+
+
+ if(ui32DevIndex >= SYS_DEVICE_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
+ ui32DevIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ SysAcquireData(&psSysData);
+
+ psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex];
+ if(psDeviceCommandData != IMG_NULL)
+ {
+ for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++)
+ {
+ for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
+ {
+ psCmdCompleteData = psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter];
+
+
+ if (psCmdCompleteData != IMG_NULL)
+ {
+ PVR_ASSERT(psCmdCompleteData->bInUse == IMG_FALSE);
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, psCmdCompleteData->ui32AllocSize,
+ psCmdCompleteData, IMG_NULL);
+ psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = IMG_NULL;
+ }
+ }
+ }
+
+
+ ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData);
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psDeviceCommandData, IMG_NULL);
+ psSysData->apsDeviceCommandData[ui32DevIndex] = IMG_NULL;
+ }
+
+ return PVRSRV_OK;
+}
+
diff --git a/drivers/gpu/pvr/queue.h b/drivers/gpu/pvr/queue.h
new file mode 100644
index 0000000..d8045b1
--- /dev/null
+++ b/drivers/gpu/pvr/queue.h
@@ -0,0 +1,114 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef QUEUE_H
+#define QUEUE_H
+
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
+ (psQueue)->ui32ReadOffset = ((psQueue)->ui32ReadOffset + (ui32Size)) \
+ & ((psQueue)->ui32QueueSize - 1);
+
+ typedef struct _COMMAND_COMPLETE_DATA_
+ {
+ IMG_BOOL bInUse;
+
+ IMG_UINT32 ui32DstSyncCount;
+ IMG_UINT32 ui32SrcSyncCount;
+ PVRSRV_SYNC_OBJECT *psDstSync;
+ PVRSRV_SYNC_OBJECT *psSrcSync;
+ IMG_UINT32 ui32AllocSize;
+ PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete;
+ IMG_HANDLE hCallbackData;
+	} COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
+
+#if !defined(USE_CODE)
+IMG_VOID QueueDumpDebugInfo(IMG_VOID);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVProcessQueues (IMG_BOOL bFlush);
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/types.h>
+#include <linux/seq_file.h>
+void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off);
+void ProcSeqShowQueue(struct seq_file *sfile,void* el);
+#endif
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
+ PVRSRV_QUEUE_INFO **ppsQueueInfo);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
+ PVRSRV_COMMAND **ppsCommand,
+ IMG_UINT32 ui32DevIndex,
+ IMG_UINT16 CommandType,
+ IMG_UINT32 ui32DstSyncCount,
+ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
+ IMG_UINT32 ui32SrcSyncCount,
+ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
+ IMG_SIZE_T ui32DataByteSize,
+ PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete,
+ IMG_HANDLE hCallbackData);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
+ IMG_SIZE_T ui32ParamSize,
+ IMG_VOID **ppvSpace);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
+ PVRSRV_COMMAND *psCommand);
+
+IMG_IMPORT
+IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
+ PFN_CMD_PROC *ppfnCmdProcList,
+ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
+ IMG_UINT32 ui32CmdCount);
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
+ IMG_UINT32 ui32CmdCount);
+
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/ra.c b/drivers/gpu/pvr/ra.c
new file mode 100644
index 0000000..84a2162
--- /dev/null
+++ b/drivers/gpu/pvr/ra.c
@@ -0,0 +1,1736 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "hash.h"
+#include "ra.h"
+#include "buffer_manager.h"
+#include "osfunc.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>
+#include "proc.h"
+#endif
+
+#ifdef USE_BM_FREESPACE_CHECK
+#include <stdio.h>
+#endif
+
+#define MINIMUM_HASH_SIZE (64)
+
+#if defined(VALIDATE_ARENA_TEST)
+
+typedef enum RESOURCE_DESCRIPTOR_TAG {
+
+ RESOURCE_SPAN_LIVE = 10,
+ RESOURCE_SPAN_FREE,
+ IMPORTED_RESOURCE_SPAN_START,
+ IMPORTED_RESOURCE_SPAN_LIVE,
+ IMPORTED_RESOURCE_SPAN_FREE,
+ IMPORTED_RESOURCE_SPAN_END,
+
+} RESOURCE_DESCRIPTOR;
+
+typedef enum RESOURCE_TYPE_TAG {
+
+ IMPORTED_RESOURCE_TYPE = 20,
+ NON_IMPORTED_RESOURCE_TYPE
+
+} RESOURCE_TYPE;
+
+
+static IMG_UINT32 ui32BoundaryTagID = 0;
+
+IMG_UINT32 ValidateArena(RA_ARENA *pArena);
+#endif
+
+struct _BT_
+{
+ enum bt_type
+ {
+ btt_span,
+ btt_free,
+ btt_live
+ } type;
+
+
+ IMG_UINTPTR_T base;
+ IMG_SIZE_T uSize;
+
+
+ struct _BT_ *pNextSegment;
+ struct _BT_ *pPrevSegment;
+
+ struct _BT_ *pNextFree;
+ struct _BT_ *pPrevFree;
+
+ BM_MAPPING *psMapping;
+
+#if defined(VALIDATE_ARENA_TEST)
+ RESOURCE_DESCRIPTOR eResourceSpan;
+ RESOURCE_TYPE eResourceType;
+
+
+ IMG_UINT32 ui32BoundaryTagID;
+#endif
+
+};
+typedef struct _BT_ BT;
+
+
+struct _RA_ARENA_
+{
+
+ IMG_CHAR *name;
+
+
+ IMG_SIZE_T uQuantum;
+
+
+ IMG_BOOL (*pImportAlloc)(IMG_VOID *,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *pBase);
+ IMG_VOID (*pImportFree) (IMG_VOID *,
+ IMG_UINTPTR_T,
+ BM_MAPPING *psMapping);
+ IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE);
+
+
+ IMG_VOID *pImportHandle;
+
+
+#define FREE_TABLE_LIMIT 32
+
+
+ BT *aHeadFree [FREE_TABLE_LIMIT];
+
+
+ BT *pHeadSegment;
+ BT *pTailSegment;
+
+
+ HASH_TABLE *pSegmentHash;
+
+#ifdef RA_STATS
+ RA_STATISTICS sStatistics;
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+#define PROC_NAME_SIZE 64
+
+ struct proc_dir_entry* pProcInfo;
+ struct proc_dir_entry* pProcSegs;
+
+ IMG_BOOL bInitProcEntry;
+#endif
+};
+#if defined(ENABLE_RA_DUMP)
+IMG_VOID RA_Dump (RA_ARENA *pArena);
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+
+static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el);
+static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off);
+
+static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el);
+static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off);
+
+#endif
+
+#ifdef USE_BM_FREESPACE_CHECK
+IMG_VOID CheckBMFreespace(IMG_VOID);
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS)
+{
+ IMG_CHAR *pT;
+
+ for(pT = pS; *pT != 0; pT++)
+ {
+ if (*pT == ' ' || *pT == '\t')
+ {
+ *pT = '_';
+ }
+ }
+
+ return pS;
+}
+#endif
+
+static IMG_BOOL
+_RequestAllocFail (IMG_VOID *_h,
+ IMG_SIZE_T _uSize,
+ IMG_SIZE_T *_pActualSize,
+ BM_MAPPING **_ppsMapping,
+ IMG_UINT32 _uFlags,
+ IMG_PVOID _pvPrivData,
+ IMG_UINT32 _ui32PrivDataLength,
+ IMG_UINTPTR_T *_pBase)
+{
+ PVR_UNREFERENCED_PARAMETER (_h);
+ PVR_UNREFERENCED_PARAMETER (_uSize);
+ PVR_UNREFERENCED_PARAMETER (_pActualSize);
+ PVR_UNREFERENCED_PARAMETER (_ppsMapping);
+ PVR_UNREFERENCED_PARAMETER (_uFlags);
+ PVR_UNREFERENCED_PARAMETER (_pBase);
+ PVR_UNREFERENCED_PARAMETER (_pvPrivData);
+ PVR_UNREFERENCED_PARAMETER (_ui32PrivDataLength);
+
+ return IMG_FALSE;
+}
+
+static IMG_UINT32
+pvr_log2 (IMG_SIZE_T n)
+{
+ IMG_UINT32 l = 0;
+ n>>=1;
+ while (n>0)
+ {
+ n>>=1;
+ l++;
+ }
+ return l;
+}
+
+static PVRSRV_ERROR
+_SegmentListInsertAfter (RA_ARENA *pArena,
+ BT *pInsertionPoint,
+ BT *pBT)
+{
+ PVR_ASSERT (pArena != IMG_NULL);
+ PVR_ASSERT (pInsertionPoint != IMG_NULL);
+
+ if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ pBT->pNextSegment = pInsertionPoint->pNextSegment;
+ pBT->pPrevSegment = pInsertionPoint;
+ if (pInsertionPoint->pNextSegment == IMG_NULL)
+ pArena->pTailSegment = pBT;
+ else
+ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+ pInsertionPoint->pNextSegment = pBT;
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+
+ if (pArena->pHeadSegment == IMG_NULL)
+ {
+ pArena->pHeadSegment = pArena->pTailSegment = pBT;
+ pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
+ }
+ else
+ {
+ BT *pBTScan;
+
+ if (pBT->base < pArena->pHeadSegment->base)
+ {
+
+ pBT->pNextSegment = pArena->pHeadSegment;
+ pArena->pHeadSegment->pPrevSegment = pBT;
+ pArena->pHeadSegment = pBT;
+ pBT->pPrevSegment = IMG_NULL;
+ }
+ else
+ {
+
+
+
+
+ pBTScan = pArena->pHeadSegment;
+
+ while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base))
+ {
+ pBTScan = pBTScan->pNextSegment;
+ }
+
+ eError = _SegmentListInsertAfter (pArena, pBTScan, pBT);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+ }
+ return eError;
+}
+
+static IMG_VOID
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ if (pBT->pPrevSegment == IMG_NULL)
+ pArena->pHeadSegment = pBT->pNextSegment;
+ else
+ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+ if (pBT->pNextSegment == IMG_NULL)
+ pArena->pTailSegment = pBT->pPrevSegment;
+ else
+ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
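+/* Split boundary tag pBT so that it covers exactly uSize bytes; the remainder becomes a new free neighbour inserted directly after it in the segment list. */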
+static BT *
+_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
+{
+ BT *pNeighbour;
+
+ PVR_ASSERT (pArena != IMG_NULL);
+
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena"));
+ return IMG_NULL;
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(BT),
+ (IMG_VOID **)&pNeighbour, IMG_NULL,
+ "Boundary Tag") != PVRSRV_OK)
+ {
+ return IMG_NULL;
+ }
+
+ OSMemSet(pNeighbour, 0, sizeof(BT));
+
+#if defined(VALIDATE_ARENA_TEST)
+ pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
+#endif
+
+ pNeighbour->pPrevSegment = pBT;
+ pNeighbour->pNextSegment = pBT->pNextSegment;
+ if (pBT->pNextSegment == IMG_NULL)
+ pArena->pTailSegment = pNeighbour;
+ else
+ pBT->pNextSegment->pPrevSegment = pNeighbour;
+ pBT->pNextSegment = pNeighbour;
+
+ pNeighbour->type = btt_free;
+ pNeighbour->uSize = pBT->uSize - uSize;
+ pNeighbour->base = pBT->base + uSize;
+ pNeighbour->psMapping = pBT->psMapping;
+ pBT->uSize = uSize;
+
+#if defined(VALIDATE_ARENA_TEST)
+ if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
+ {
+ pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
+ pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
+ }
+ else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
+ {
+ pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
+ pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
+ PVR_DBG_BREAK;
+ }
+#endif
+
+ return pNeighbour;
+}
+
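+/* Free segments are kept on doubly linked lists bucketed by floor(log2(size)) in aHeadFree[]. */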
+static IMG_VOID
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+ pBT->type = btt_free;
+ pBT->pNextFree = pArena->aHeadFree [uIndex];
+ pBT->pPrevFree = IMG_NULL;
+ if (pArena->aHeadFree[uIndex] != IMG_NULL)
+ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
+ pArena->aHeadFree [uIndex] = pBT;
+}
+
+static IMG_VOID
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+ if (pBT->pNextFree != IMG_NULL)
+ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
+ if (pBT->pPrevFree == IMG_NULL)
+ pArena->aHeadFree[uIndex] = pBT->pNextFree;
+ else
+ pBT->pPrevFree->pNextFree = pBT->pNextFree;
+}
+
+static BT *
+_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
+{
+ BT *pBT;
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(BT),
+ (IMG_VOID **)&pBT, IMG_NULL,
+ "Boundary Tag") != PVRSRV_OK)
+ {
+ return IMG_NULL;
+ }
+
+ OSMemSet(pBT, 0, sizeof(BT));
+
+#if defined(VALIDATE_ARENA_TEST)
+ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
+#endif
+
+ pBT->type = btt_span;
+ pBT->base = base;
+ pBT->uSize = uSize;
+ pBT->psMapping = IMG_NULL;
+
+ return pBT;
+}
+
+static BT *
+_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
+{
+ BT *pBT;
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(BT),
+ (IMG_VOID **)&pBT, IMG_NULL,
+ "Boundary Tag") != PVRSRV_OK)
+ {
+ return IMG_NULL;
+ }
+
+ OSMemSet(pBT, 0, sizeof(BT));
+
+#if defined(VALIDATE_ARENA_TEST)
+ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
+#endif
+
+ pBT->type = btt_free;
+ pBT->base = base;
+ pBT->uSize = uSize;
+
+ return pBT;
+}
+
+static BT *
+_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
+{
+ BT *pBT;
+ PVR_ASSERT (pArena!=IMG_NULL);
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena"));
+ return IMG_NULL;
+ }
+
+ pBT = _BuildBT (base, uSize);
+ if (pBT != IMG_NULL)
+ {
+
+#if defined(VALIDATE_ARENA_TEST)
+ pBT->eResourceSpan = RESOURCE_SPAN_FREE;
+ pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
+#endif
+
+ if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed"));
+ return IMG_NULL;
+ }
+ _FreeListInsert (pArena, pBT);
+#ifdef RA_STATS
+ pArena->sStatistics.uTotalResourceCount+=uSize;
+ pArena->sStatistics.uFreeResourceCount+=uSize;
+ pArena->sStatistics.uSpanCount++;
+#endif
+ }
+ return pBT;
+}
+
+static BT *
+_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
+{
+ PVRSRV_ERROR eError;
+ BT *pSpanStart;
+ BT *pSpanEnd;
+ BT *pBT;
+
+ PVR_ASSERT (pArena != IMG_NULL);
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
+ return IMG_NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
+ pArena->name, base, uSize));
+
+ pSpanStart = _BuildSpanMarker (base, uSize);
+ if (pSpanStart == IMG_NULL)
+ {
+ goto fail_start;
+ }
+
+#if defined(VALIDATE_ARENA_TEST)
+ pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
+ pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
+#endif
+
+ pSpanEnd = _BuildSpanMarker (base + uSize, 0);
+ if (pSpanEnd == IMG_NULL)
+ {
+ goto fail_end;
+ }
+
+#if defined(VALIDATE_ARENA_TEST)
+ pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
+ pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
+#endif
+
+ pBT = _BuildBT (base, uSize);
+ if (pBT == IMG_NULL)
+ {
+ goto fail_bt;
+ }
+
+#if defined(VALIDATE_ARENA_TEST)
+ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
+ pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
+#endif
+
+ eError = _SegmentListInsert (pArena, pSpanStart);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_SegListInsert;
+ }
+
+ eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_SegListInsert;
+ }
+
+ _FreeListInsert (pArena, pBT);
+
+ eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_SegListInsert;
+ }
+
+#ifdef RA_STATS
+ pArena->sStatistics.uTotalResourceCount+=uSize;
+#endif
+ return pBT;
+
+ fail_SegListInsert:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
+
+ fail_bt:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
+
+ fail_end:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
+
+ fail_start:
+ return IMG_NULL;
+}
+
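+/* Return a boundary tag to the free pool: coalesce with adjacent free segments, optionally release the quantum-aligned backing store it covered, and hand a fully free imported span back to the import-free callback. */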
+static IMG_VOID
+_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
+{
+ BT *pNeighbour;
+ IMG_UINTPTR_T uOrigBase;
+ IMG_SIZE_T uOrigSize;
+
+ PVR_ASSERT (pArena!=IMG_NULL);
+ PVR_ASSERT (pBT!=IMG_NULL);
+
+ if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
+ return;
+ }
+
+#ifdef RA_STATS
+ pArena->sStatistics.uLiveSegmentCount--;
+ pArena->sStatistics.uFreeSegmentCount++;
+ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
+#endif
+
+ uOrigBase = pBT->base;
+ uOrigSize = pBT->uSize;
+
+
+ pNeighbour = pBT->pPrevSegment;
+ if (pNeighbour!=IMG_NULL
+ && pNeighbour->type == btt_free
+ && pNeighbour->base + pNeighbour->uSize == pBT->base)
+ {
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->base = pNeighbour->base;
+ pBT->uSize += pNeighbour->uSize;
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
+
+#ifdef RA_STATS
+ pArena->sStatistics.uFreeSegmentCount--;
+#endif
+ }
+
+
+ pNeighbour = pBT->pNextSegment;
+ if (pNeighbour!=IMG_NULL
+ && pNeighbour->type == btt_free
+ && pBT->base + pBT->uSize == pNeighbour->base)
+ {
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->uSize += pNeighbour->uSize;
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
+
+#ifdef RA_STATS
+ pArena->sStatistics.uFreeSegmentCount--;
+#endif
+ }
+
+
+ if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
+ {
+ IMG_UINTPTR_T uRoundedStart, uRoundedEnd;
+
+
+ uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
+
+ if (uRoundedStart < pBT->base)
+ {
+ uRoundedStart += pArena->uQuantum;
+ }
+
+
+ uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
+
+ if (uRoundedEnd > (pBT->base + pBT->uSize))
+ {
+ uRoundedEnd -= pArena->uQuantum;
+ }
+
+ if (uRoundedStart < uRoundedEnd)
+ {
+ pArena->pBackingStoreFree(pArena->pImportHandle, (IMG_SIZE_T)uRoundedStart, (IMG_SIZE_T)uRoundedEnd, (IMG_HANDLE)0);
+ }
+ }
+
+ if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
+ && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
+ {
+ BT *next = pBT->pNextSegment;
+ BT *prev = pBT->pPrevSegment;
+ _SegmentListRemove (pArena, next);
+ _SegmentListRemove (pArena, prev);
+ _SegmentListRemove (pArena, pBT);
+ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
+#ifdef RA_STATS
+ pArena->sStatistics.uSpanCount--;
+ pArena->sStatistics.uExportCount++;
+ pArena->sStatistics.uFreeSegmentCount--;
+ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
+ pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
+#endif
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
+
+ }
+ else
+ _FreeListInsert (pArena, pBT);
+}
+
+
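+/* Scan the free table for a segment large enough to hold uSize bytes at the requested alignment; split off any unused head and tail, mark the segment live and record it in the segment hash. */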
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+ IMG_SIZE_T uSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uAlignment,
+ IMG_UINT32 uAlignmentOffset,
+ IMG_UINTPTR_T *base)
+{
+ IMG_UINT32 uIndex;
+ PVR_ASSERT (pArena!=IMG_NULL);
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena"));
+ return IMG_FALSE;
+ }
+
+ if (uAlignment>1)
+ uAlignmentOffset %= uAlignment;
+
+
+
+ uIndex = pvr_log2 (uSize);
+
+#if 0
+
+ if (1u<<uIndex < uSize)
+ uIndex++;
+#endif
+
+ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
+ uIndex++;
+
+ while (uIndex < FREE_TABLE_LIMIT)
+ {
+ if (pArena->aHeadFree[uIndex]!=IMG_NULL)
+ {
+
+ BT *pBT;
+
+ pBT = pArena->aHeadFree [uIndex];
+ while (pBT!=IMG_NULL)
+ {
+ IMG_UINTPTR_T aligned_base;
+
+ if (uAlignment>1)
+ aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
+ else
+ aligned_base = pBT->base;
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_AttemptAllocAligned: pBT-base=0x%x "
+ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
+ pBT->base, pBT->uSize, aligned_base, uSize));
+
+ if (pBT->base + pBT->uSize >= aligned_base + uSize)
+ {
+ if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
+ {
+ _FreeListRemove (pArena, pBT);
+
+ PVR_ASSERT (pBT->type == btt_free);
+
+#ifdef RA_STATS
+ pArena->sStatistics.uLiveSegmentCount++;
+ pArena->sStatistics.uFreeSegmentCount--;
+ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
+#endif
+
+
+ if (aligned_base > pBT->base)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit (pArena, pBT, (IMG_SIZE_T)(aligned_base - pBT->base));
+
+ if (pNeighbour==IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
+
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert (pArena, pBT);
+ #ifdef RA_STATS
+ pArena->sStatistics.uFreeSegmentCount++;
+ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
+ #endif
+ pBT = pNeighbour;
+ }
+
+
+ if (pBT->uSize > uSize)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit (pArena, pBT, uSize);
+
+ if (pNeighbour==IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
+
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert (pArena, pNeighbour);
+ #ifdef RA_STATS
+ pArena->sStatistics.uFreeSegmentCount++;
+ pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
+ #endif
+ }
+
+ pBT->type = btt_live;
+
+#if defined(VALIDATE_ARENA_TEST)
+ if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE)
+ {
+ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE;
+ }
+ else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
+ {
+ pBT->eResourceSpan = RESOURCE_SPAN_LIVE;
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
+ PVR_DBG_BREAK;
+ }
+#endif
+ if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
+ {
+ _FreeBT (pArena, pBT, IMG_FALSE);
+ return IMG_FALSE;
+ }
+
+ if (ppsMapping!=IMG_NULL)
+ *ppsMapping = pBT->psMapping;
+
+ *base = pBT->base;
+
+ return IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
+
+ }
+ }
+ pBT = pBT->pNextFree;
+ }
+
+ }
+ uIndex++;
+ }
+
+ return IMG_FALSE;
+}
+
+
+
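+/* Create a resource arena. An optional initial range [base, base+uSize) is added up front, and the import/free callbacks allow the arena to grow on demand in multiples of uQuantum. */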
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ IMG_UINTPTR_T base,
+ IMG_SIZE_T uSize,
+ BM_MAPPING *psMapping,
+ IMG_SIZE_T uQuantum,
+ IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping, IMG_UINT32 _flags,
+ IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *pBase),
+ IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *),
+ IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE),
+ IMG_VOID *pImportHandle)
+{
+ RA_ARENA *pArena;
+ BT *pBT;
+ IMG_INT i;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
+ name, base, uSize, (IMG_UINTPTR_T)imp_alloc, (IMG_UINTPTR_T)imp_free));
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (*pArena),
+ (IMG_VOID **)&pArena, IMG_NULL,
+ "Resource Arena") != PVRSRV_OK)
+ {
+ goto arena_fail;
+ }
+
+ pArena->name = name;
+ pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? imp_alloc : &_RequestAllocFail;
+ pArena->pImportFree = imp_free;
+ pArena->pBackingStoreFree = backingstore_free;
+ pArena->pImportHandle = pImportHandle;
+ for (i=0; i<FREE_TABLE_LIMIT; i++)
+ pArena->aHeadFree[i] = IMG_NULL;
+ pArena->pHeadSegment = IMG_NULL;
+ pArena->pTailSegment = IMG_NULL;
+ pArena->uQuantum = uQuantum;
+
+#ifdef RA_STATS
+ pArena->sStatistics.uSpanCount = 0;
+ pArena->sStatistics.uLiveSegmentCount = 0;
+ pArena->sStatistics.uFreeSegmentCount = 0;
+ pArena->sStatistics.uFreeResourceCount = 0;
+ pArena->sStatistics.uTotalResourceCount = 0;
+ pArena->sStatistics.uCumulativeAllocs = 0;
+ pArena->sStatistics.uCumulativeFrees = 0;
+ pArena->sStatistics.uImportCount = 0;
+ pArena->sStatistics.uExportCount = 0;
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+ if(strcmp(pArena->name,"") != 0)
+ {
+ IMG_INT ret;
+ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
+ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
+ struct proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *,
+ IMG_VOID*,
+ pvr_next_proc_seq_t,
+ pvr_show_proc_seq_t,
+ pvr_off2element_proc_seq_t,
+ pvr_startstop_proc_seq_t,
+ write_proc_t);
+
+ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
+
+
+ pfnCreateProcEntrySeq = pArena->bInitProcEntry ? CreateProcEntrySeq : CreatePerProcessProcEntrySeq;
+
+ ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name);
+ if (ret > 0 && ret < sizeof(szProcInfoName))
+ {
+ pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL,
+ RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL);
+ }
+ else
+ {
+ pArena->pProcInfo = 0;
+ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
+ }
+
+ ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name);
+		if (ret > 0 && ret < sizeof(szProcSegsName))
+ {
+ pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL,
+ RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL);
+ }
+ else
+ {
+ pArena->pProcSegs = 0;
+ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
+ }
+ }
+#endif
+
+ pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
+ if (pArena->pSegmentHash==IMG_NULL)
+ {
+ goto hash_fail;
+ }
+ if (uSize>0)
+ {
+ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
+ pBT = _InsertResource (pArena, base, uSize);
+ if (pBT == IMG_NULL)
+ {
+ goto insert_fail;
+ }
+ pBT->psMapping = psMapping;
+
+ }
+ return pArena;
+
+insert_fail:
+ HASH_Delete (pArena->pSegmentHash);
+hash_fail:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
+
+arena_fail:
+ return IMG_NULL;
+}
+
+IMG_VOID
+RA_Delete (RA_ARENA *pArena)
+{
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT(pArena != IMG_NULL);
+
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+ return;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Delete: name='%s'", pArena->name));
+
+ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+ pArena->aHeadFree[uIndex] = IMG_NULL;
+
+ while (pArena->pHeadSegment != IMG_NULL)
+ {
+ BT *pBT = pArena->pHeadSegment;
+
+ if (pBT->type != btt_free)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed"));
+			PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing allocations before destroying devmemcontext"));
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
+ }
+
+ _SegmentListRemove (pArena, pBT);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
+
+#ifdef RA_STATS
+ pArena->sStatistics.uSpanCount--;
+#endif
+ }
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+ {
+ IMG_VOID (*pfnRemoveProcEntrySeq)(struct proc_dir_entry*);
+
+ pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? RemoveProcEntrySeq : RemovePerProcessProcEntrySeq;
+
+ if (pArena->pProcInfo != 0)
+ {
+ pfnRemoveProcEntrySeq( pArena->pProcInfo );
+ }
+
+ if (pArena->pProcSegs != 0)
+ {
+ pfnRemoveProcEntrySeq( pArena->pProcSegs );
+ }
+ }
+#endif
+ HASH_Delete (pArena->pSegmentHash);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
+
+}
+
+IMG_BOOL
+RA_TestDelete (RA_ARENA *pArena)
+{
+ PVR_ASSERT(pArena != IMG_NULL);
+
+	if (pArena != IMG_NULL)
+	{
+		BT *pBT = pArena->pHeadSegment;
+
+		while (pBT != IMG_NULL)
+		{
+			if (pBT->type != btt_free)
+			{
+				PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!"));
+				PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
+				return IMG_FALSE;
+			}
+			pBT = pBT->pNextSegment;
+		}
+	}
+
+ return IMG_TRUE;
+}
+
+IMG_BOOL
+RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
+{
+ PVR_ASSERT (pArena != IMG_NULL);
+
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+ return IMG_FALSE;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
+
+ uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
+ return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
+}
+
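+/* Allocate uRequestSize bytes from the arena; if no existing free segment fits, a fresh span is imported via pImportAlloc and the allocation is retried from it. */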
+IMG_BOOL
+RA_Alloc (RA_ARENA *pArena,
+ IMG_SIZE_T uRequestSize,
+ IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uAlignment,
+ IMG_UINT32 uAlignmentOffset,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *base)
+{
+ IMG_BOOL bResult;
+ IMG_SIZE_T uSize = uRequestSize;
+
+ PVR_ASSERT (pArena!=IMG_NULL);
+
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena"));
+ return IMG_FALSE;
+ }
+
+#if defined(VALIDATE_ARENA_TEST)
+ ValidateArena(pArena);
+#endif
+
+#ifdef USE_BM_FREESPACE_CHECK
+ CheckBMFreespace();
+#endif
+
+ if (pActualSize != IMG_NULL)
+ {
+ *pActualSize = uSize;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x",
+ pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
+
+
+
+ bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
+ uAlignment, uAlignmentOffset, base);
+ if (!bResult)
+ {
+ BM_MAPPING *psImportMapping;
+ IMG_UINTPTR_T import_base;
+ IMG_SIZE_T uImportSize = uSize;
+
+
+
+
+ if (uAlignment > pArena->uQuantum)
+ {
+ uImportSize += (uAlignment - 1);
+ }
+
+
+ uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
+
+ bResult =
+ pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
+ &psImportMapping, uFlags,
+ pvPrivData, ui32PrivDataLength, &import_base);
+ if (bResult)
+ {
+ BT *pBT;
+ pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
+
+ if (pBT == IMG_NULL)
+ {
+
+ pArena->pImportFree(pArena->pImportHandle, import_base,
+ psImportMapping);
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: name='%s', size=0x%x failed!",
+ pArena->name, uSize));
+
+ return IMG_FALSE;
+ }
+ pBT->psMapping = psImportMapping;
+#ifdef RA_STATS
+ pArena->sStatistics.uFreeSegmentCount++;
+ pArena->sStatistics.uFreeResourceCount += uImportSize;
+ pArena->sStatistics.uImportCount++;
+ pArena->sStatistics.uSpanCount++;
+#endif
+ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
+ uAlignment, uAlignmentOffset,
+ base);
+ if (!bResult)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: name='%s' uAlignment failed!",
+ pArena->name));
+ }
+ }
+ }
+#ifdef RA_STATS
+ if (bResult)
+ pArena->sStatistics.uCumulativeAllocs++;
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
+ pArena->name, uSize, *base, bResult));
+
+
+
+#if defined(VALIDATE_ARENA_TEST)
+ ValidateArena(pArena);
+#endif
+
+ return bResult;
+}
+
+
+#if defined(VALIDATE_ARENA_TEST)
+
+IMG_UINT32 ValidateArena(RA_ARENA *pArena)
+{
+ BT* pSegment;
+ RESOURCE_DESCRIPTOR eNextSpan;
+
+ pSegment = pArena->pHeadSegment;
+
+ if (pSegment == IMG_NULL)
+ {
+ return 0;
+ }
+
+ if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
+ {
+ PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START);
+
+ while (pSegment->pNextSegment)
+ {
+ eNextSpan = pSegment->pNextSegment->eResourceSpan;
+
+ switch (pSegment->eResourceSpan)
+ {
+ case IMPORTED_RESOURCE_SPAN_LIVE:
+
+ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+ case IMPORTED_RESOURCE_SPAN_FREE:
+
+ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+ case IMPORTED_RESOURCE_SPAN_END:
+
+ if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_END))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+
+ case IMPORTED_RESOURCE_SPAN_START:
+
+ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
+ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE)))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ break;
+ }
+ pSegment = pSegment->pNextSegment;
+ }
+ }
+ else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
+ {
+ PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE));
+
+ while (pSegment->pNextSegment)
+ {
+ eNextSpan = pSegment->pNextSegment->eResourceSpan;
+
+ switch (pSegment->eResourceSpan)
+ {
+ case RESOURCE_SPAN_LIVE:
+
+ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
+ (eNextSpan == RESOURCE_SPAN_LIVE)))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+ case RESOURCE_SPAN_FREE:
+
+ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
+ (eNextSpan == RESOURCE_SPAN_LIVE)))
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ }
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
+ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
+
+ PVR_DBG_BREAK;
+ break;
+ }
+ pSegment = pSegment->pNextSegment;
+ }
+
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType unrecognized"));
+
+ PVR_DBG_BREAK;
+ }
+
+ return 0;
+}
+
+#endif
+
+
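+/* Free the allocation whose base address is 'base'; the owning boundary tag is looked up in the segment hash and released via _FreeBT. */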
+IMG_VOID
+RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
+{
+ BT *pBT;
+
+ PVR_ASSERT (pArena != IMG_NULL);
+
+ if (pArena == IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+ return;
+ }
+
+#ifdef USE_BM_FREESPACE_CHECK
+ CheckBMFreespace();
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Free: name='%s', base=0x%x", pArena->name, base));
+
+ pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
+ PVR_ASSERT (pBT != IMG_NULL);
+
+ if (pBT)
+ {
+ PVR_ASSERT (pBT->base == base);
+
+#ifdef RA_STATS
+ pArena->sStatistics.uCumulativeFrees++;
+#endif
+
+#ifdef USE_BM_FREESPACE_CHECK
+{
+ IMG_BYTE* p;
+ IMG_BYTE* endp;
+
+ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
+ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize));
+ while ((IMG_UINT32)p & 3)
+ {
+ *p++ = 0xAA;
+ }
+ while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc))
+ {
+ *(IMG_UINT32*)p = 0xAAAAAAAA;
+ p += sizeof(IMG_UINT32);
+ }
+ while (p < endp)
+ {
+ *p++ = 0xAA;
+ }
+ PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
+}
+#endif
+ _FreeBT (pArena, pBT, bFreeBackingStore);
+ }
+}
+
+
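+/* Walk the segment list and report the next live (allocated) segment; psSegDetails->hSegment carries the iteration state between calls. */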
+IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
+{
+ BT *pBT;
+
+ if (psSegDetails->hSegment)
+ {
+ pBT = (BT *)psSegDetails->hSegment;
+ }
+ else
+ {
+ RA_ARENA *pArena = (RA_ARENA *)hArena;
+
+ pBT = pArena->pHeadSegment;
+ }
+
+ while (pBT != IMG_NULL)
+ {
+ if (pBT->type == btt_live)
+ {
+ psSegDetails->uiSize = pBT->uSize;
+ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
+ psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
+
+ return IMG_TRUE;
+ }
+
+ pBT = pBT->pNextSegment;
+ }
+
+ psSegDetails->uiSize = 0;
+ psSegDetails->sCpuPhyAddr.uiAddr = 0;
+ psSegDetails->hSegment = (IMG_HANDLE)IMG_UNDEF;
+
+ return IMG_FALSE;
+}
+
+
+#ifdef USE_BM_FREESPACE_CHECK
+RA_ARENA* pJFSavedArena = IMG_NULL;
+
+IMG_VOID CheckBMFreespace(IMG_VOID)
+{
+ BT *pBT;
+ IMG_BYTE* p;
+ IMG_BYTE* endp;
+
+ if (pJFSavedArena != IMG_NULL)
+ {
+ for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
+ {
+ if (pBT->type == btt_free)
+ {
+ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
+ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
+
+ while ((IMG_UINT32)p & 3)
+ {
+ if (*p++ != 0xAA)
+ {
+ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
+ for (;;);
+ break;
+ }
+ }
+ while (p < endp)
+ {
+ if (*(IMG_UINT32*)p != 0xAAAAAAAA)
+ {
+ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
+ for (;;);
+ break;
+ }
+ p += 4;
+ }
+ }
+ }
+ }
+}
+#endif
+
+
+#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
+static IMG_CHAR *
+_BTType (IMG_INT eType)
+{
+ switch (eType)
+ {
+ case btt_span: return "span";
+ case btt_free: return "free";
+ case btt_live: return "live";
+ }
+ return "junk";
+}
+#endif
+
+#if defined(ENABLE_RA_DUMP)
+IMG_VOID
+RA_Dump (RA_ARENA *pArena)
+{
+ BT *pBT;
+ PVR_ASSERT (pArena != IMG_NULL);
+ PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name));
+ PVR_DPF ((PVR_DBG_MESSAGE," alloc=%08X free=%08X handle=%08X quantum=%d",
+ pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle,
+ pArena->uQuantum));
+ PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:"));
+ if (pArena->pHeadSegment != IMG_NULL &&
+ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
+ PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment"));
+ if (pArena->pTailSegment != IMG_NULL &&
+ pArena->pTailSegment->pNextSegment != IMG_NULL)
+ PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment"));
+
+ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x%x size=0x%x type=%s ref=%08X",
+ (IMG_UINT32) pBT->base, pBT->uSize, _BTType (pBT->type),
+ pBT->pRef));
+ }
+
+#ifdef HASH_TRACE
+ HASH_Dump (pArena->pSegmentHash);
+#endif
+}
+#endif
+
+
+#if defined(CONFIG_PROC_FS) && defined(DEBUG)
+
+
+static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
+ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
+ IMG_INT off = (IMG_INT)el;
+
+ switch (off)
+ {
+ case 1:
+ seq_printf(sfile, "quantum\t\t\t%u\n", pArena->uQuantum);
+ break;
+ case 2:
+ seq_printf(sfile, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
+ break;
+#ifdef RA_STATS
+ case 3:
+ seq_printf(sfile,"span count\t\t%u\n", pArena->sStatistics.uSpanCount);
+ break;
+ case 4:
+ seq_printf(sfile, "live segment count\t%u\n", pArena->sStatistics.uLiveSegmentCount);
+ break;
+ case 5:
+ seq_printf(sfile, "free segment count\t%u\n", pArena->sStatistics.uFreeSegmentCount);
+ break;
+ case 6:
+ seq_printf(sfile, "free resource count\t%u (0x%x)\n",
+ pArena->sStatistics.uFreeResourceCount,
+ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
+ break;
+ case 7:
+ seq_printf(sfile, "total allocs\t\t%u\n", pArena->sStatistics.uCumulativeAllocs);
+ break;
+ case 8:
+ seq_printf(sfile, "total frees\t\t%u\n", pArena->sStatistics.uCumulativeFrees);
+ break;
+ case 9:
+ seq_printf(sfile, "import count\t\t%u\n", pArena->sStatistics.uImportCount);
+ break;
+ case 10:
+ seq_printf(sfile, "export count\t\t%u\n", pArena->sStatistics.uExportCount);
+ break;
+#endif
+ }
+
+}
+
+static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off)
+{
+#ifdef RA_STATS
+ if(off <= 9)
+#else
+ if(off <= 1)
+#endif
+ return (void*)(IMG_INT)(off+1);
+ return 0;
+}
+
+static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
+ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
+ BT *pBT = (BT*)el;
+
+ if (el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
+ return;
+ }
+
+ if (pBT)
+ {
+ seq_printf(sfile, "%08x %8x %4s %08x\n",
+ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
+ (IMG_UINT)pBT->psMapping);
+ }
+}
+
+static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off)
+{
+ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
+ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
+ BT *pBT = 0;
+
+ if(off == 0)
+ return PVR_PROC_SEQ_START_TOKEN;
+
+ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment);
+
+ return (void*)pBT;
+}
+
+#endif
+
+
+#ifdef RA_STATS
+PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
+ IMG_CHAR **ppszStr,
+ IMG_UINT32 *pui32StrLen)
+{
+ IMG_CHAR *pszStr = *ppszStr;
+ IMG_UINT32 ui32StrLen = *pui32StrLen;
+ IMG_INT32 i32Count;
+ BT *pBT;
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, " allocCB=%p freeCB=%p handle=%p quantum=%d\n",
+ pArena->pImportAlloc,
+ pArena->pImportFree,
+ pArena->pImportHandle,
+ pArena->uQuantum);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%u\n", pArena->sStatistics.uSpanCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%u\n", pArena->sStatistics.uLiveSegmentCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%u\n", pArena->sStatistics.uFreeSegmentCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%u (0x%x)\n",
+ pArena->sStatistics.uFreeResourceCount,
+ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%u\n", pArena->sStatistics.uCumulativeAllocs);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%u\n", pArena->sStatistics.uCumulativeFrees);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%u\n", pArena->sStatistics.uImportCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%u\n", pArena->sStatistics.uExportCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+
+ if (pArena->pHeadSegment != IMG_NULL &&
+ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
+ {
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+
+ if (pArena->pTailSegment != IMG_NULL &&
+ pArena->pTailSegment->pNextSegment != IMG_NULL)
+ {
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n");
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+
+ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
+ {
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%p\n",
+ (IMG_UINT32) pBT->base,
+ pBT->uSize,
+ _BTType(pBT->type),
+ pBT->psMapping);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ }
+
+ *ppszStr = pszStr;
+ *pui32StrLen = ui32StrLen;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena,
+ IMG_CHAR **ppszStr,
+ IMG_UINT32 *pui32StrLen)
+{
+ IMG_CHAR *pszStr = *ppszStr;
+ IMG_UINT32 ui32StrLen = *pui32StrLen;
+ IMG_INT32 i32Count;
+ CHECK_SPACE(ui32StrLen);
+ i32Count = OSSNPrintf(pszStr, 100, "Bytes free: Arena %-30s: %u (0x%x)\n", pArena->name,
+ pArena->sStatistics.uFreeResourceCount,
+ pArena->sStatistics.uFreeResourceCount);
+ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+ *ppszStr = pszStr;
+ *pui32StrLen = ui32StrLen;
+
+ return PVRSRV_OK;
+}
+#endif
+
diff --git a/drivers/gpu/pvr/ra.h b/drivers/gpu/pvr/ra.h
new file mode 100644
index 0000000..d836215
--- /dev/null
+++ b/drivers/gpu/pvr/ra.h
@@ -0,0 +1,163 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+#include "img_types.h"
+#include "hash.h"
+#include "osfunc.h"
+
+typedef struct _RA_ARENA_ RA_ARENA;
+typedef struct _BM_MAPPING_ BM_MAPPING;
+
+
+
+#define RA_STATS
+
+
+struct _RA_STATISTICS_
+{
+	/* number of spans in the arena */
+	IMG_SIZE_T uSpanCount;
+
+	/* number of boundary-tag segments currently allocated */
+	IMG_SIZE_T uLiveSegmentCount;
+
+	/* number of boundary-tag segments currently free */
+	IMG_SIZE_T uFreeSegmentCount;
+
+	/* total resource managed by the arena */
+	IMG_SIZE_T uTotalResourceCount;
+
+	/* resource currently free (reported as "Bytes free" by RA_GetStatsFreeMem) */
+	IMG_SIZE_T uFreeResourceCount;
+
+	/* running count of allocations */
+	IMG_SIZE_T uCumulativeAllocs;
+
+	/* running count of frees */
+	IMG_SIZE_T uCumulativeFrees;
+
+	/* count of imports from the backing allocator */
+	IMG_SIZE_T uImportCount;
+
+	/* count of exports back to the backing allocator */
+	IMG_SIZE_T uExportCount;
+};
+typedef struct _RA_STATISTICS_ RA_STATISTICS;
+
+struct _RA_SEGMENT_DETAILS_
+{
+ IMG_SIZE_T uiSize;
+ IMG_CPU_PHYADDR sCpuPhyAddr;
+ IMG_HANDLE hSegment;
+};
+typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
+
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ IMG_UINTPTR_T base,
+ IMG_SIZE_T uSize,
+ BM_MAPPING *psMapping,
+ IMG_SIZE_T uQuantum,
+ IMG_BOOL (*imp_alloc)(IMG_VOID *_h,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *pBase),
+ IMG_VOID (*imp_free) (IMG_VOID *,
+ IMG_UINTPTR_T,
+ BM_MAPPING *),
+ IMG_VOID (*backingstore_free) (IMG_VOID *,
+ IMG_SIZE_T,
+ IMG_SIZE_T,
+ IMG_HANDLE),
+ IMG_VOID *import_handle);
+
+IMG_VOID
+RA_Delete (RA_ARENA *pArena);
+
+IMG_BOOL
+RA_TestDelete (RA_ARENA *pArena);
+
+IMG_BOOL
+RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
+
+IMG_BOOL
+RA_Alloc (RA_ARENA *pArena,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uAlignment,
+ IMG_UINT32 uAlignmentOffset,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINTPTR_T *pBase);
+
+IMG_VOID
+RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
+
+
+#ifdef RA_STATS
+
+#define CHECK_SPACE(total) \
+{ \
+ if((total)<100) \
+ return PVRSRV_ERROR_INVALID_PARAMS; \
+}
+
+#define UPDATE_SPACE(str, count, total) \
+{ \
+ if((count) == -1) \
+ return PVRSRV_ERROR_INVALID_PARAMS; \
+ else \
+ { \
+ (str) += (count); \
+ (total) -= (count); \
+ } \
+}
+
+
+IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
+
+
+PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
+ IMG_CHAR **ppszStr,
+ IMG_UINT32 *pui32StrLen);
+
+PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena,
+ IMG_CHAR **ppszStr,
+ IMG_UINT32 *pui32StrLen);
+
+#endif
+
+#endif
+
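As a rough illustration of the resource-arena interface declared above, the sketch below (not part of the patch itself) creates a statically sized arena, carves one allocation out of it, and tears everything down. The base address, sizes, and the IMG_NULL import-callback and BM_MAPPING arguments are assumptions chosen for brevity; real callers such as the buffer manager supply import callbacks matching the imp_alloc/imp_free prototypes and usually pass a real mapping pointer to RA_Alloc.

/* Hedged usage sketch for the RA_* API declared in ra.h; values are illustrative. */
#include "ra.h"

static IMG_BOOL ExampleArenaUsage(IMG_VOID)
{
	RA_ARENA *psArena;
	IMG_UINTPTR_T uBase;

	/* Manage a fixed 16MB range with a 4KB quantum and no import callbacks
	   (assumption: the whole range is handed over up front). */
	psArena = RA_Create("example", 0x80000000, 0x01000000, IMG_NULL, 0x1000,
			    IMG_NULL, IMG_NULL, IMG_NULL, IMG_NULL);
	if (psArena == IMG_NULL)
		return IMG_FALSE;

	/* Allocate 64KB aligned to 4KB; uBase receives the allocated base. */
	if (!RA_Alloc(psArena, 0x10000, IMG_NULL, IMG_NULL, 0, 0x1000, 0,
		      IMG_NULL, 0, &uBase))
	{
		RA_Delete(psArena);
		return IMG_FALSE;
	}

	RA_Free(psArena, uBase, IMG_FALSE);
	RA_Delete(psArena);
	return IMG_TRUE;
}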
diff --git a/drivers/gpu/pvr/refcount.c b/drivers/gpu/pvr/refcount.c
new file mode 100644
index 0000000..5bc9b4a
--- /dev/null
+++ b/drivers/gpu/pvr/refcount.c
@@ -0,0 +1,568 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*/ /**************************************************************************/
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+
+#include "services_headers.h"
+
+#ifndef __linux__
+#warning Reference count debugging is not thread-safe on this platform
+#define PVRSRV_LOCK_CCB()
+#define PVRSRV_UNLOCK_CCB()
+#else /* __linux__ */
+#include <linux/mutex.h>
+static DEFINE_MUTEX(gsCCBLock);
+#define PVRSRV_LOCK_CCB() mutex_lock(&gsCCBLock)
+#define PVRSRV_UNLOCK_CCB() mutex_unlock(&gsCCBLock)
+#endif /* __linux__ */
+
+#define PVRSRV_REFCOUNT_CCB_MAX 512
+#define PVRSRV_REFCOUNT_CCB_MESG_MAX 80
+
+#define PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO (1U << 0)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO (1U << 1)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF (1U << 2)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2 (1U << 3)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC (1U << 4)
+
+#if defined(__linux__)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP (1U << 16)
+#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 (1U << 17)
+#else
+#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP 0
+#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 0
+#endif
+
+#define PVRSRV_REFCOUNT_CCB_DEBUG_ALL ~0U
+
+/*static const IMG_UINT guiDebugMask = PVRSRV_REFCOUNT_CCB_DEBUG_ALL;*/
+static const IMG_UINT guiDebugMask =
+ PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO |
+ PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2;
+
+typedef struct
+{
+ const IMG_CHAR *pszFile;
+ IMG_INT iLine;
+ IMG_UINT32 ui32PID;
+ IMG_CHAR pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX];
+}
+PVRSRV_REFCOUNT_CCB;
+
+static PVRSRV_REFCOUNT_CCB gsRefCountCCB[PVRSRV_REFCOUNT_CCB_MAX];
+static IMG_UINT giOffset;
+
+static const IMG_CHAR gszHeader[] =
+ /* 10 20 30 40 50 60 70
+ * 345678901234567890123456789012345678901234567890123456789012345678901
+ */
+ "TYPE SYNCINFO MEMINFO MEMHANDLE OTHER REF REF' SIZE PID";
+ /* NCINFO deadbeef deadbeef deadbeef deadbeef 1234 1234 deadbeef */
+
+#define PVRSRV_REFCOUNT_CCB_FMT_STRING "%8.8s %8p %8p %8p %8p %.4d %.4d %.8x"
+
+IMG_INTERNAL
+void PVRSRVDumpRefCountCCB(void)
+{
+ int i;
+
+ PVRSRV_LOCK_CCB();
+
+ PVR_LOG(("%s", gszHeader));
+
+ for(i = 0; i < PVRSRV_REFCOUNT_CCB_MAX; i++)
+ {
+ PVRSRV_REFCOUNT_CCB *psRefCountCCBEntry =
+ &gsRefCountCCB[(giOffset + i) % PVRSRV_REFCOUNT_CCB_MAX];
+
+ /* Early on, we won't have MAX_REFCOUNT_CCB_SIZE messages */
+ if(!psRefCountCCBEntry->pszFile)
+ break;
+
+ PVR_LOG(("%s %d %s:%d", psRefCountCCBEntry->pcMesg,
+ psRefCountCCBEntry->ui32PID,
+ psRefCountCCBEntry->pszFile,
+ psRefCountCCBEntry->iLine));
+ }
+
+ PVRSRV_UNLOCK_CCB();
+}
+
+IMG_INTERNAL
+void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount);
+
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "SYNCINFO",
+ psKernelSyncInfo,
+ psKernelMemInfo,
+			 (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL,
+			 NULL,
+ ui32RefValue,
+ ui32RefValue + 1,
+ (psKernelMemInfo) ? psKernelMemInfo->uAllocSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ PVRSRVAcquireSyncInfoKM(psKernelSyncInfo);
+}
+
+IMG_INTERNAL
+void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount);
+
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "SYNCINFO",
+ psKernelSyncInfo,
+ psKernelMemInfo,
+ (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL,
+ NULL,
+ ui32RefValue,
+ ui32RefValue - 1,
+ (psKernelMemInfo) ? psKernelMemInfo->uAllocSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ PVRSRVReleaseSyncInfoKM(psKernelSyncInfo);
+}
+
+IMG_INTERNAL
+void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MEMINFO",
+ psKernelMemInfo->psKernelSyncInfo,
+ psKernelMemInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ NULL,
+ psKernelMemInfo->ui32RefCount,
+ psKernelMemInfo->ui32RefCount + 1,
+ psKernelMemInfo->uAllocSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psKernelMemInfo->ui32RefCount++;
+}
+
+IMG_INTERNAL
+void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MEMINFO",
+ psKernelMemInfo->psKernelSyncInfo,
+ psKernelMemInfo,
+ psKernelMemInfo->sMemBlk.hOSMemHandle,
+ NULL,
+ psKernelMemInfo->ui32RefCount,
+ psKernelMemInfo->ui32RefCount - 1,
+ psKernelMemInfo->uAllocSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psKernelMemInfo->ui32RefCount--;
+}
+
+IMG_INTERNAL
+void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_BUF",
+ NULL,
+ NULL,
+ BM_HandleToOSMemHandle(pBuf),
+ pBuf,
+ pBuf->ui32RefCount,
+ pBuf->ui32RefCount + 1,
+ (pBuf->pMapping) ? pBuf->pMapping->uSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ pBuf->ui32RefCount++;
+}
+
+IMG_INTERNAL
+void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_BUF",
+ NULL,
+ NULL,
+ BM_HandleToOSMemHandle(pBuf),
+ pBuf,
+ pBuf->ui32RefCount,
+ pBuf->ui32RefCount - 1,
+ (pBuf->pMapping) ? pBuf->pMapping->uSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ pBuf->ui32RefCount--;
+}
+
+IMG_INTERNAL
+void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_BUF2",
+ NULL,
+ NULL,
+ BM_HandleToOSMemHandle(pBuf),
+ pBuf,
+ pBuf->ui32ExportCount,
+ pBuf->ui32ExportCount + 1,
+ (pBuf->pMapping) ? pBuf->pMapping->uSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ pBuf->ui32ExportCount++;
+}
+
+IMG_INTERNAL
+void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_BUF2",
+ NULL,
+ NULL,
+ BM_HandleToOSMemHandle(pBuf),
+ pBuf,
+ pBuf->ui32ExportCount,
+ pBuf->ui32ExportCount - 1,
+ (pBuf->pMapping) ? pBuf->pMapping->uSize : 0);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ pBuf->ui32ExportCount--;
+}
+
+IMG_INTERNAL
+void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_XPROC",
+ NULL,
+ NULL,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle,
+ (IMG_VOID *) ui32Index,
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount,
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount + 1,
+ gXProcWorkaroundShareData[ui32Index].ui32Size);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount++;
+}
+
+IMG_INTERNAL
+void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "BM_XPROC",
+ NULL,
+ NULL,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle,
+ (IMG_VOID *) ui32Index,
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount,
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount - 1,
+ gXProcWorkaroundShareData[ui32Index].ui32Size);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount--;
+}
+
+#if defined(__linux__)
+
+/* mmap refcounting is Linux specific */
+
+IMG_INTERNAL
+void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MMAP",
+ NULL,
+ NULL,
+ psOffsetStruct->psLinuxMemArea,
+ psOffsetStruct,
+ psOffsetStruct->ui32RefCount,
+ psOffsetStruct->ui32RefCount + 1,
+ psOffsetStruct->ui32RealByteSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psOffsetStruct->ui32RefCount++;
+}
+
+IMG_INTERNAL
+void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MMAP",
+ NULL,
+ NULL,
+ psOffsetStruct->psLinuxMemArea,
+ psOffsetStruct,
+ psOffsetStruct->ui32RefCount,
+ psOffsetStruct->ui32RefCount - 1,
+ psOffsetStruct->ui32RealByteSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psOffsetStruct->ui32RefCount--;
+}
+
+IMG_INTERNAL
+void PVRSRVOffsetStructIncMapped2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MMAP2",
+ NULL,
+ NULL,
+ psOffsetStruct->psLinuxMemArea,
+ psOffsetStruct,
+ psOffsetStruct->ui32Mapped,
+ psOffsetStruct->ui32Mapped + 1,
+ psOffsetStruct->ui32RealByteSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psOffsetStruct->ui32Mapped++;
+}
+
+IMG_INTERNAL
+void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2))
+ goto skip;
+
+ PVRSRV_LOCK_CCB();
+
+ gsRefCountCCB[giOffset].pszFile = pszFile;
+ gsRefCountCCB[giOffset].iLine = iLine;
+ gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM();
+ snprintf(gsRefCountCCB[giOffset].pcMesg,
+ PVRSRV_REFCOUNT_CCB_MESG_MAX - 1,
+ PVRSRV_REFCOUNT_CCB_FMT_STRING,
+ "MMAP2",
+ NULL,
+ NULL,
+ psOffsetStruct->psLinuxMemArea,
+ psOffsetStruct,
+ psOffsetStruct->ui32Mapped,
+ psOffsetStruct->ui32Mapped - 1,
+ psOffsetStruct->ui32RealByteSize);
+ gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0;
+ giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX;
+
+ PVRSRV_UNLOCK_CCB();
+
+skip:
+ psOffsetStruct->ui32Mapped--;
+}
+
+#endif /* defined(__linux__) */
+
+#endif /* defined(PVRSRV_REFCOUNT_DEBUG) */
diff --git a/drivers/gpu/pvr/refcount.h b/drivers/gpu/pvr/refcount.h
new file mode 100644
index 0000000..92de65c
--- /dev/null
+++ b/drivers/gpu/pvr/refcount.h
@@ -0,0 +1,188 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*/ /**************************************************************************/
+
+#ifndef __REFCOUNT_H__
+#define __REFCOUNT_H__
+
+#include "pvr_bridge_km.h"
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+
+void PVRSRVDumpRefCountCCB(void);
+
+#define PVRSRVKernelSyncInfoIncRef(x...) \
+ PVRSRVKernelSyncInfoIncRef2(__FILE__, __LINE__, x)
+#define PVRSRVKernelSyncInfoDecRef(x...) \
+ PVRSRVKernelSyncInfoDecRef2(__FILE__, __LINE__, x)
+#define PVRSRVKernelMemInfoIncRef(x...) \
+ PVRSRVKernelMemInfoIncRef2(__FILE__, __LINE__, x)
+#define PVRSRVKernelMemInfoDecRef(x...) \
+ PVRSRVKernelMemInfoDecRef2(__FILE__, __LINE__, x)
+#define PVRSRVBMBufIncRef(x...) \
+ PVRSRVBMBufIncRef2(__FILE__, __LINE__, x)
+#define PVRSRVBMBufDecRef(x...) \
+ PVRSRVBMBufDecRef2(__FILE__, __LINE__, x)
+#define PVRSRVBMBufIncExport(x...) \
+ PVRSRVBMBufIncExport2(__FILE__, __LINE__, x)
+#define PVRSRVBMBufDecExport(x...) \
+ PVRSRVBMBufDecExport2(__FILE__, __LINE__, x)
+
+void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile,
+ IMG_INT iLine, BM_BUF *pBuf);
+void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile,
+ IMG_INT iLine, BM_BUF *pBuf);
+void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile,
+ IMG_INT iLine, BM_BUF *pBuf);
+void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile,
+ IMG_INT iLine, BM_BUF *pBuf);
+void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ IMG_UINT32 ui32Index);
+void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ IMG_UINT32 ui32Index);
+
+#if defined(__linux__)
+
+/* mmap refcounting is Linux specific */
+#include "mmap.h"
+
+#define PVRSRVOffsetStructIncRef(x...) \
+ PVRSRVOffsetStructIncRef2(__FILE__, __LINE__, x)
+#define PVRSRVOffsetStructDecRef(x...) \
+ PVRSRVOffsetStructDecRef2(__FILE__, __LINE__, x)
+#define PVRSRVOffsetStructIncMapped(x...) \
+ PVRSRVOffsetStructIncMapped2(__FILE__, __LINE__, x)
+#define PVRSRVOffsetStructDecMapped(x...) \
+ PVRSRVOffsetStructDecMapped2(__FILE__, __LINE__, x)
+
+void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct);
+void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct);
+void PVRSRVOffsetStructIncMapped2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct);
+void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine,
+ PKV_OFFSET_STRUCT psOffsetStruct);
+
+#endif /* defined(__linux__) */
+
+#else /* defined(PVRSRV_REFCOUNT_DEBUG) */
+
+static INLINE void PVRSRVDumpRefCountCCB(void) { }
+
+static INLINE void PVRSRVKernelSyncInfoIncRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(psKernelMemInfo);
+ PVRSRVAcquireSyncInfoKM(psKernelSyncInfo);
+}
+
+static INLINE void PVRSRVKernelSyncInfoDecRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo,
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(psKernelMemInfo);
+ PVRSRVReleaseSyncInfoKM(psKernelSyncInfo);
+}
+
+static INLINE void PVRSRVKernelMemInfoIncRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ psKernelMemInfo->ui32RefCount++;
+}
+
+static INLINE void PVRSRVKernelMemInfoDecRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+ psKernelMemInfo->ui32RefCount--;
+}
+
+static INLINE void PVRSRVBMBufIncRef(BM_BUF *pBuf)
+{
+ pBuf->ui32RefCount++;
+}
+
+static INLINE void PVRSRVBMBufDecRef(BM_BUF *pBuf)
+{
+ pBuf->ui32RefCount--;
+}
+
+static INLINE void PVRSRVBMBufIncExport(BM_BUF *pBuf)
+{
+ pBuf->ui32ExportCount++;
+}
+
+static INLINE void PVRSRVBMBufDecExport(BM_BUF *pBuf)
+{
+ pBuf->ui32ExportCount--;
+}
+
+static INLINE void PVRSRVBMXProcIncRef(IMG_UINT32 ui32Index)
+{
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount++;
+}
+
+static INLINE void PVRSRVBMXProcDecRef(IMG_UINT32 ui32Index)
+{
+ gXProcWorkaroundShareData[ui32Index].ui32RefCount--;
+}
+
+#if defined(__linux__)
+
+/* mmap refcounting is Linux specific */
+#include "mmap.h"
+
+static INLINE void PVRSRVOffsetStructIncRef(PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ psOffsetStruct->ui32RefCount++;
+}
+
+static INLINE void PVRSRVOffsetStructDecRef(PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ psOffsetStruct->ui32RefCount--;
+}
+
+static INLINE void PVRSRVOffsetStructIncMapped(PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ psOffsetStruct->ui32Mapped++;
+}
+
+static INLINE void PVRSRVOffsetStructDecMapped(PKV_OFFSET_STRUCT psOffsetStruct)
+{
+ psOffsetStruct->ui32Mapped--;
+}
+
+#endif /* defined(__linux__) */
+
+#endif /* defined(PVRSRV_REFCOUNT_DEBUG) */
+
+#endif /* __REFCOUNT_H__ */
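For context, call sites use the short, unparameterized names; with PVRSRV_REFCOUNT_DEBUG defined the macros above expand to the *2 variants and stamp __FILE__/__LINE__ into the circular buffer kept in refcount.c, while release builds fall back to the static INLINE helpers that only adjust the counters. A minimal sketch (not part of the patch), assuming a valid psMemInfo from the services allocator:

#include "refcount.h"

static IMG_VOID ExampleTakeRef(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	/* Debug build: logs a "MEMINFO" CCB entry and bumps ui32RefCount;
	   release build: just bumps ui32RefCount. */
	PVRSRVKernelMemInfoIncRef(psMemInfo);

	/* ... use the allocation ... */

	PVRSRVKernelMemInfoDecRef(psMemInfo);
}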
diff --git a/drivers/gpu/pvr/regpaths.h b/drivers/gpu/pvr/regpaths.h
new file mode 100644
index 0000000..9193737
--- /dev/null
+++ b/drivers/gpu/pvr/regpaths.h
@@ -0,0 +1,43 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __REGPATHS_H__
+#define __REGPATHS_H__
+
+#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR"
+#define POWERVR_CHIP_KEY "\\SGX1\\"
+
+#define POWERVR_EURASIA_KEY "PowerVREurasia\\"
+
+#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
+
+#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
+
+
+#define MAX_REG_STRING_SIZE 128
+
+
+#endif
diff --git a/drivers/gpu/pvr/resman.c b/drivers/gpu/pvr/resman.c
new file mode 100644
index 0000000..8eddf77
--- /dev/null
+++ b/drivers/gpu/pvr/resman.c
@@ -0,0 +1,765 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "resman.h"
+
+#ifdef __linux__
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
+#include <linux/hardirq.h>
+#else
+#include <asm/hardirq.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#include <linux/mutex.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+static DEFINE_MUTEX(lock);
+#define DOWN(m) mutex_lock(m)
+#define UP(m) mutex_unlock(m)
+#else
+static DECLARE_MUTEX(lock);
+#define DOWN(m) down(m)
+#define UP(m) up(m)
+#endif
+
+#define ACQUIRE_SYNC_OBJ do { \
+ if (in_interrupt()) { \
+ printk("ISR cannot take RESMAN mutex\n"); \
+ BUG(); \
+ } \
+ else DOWN(&lock); \
+} while (0)
+#define RELEASE_SYNC_OBJ UP(&lock)
+
+#else
+
+#define ACQUIRE_SYNC_OBJ
+#define RELEASE_SYNC_OBJ
+
+#endif
+
+#define RESMAN_SIGNATURE 0x12345678
+
+typedef struct _RESMAN_ITEM_
+{
+#ifdef DEBUG
+ IMG_UINT32 ui32Signature;
+#endif
+ struct _RESMAN_ITEM_ **ppsThis;
+ struct _RESMAN_ITEM_ *psNext;
+
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32ResType;
+
+ IMG_PVOID pvParam;
+ IMG_UINT32 ui32Param;
+
+ RESMAN_FREE_FN pfnFreeResource;
+} RESMAN_ITEM;
+
+
+typedef struct _RESMAN_CONTEXT_
+{
+#ifdef DEBUG
+ IMG_UINT32 ui32Signature;
+#endif
+ struct _RESMAN_CONTEXT_ **ppsThis;
+ struct _RESMAN_CONTEXT_ *psNext;
+
+ PVRSRV_PER_PROCESS_DATA *psPerProc;
+
+ RESMAN_ITEM *psResItemList;
+
+} RESMAN_CONTEXT;
+
+
+typedef struct
+{
+ RESMAN_CONTEXT *psContextList;
+
+} RESMAN_LIST, *PRESMAN_LIST;
+
+
+PRESMAN_LIST gpsResList = IMG_NULL;
+
+#include "lists.h"
+
+static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM)
+static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE)
+static IMPLEMENT_LIST_INSERT(RESMAN_ITEM)
+static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM)
+static IMPLEMENT_LIST_REVERSE(RESMAN_ITEM)
+
+static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT)
+static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT)
+
+
+#define PRINT_RESLIST(x, y, z)
+
+static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback, IMG_BOOL bForceCleanup);
+
+static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext,
+ IMG_UINT32 ui32SearchCriteria,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bExecuteCallback);
+
+
+#ifdef DEBUG
+ static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
+ #define VALIDATERESLIST() ValidateResList(gpsResList)
+#else
+ #define VALIDATERESLIST()
+#endif
+
+
+
+
+
+
+PVRSRV_ERROR ResManInit(IMG_VOID)
+{
+ if (gpsResList == IMG_NULL)
+ {
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(*gpsResList),
+ (IMG_VOID **)&gpsResList, IMG_NULL,
+ "Resource Manager List") != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ gpsResList->psContextList = IMG_NULL;
+
+
+ VALIDATERESLIST();
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID ResManDeInit(IMG_VOID)
+{
+ if (gpsResList != IMG_NULL)
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
+ gpsResList = IMG_NULL;
+ }
+}
+
+
+PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
+ PRESMAN_CONTEXT *phResManContext)
+{
+ PVRSRV_ERROR eError;
+ PRESMAN_CONTEXT psResManContext;
+
+
+ ACQUIRE_SYNC_OBJ;
+
+
+ VALIDATERESLIST();
+
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
+ (IMG_VOID **)&psResManContext, IMG_NULL,
+ "Resource Manager Context");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct"));
+
+
+ VALIDATERESLIST();
+
+
+ RELEASE_SYNC_OBJ;
+
+ return eError;
+ }
+
+#ifdef DEBUG
+ psResManContext->ui32Signature = RESMAN_SIGNATURE;
+#endif
+ psResManContext->psResItemList = IMG_NULL;
+ psResManContext->psPerProc = hPerProc;
+
+
+ List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext);
+
+
+ VALIDATERESLIST();
+
+
+ RELEASE_SYNC_OBJ;
+
+ *phResManContext = psResManContext;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext,
+ IMG_BOOL bKernelContext)
+{
+
+ ACQUIRE_SYNC_OBJ;
+
+
+ VALIDATERESLIST();
+
+
+ PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE);
+
+
+
+ if (!bKernelContext)
+ {
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DMA_CLIENT_FIFO_DATA, 0, 0, IMG_TRUE);
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
+
+
+
+ List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE);
+ List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList);
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
+
+
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SYNC_INFO, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_MEM_INFO, 0, 0, IMG_TRUE);
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE);
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
+
+
+ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
+ }
+
+
+ PVR_ASSERT(psResManContext->psResItemList == IMG_NULL);
+
+
+ List_RESMAN_CONTEXT_Remove(psResManContext);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL);
+
+
+
+
+ VALIDATERESLIST();
+
+
+ PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE);
+
+
+ RELEASE_SYNC_OBJ;
+}
+
+
+PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ RESMAN_FREE_FN pfnFreeResource)
+{
+ PRESMAN_ITEM psNewResItem;
+
+ PVR_ASSERT(psResManContext != IMG_NULL);
+ PVR_ASSERT(ui32ResType != 0);
+
+ if (psResManContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext"));
+ return (PRESMAN_ITEM) IMG_NULL;
+ }
+
+
+ ACQUIRE_SYNC_OBJ;
+
+
+ VALIDATERESLIST();
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
+ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
+ "FreeFunc %08X",
+ (IMG_UINTPTR_T)psResManContext,
+ ui32ResType,
+ (IMG_UINTPTR_T)pvParam,
+ ui32Param,
+ (IMG_UINTPTR_T)pfnFreeResource));
+
+
+ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
+ IMG_NULL,
+ "Resource Manager Item") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
+ "ERROR allocating new resource item"));
+
+
+ RELEASE_SYNC_OBJ;
+
+ return((PRESMAN_ITEM)IMG_NULL);
+ }
+
+
+#ifdef DEBUG
+ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
+#endif
+ psNewResItem->ui32ResType = ui32ResType;
+ psNewResItem->pvParam = pvParam;
+ psNewResItem->ui32Param = ui32Param;
+ psNewResItem->pfnFreeResource = pfnFreeResource;
+ psNewResItem->ui32Flags = 0;
+
+
+ List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);
+
+
+ VALIDATERESLIST();
+
+
+ RELEASE_SYNC_OBJ;
+
+ return(psNewResItem);
+}
+
+PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM *psResItem, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psResItem != IMG_NULL);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
+ return PVRSRV_OK;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X",
+ (IMG_UINTPTR_T)psResItem));
+
+
+ ACQUIRE_SYNC_OBJ;
+
+
+ VALIDATERESLIST();
+
+
+ eError = FreeResourceByPtr(psResItem, IMG_TRUE, bForceCleanup);
+
+
+ VALIDATERESLIST();
+
+
+ RELEASE_SYNC_OBJ;
+
+ return(eError);
+}
+
+
+PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext,
+ IMG_UINT32 ui32SearchCriteria,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psResManContext != IMG_NULL);
+
+
+ ACQUIRE_SYNC_OBJ;
+
+
+ VALIDATERESLIST();
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
+ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
+ (IMG_UINTPTR_T)psResManContext, ui32SearchCriteria, ui32ResType,
+ (IMG_UINTPTR_T)pvParam, ui32Param));
+
+
+ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
+ ui32ResType, pvParam, ui32Param,
+ IMG_TRUE);
+
+
+ VALIDATERESLIST();
+
+
+ RELEASE_SYNC_OBJ;
+
+ return eError;
+}
+
+
+PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM *psResItem,
+ PRESMAN_CONTEXT psNewResManContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psResItem != IMG_NULL);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#ifdef DEBUG
+ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
+#endif
+
+ if (psNewResManContext != IMG_NULL)
+ {
+
+ List_RESMAN_ITEM_Remove(psResItem);
+
+
+ List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem);
+
+ }
+ else
+ {
+ eError = FreeResourceByPtr(psResItem, IMG_FALSE, CLEANUP_WITH_POLL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer"));
+ return eError;
+ }
+ }
+
+ return eError;
+}
+
+static IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
+{
+ RESMAN_ITEM *psItem;
+
+ psItem = va_arg(va, RESMAN_ITEM*);
+
+ return (IMG_BOOL)(psCurItem == psItem);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT psResManContext,
+ RESMAN_ITEM *psItem)
+{
+ PVRSRV_ERROR eResult;
+
+ PVR_ASSERT(psResManContext != IMG_NULL);
+ PVR_ASSERT(psItem != IMG_NULL);
+
+ if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter"));
+ PVR_DBG_BREAK;
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#ifdef DEBUG
+ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
+#endif
+
+
+ ACQUIRE_SYNC_OBJ;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
+ (IMG_UINTPTR_T)psItem, (IMG_UINTPTR_T)psItem->psNext));
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
+ "Param 0x%x, FnCall %08X, Flags 0x%x",
+ (IMG_UINTPTR_T)psResManContext,
+ psItem->ui32ResType,
+ (IMG_UINTPTR_T)psItem->pvParam,
+ psItem->ui32Param,
+ (IMG_UINTPTR_T)psItem->pfnFreeResource,
+ psItem->ui32Flags));
+
+
+ if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList,
+ &ResManFindResourceByPtr_AnyVaCb,
+ psItem))
+ {
+ eResult = PVRSRV_OK;
+ }
+ else
+ {
+ eResult = PVRSRV_ERROR_NOT_OWNER;
+ }
+
+
+ RELEASE_SYNC_OBJ;
+
+ return eResult;
+}
+
+static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem,
+ IMG_BOOL bExecuteCallback,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psItem != IMG_NULL);
+
+ if (psItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#ifdef DEBUG
+ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
+ (IMG_UINTPTR_T)psItem, (IMG_UINTPTR_T)psItem->psNext));
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
+ "Param 0x%x, FnCall %08X, Flags 0x%x",
+ psItem->ui32ResType,
+ (IMG_UINTPTR_T)psItem->pvParam, psItem->ui32Param,
+ (IMG_UINTPTR_T)psItem->pfnFreeResource, psItem->ui32Flags));
+
+
+ RELEASE_SYNC_OBJ;
+
+
+ if (bExecuteCallback)
+ {
+ eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param, bForceCleanup);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
+ }
+ }
+
+
+ ACQUIRE_SYNC_OBJ;
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+
+ List_RESMAN_ITEM_Remove(psItem);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL);
+ }
+
+ return(eError);
+}
+
+static IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
+{
+ IMG_UINT32 ui32SearchCriteria;
+ IMG_UINT32 ui32ResType;
+ IMG_PVOID pvParam;
+ IMG_UINT32 ui32Param;
+
+ ui32SearchCriteria = va_arg(va, IMG_UINT32);
+ ui32ResType = va_arg(va, IMG_UINT32);
+ pvParam = va_arg(va, IMG_PVOID);
+ ui32Param = va_arg(va, IMG_UINT32);
+
+
+ if(
+
+ (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
+ (psCurItem->ui32ResType == ui32ResType))
+ &&
+
+ (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
+ (psCurItem->pvParam == pvParam))
+ &&
+
+ (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
+ (psCurItem->ui32Param == ui32Param))
+ )
+ {
+ return psCurItem;
+ }
+ else
+ {
+ return IMG_NULL;
+ }
+}
+
+static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext,
+ IMG_UINT32 ui32SearchCriteria,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bExecuteCallback)
+{
+ PRESMAN_ITEM psCurItem;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+
+
+ while((psCurItem = (PRESMAN_ITEM)
+ List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
+ &FreeResourceByCriteria_AnyVaCb,
+ ui32SearchCriteria,
+ ui32ResType,
+ pvParam,
+ ui32Param)) != IMG_NULL
+ && eError == PVRSRV_OK)
+ {
+ do
+ {
+ eError = FreeResourceByPtr(psCurItem, bExecuteCallback, CLEANUP_WITH_POLL);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ RELEASE_SYNC_OBJ;
+ OSReleaseBridgeLock();
+
+ OSSleepms(MAX_CLEANUP_TIME_WAIT_US/1000);
+ OSReacquireBridgeLock();
+ ACQUIRE_SYNC_OBJ;
+ }
+ } while (eError == PVRSRV_ERROR_RETRY);
+ }
+
+ return eError;
+}
+
+
+#ifdef DEBUG
+static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
+{
+ PRESMAN_ITEM psCurItem, *ppsThisItem;
+ PRESMAN_CONTEXT psCurContext, *ppsThisContext;
+
+
+ if (psResList == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
+ return;
+ }
+
+ psCurContext = psResList->psContextList;
+ ppsThisContext = &psResList->psContextList;
+
+
+ while(psCurContext != IMG_NULL)
+ {
+
+ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
+ if (psCurContext->ppsThis != ppsThisContext)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
+ (IMG_UINTPTR_T)psCurContext,
+ (IMG_UINTPTR_T)psCurContext->ppsThis,
+ (IMG_UINTPTR_T)psCurContext->psNext,
+ (IMG_UINTPTR_T)ppsThisContext));
+ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
+ }
+
+
+ psCurItem = psCurContext->psResItemList;
+ ppsThisItem = &psCurContext->psResItemList;
+ while(psCurItem != IMG_NULL)
+ {
+
+ PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
+ if (psCurItem->ppsThis != ppsThisItem)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
+ (IMG_UINTPTR_T)psCurItem,
+ (IMG_UINTPTR_T)psCurItem->ppsThis,
+ (IMG_UINTPTR_T)psCurItem->psNext,
+ (IMG_UINTPTR_T)ppsThisItem));
+ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
+ }
+
+
+ ppsThisItem = &psCurItem->psNext;
+ psCurItem = psCurItem->psNext;
+ }
+
+
+ ppsThisContext = &psCurContext->psNext;
+ psCurContext = psCurContext->psNext;
+ }
+}
+#endif
+
+
diff --git a/drivers/gpu/pvr/resman.h b/drivers/gpu/pvr/resman.h
new file mode 100644
index 0000000..648e490
--- /dev/null
+++ b/drivers/gpu/pvr/resman.h
@@ -0,0 +1,118 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __RESMAN_H__
+#define __RESMAN_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+enum {
+
+ RESMAN_TYPE_SHARED_PB_DESC = 1,
+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
+ RESMAN_TYPE_HW_RENDER_CONTEXT,
+ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
+ RESMAN_TYPE_HW_2D_CONTEXT,
+ RESMAN_TYPE_TRANSFER_CONTEXT,
+
+
+ RESMAN_TYPE_DMA_CLIENT_FIFO_DATA,
+
+
+
+
+
+ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
+ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
+
+
+ RESMAN_TYPE_BUFFERCLASS_DEVICE,
+
+
+ RESMAN_TYPE_OS_USERMODE_MAPPING,
+
+
+ RESMAN_TYPE_DEVICEMEM_CONTEXT,
+ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
+ RESMAN_TYPE_DEVICEMEM_MAPPING,
+ RESMAN_TYPE_DEVICEMEM_WRAP,
+ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
+ RESMAN_TYPE_EVENT_OBJECT,
+ RESMAN_TYPE_SHARED_MEM_INFO,
+ RESMAN_TYPE_MODIFY_SYNC_OPS,
+ RESMAN_TYPE_SYNC_INFO,
+
+
+ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
+};
+
+#define RESMAN_CRITERIA_ALL 0x00000000
+#define RESMAN_CRITERIA_RESTYPE 0x00000001
+#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
+#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
+
+typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup);
+
+typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
+typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT;
+
+PVRSRV_ERROR ResManInit(IMG_VOID);
+IMG_VOID ResManDeInit(IMG_VOID);
+
+PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ RESMAN_FREE_FN pfnFreeResource);
+
+PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem,
+ IMG_BOOL bForceCleanup);
+
+PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext,
+ IMG_UINT32 ui32SearchCriteria,
+ IMG_UINT32 ui32ResType,
+ IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param);
+
+PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem,
+ PRESMAN_CONTEXT psNewResManContext);
+
+PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext,
+ PRESMAN_ITEM psItem);
+
+PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
+ PRESMAN_CONTEXT *phResManContext);
+IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext,
+ IMG_BOOL bKernelContext);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
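To show how the declarations above fit together, here is a hedged sketch (not part of the patch) of the usual resource-manager flow: connect a per-process context, register a resource with a RESMAN_FREE_FN cleanup callback, free it explicitly, and disconnect. The resource type, parameters, and the ExampleCleanup body are placeholders, not the driver's real call sites.

#include "services_headers.h"
#include "resman.h"

static PVRSRV_ERROR ExampleCleanup(IMG_PVOID pvParam, IMG_UINT32 ui32Param,
				   IMG_BOOL bForceCleanup)
{
	/* Release whatever pvParam/ui32Param describe (placeholder). */
	PVR_UNREFERENCED_PARAMETER(pvParam);
	PVR_UNREFERENCED_PARAMETER(ui32Param);
	PVR_UNREFERENCED_PARAMETER(bForceCleanup);
	return PVRSRV_OK;
}

static PVRSRV_ERROR ExampleResManFlow(IMG_HANDLE hPerProc, IMG_PVOID pvRes)
{
	PRESMAN_CONTEXT psCtx;
	PRESMAN_ITEM psItem;
	PVRSRV_ERROR eError;

	eError = PVRSRVResManConnect(hPerProc, &psCtx);
	if (eError != PVRSRV_OK)
		return eError;

	/* Track pvRes so it is cleaned up automatically if the process dies. */
	psItem = ResManRegisterRes(psCtx, RESMAN_TYPE_EVENT_OBJECT, pvRes, 0,
				   ExampleCleanup);
	if (psItem == IMG_NULL)
	{
		PVRSRVResManDisconnect(psCtx, IMG_FALSE);
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Explicit free runs ExampleCleanup and unlinks the item. */
	eError = ResManFreeResByPtr(psItem, IMG_FALSE);

	/* Disconnect frees anything still registered, walking the types in order. */
	PVRSRVResManDisconnect(psCtx, IMG_FALSE);
	return eError;
}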
diff --git a/drivers/gpu/pvr/services.h b/drivers/gpu/pvr/services.h
new file mode 100644
index 0000000..1f34059
--- /dev/null
+++ b/drivers/gpu/pvr/services.h
@@ -0,0 +1,1324 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __SERVICES_H__
+#define __SERVICES_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "pdumpdefs.h"
+
+
+#define PVRSRV_4K_PAGE_SIZE 4096UL
+
+#define PVRSRV_MAX_CMD_SIZE 1024
+
+#define PVRSRV_MAX_DEVICES 16
+
+#define EVENTOBJNAME_MAXLENGTH (50)
+
+#define PVRSRV_MEM_READ (1U<<0)
+#define PVRSRV_MEM_WRITE (1U<<1)
+#define PVRSRV_MEM_CACHE_CONSISTENT (1U<<2)
+#define PVRSRV_MEM_NO_SYNCOBJ (1U<<3)
+#define PVRSRV_MEM_INTERLEAVED (1U<<4)
+#define PVRSRV_MEM_DUMMY (1U<<5)
+#define PVRSRV_MEM_EDM_PROTECT (1U<<6)
+#define PVRSRV_MEM_ZERO (1U<<7)
+#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1U<<8)
+#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1U<<9)
+#define PVRSRV_MEM_NO_RESMAN (1U<<10)
+#define PVRSRV_MEM_EXPORTED (1U<<11)
+
+
+#define PVRSRV_HAP_CACHED (1U<<12)
+#define PVRSRV_HAP_UNCACHED (1U<<13)
+#define PVRSRV_HAP_WRITECOMBINE (1U<<14)
+#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
+#define PVRSRV_HAP_KERNEL_ONLY (1U<<15)
+#define PVRSRV_HAP_SINGLE_PROCESS (1U<<16)
+#define PVRSRV_HAP_MULTI_PROCESS (1U<<17)
+#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1U<<18)
+#define PVRSRV_HAP_NO_CPU_VIRTUAL (1U<<19)
+#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \
+ |PVRSRV_HAP_SINGLE_PROCESS \
+ |PVRSRV_HAP_MULTI_PROCESS \
+ |PVRSRV_HAP_FROM_EXISTING_PROCESS \
+ |PVRSRV_HAP_NO_CPU_VIRTUAL)
+
+#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED
+#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED
+#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE
+
+#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24)
+
+#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27)
+#define PVRSRV_MEM_XPROC (1U<<28)
+#define PVRSRV_MEM_ION (1U<<29)
+#define PVRSRV_MEM_ALLOCATENONCACHEDMEM (1UL<<30)
+
+#define PVRSRV_NO_CONTEXT_LOSS 0
+#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
+#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
+
+
+#define PVRSRV_DEFAULT_DEV_COOKIE (1)
+
+
+#define PVRSRV_MISC_INFO_TIMER_PRESENT (1U<<0)
+#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1U<<1)
+#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1U<<2)
+#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1U<<3)
+#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1U<<4)
+#define PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT (1U<<5)
+#define PVRSRV_MISC_INFO_FREEMEM_PRESENT (1U<<6)
+#define PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT (1U<<7)
+#define PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT (1U<<8)
+#define PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT (1U<<9)
+
+#define PVRSRV_MISC_INFO_RESET_PRESENT (1U<<31)
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
+
+
+#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
+
+#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
+#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
+
+#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001
+#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002
+
+#define SRV_FLAGS_PERSIST 0x1
+#define SRV_FLAGS_PDUMP_ACTIVE 0x2
+
+#define PVRSRV_PDUMP_FLAGS_CONTINUOUS 0x1
+
+
+typedef enum _PVRSRV_DEVICE_TYPE_
+{
+ PVRSRV_DEVICE_TYPE_UNKNOWN = 0 ,
+ PVRSRV_DEVICE_TYPE_MBX1 = 1 ,
+ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 ,
+
+ PVRSRV_DEVICE_TYPE_M24VA = 3,
+ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
+ PVRSRV_DEVICE_TYPE_MVED1 = 5,
+ PVRSRV_DEVICE_TYPE_MSVDX = 6,
+
+ PVRSRV_DEVICE_TYPE_SGX = 7,
+
+ PVRSRV_DEVICE_TYPE_VGX = 8,
+
+
+ PVRSRV_DEVICE_TYPE_EXT = 9,
+
+ PVRSRV_DEVICE_TYPE_LAST = 9,
+
+ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_DEVICE_TYPE;
+
+#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) )
+#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) )
+#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 )
+
+#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
+
+typedef enum
+{
+ IMG_EGL = 0x00000001,
+ IMG_OPENGLES1 = 0x00000002,
+ IMG_OPENGLES2 = 0x00000003,
+ IMG_D3DM = 0x00000004,
+ IMG_SRV_UM = 0x00000005,
+ IMG_OPENVG = 0x00000006,
+ IMG_SRVCLIENT = 0x00000007,
+ IMG_VISTAKMD = 0x00000008,
+ IMG_VISTA3DNODE = 0x00000009,
+ IMG_VISTAMVIDEONODE = 0x0000000A,
+ IMG_VISTAVPBNODE = 0x0000000B,
+ IMG_OPENGL = 0x0000000C,
+ IMG_D3D = 0x0000000D,
+#if defined(SUPPORT_GRAPHICS_HAL) || defined(SUPPORT_COMPOSER_HAL)
+ IMG_ANDROID_HAL = 0x0000000E,
+#endif
+#if defined(SUPPORT_OPENCL)
+ IMG_OPENCL = 0x0000000F,
+#endif
+
+} IMG_MODULE_ID;
+
+
+#define APPHINT_MAX_STRING_SIZE 256
+
+typedef enum
+{
+ IMG_STRING_TYPE = 1,
+ IMG_FLOAT_TYPE ,
+ IMG_UINT_TYPE ,
+ IMG_INT_TYPE ,
+ IMG_FLAG_TYPE
+}IMG_DATA_TYPE;
+
+
+typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+ PVRSRV_DEVICE_TYPE eDeviceType;
+ PVRSRV_DEVICE_CLASS eDeviceClass;
+ IMG_UINT32 ui32DeviceIndex;
+ IMG_CHAR *pszPDumpDevName;
+ IMG_CHAR *pszPDumpRegName;
+
+} PVRSRV_DEVICE_IDENTIFIER;
+
+
+typedef struct _PVRSRV_CLIENT_DEV_DATA_
+{
+ IMG_UINT32 ui32NumDevices;
+ PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
+ PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
+ PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
+
+} PVRSRV_CLIENT_DEV_DATA;
+
+
+typedef struct _PVRSRV_CONNECTION_
+{
+ IMG_HANDLE hServices;
+ IMG_UINT32 ui32ProcessID;
+ PVRSRV_CLIENT_DEV_DATA sClientDevData;
+ IMG_UINT32 ui32SrvFlags;
+}PVRSRV_CONNECTION;
+
+
+typedef struct _PVRSRV_DEV_DATA_
+{
+ IMG_CONST PVRSRV_CONNECTION *psConnection;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+
+} PVRSRV_DEV_DATA;
+
+typedef struct _PVRSRV_MEMUPDATE_
+{
+ IMG_UINT32 ui32UpdateAddr;
+ IMG_UINT32 ui32UpdateVal;
+} PVRSRV_MEMUPDATE;
+
+typedef struct _PVRSRV_HWREG_
+{
+ IMG_UINT32 ui32RegAddr;
+ IMG_UINT32 ui32RegVal;
+} PVRSRV_HWREG;
+
+typedef struct _PVRSRV_MEMBLK_
+{
+ IMG_DEV_VIRTADDR sDevVirtAddr;
+ IMG_HANDLE hOSMemHandle;
+ IMG_HANDLE hOSWrapMem;
+ IMG_HANDLE hBuffer;
+ IMG_HANDLE hResItem;
+ IMG_SYS_PHYADDR *psIntSysPAddr;
+
+} PVRSRV_MEMBLK;
+
+typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
+
+typedef struct _PVRSRV_CLIENT_MEM_INFO_
+{
+
+ IMG_PVOID pvLinAddr;
+
+
+ IMG_PVOID pvLinAddrKM;
+
+
+ IMG_DEV_VIRTADDR sDevVAddr;
+
+
+
+
+
+
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+
+ IMG_UINT32 ui32Flags;
+
+
+
+
+ IMG_UINT32 ui32ClientFlags;
+
+
+ IMG_SIZE_T uAllocSize;
+
+
+
+ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo;
+
+#if defined (SUPPORT_SID_INTERFACE)
+
+ IMG_SID hMappingInfo;
+
+
+ IMG_SID hKernelMemInfo;
+
+
+ IMG_SID hResItem;
+#else
+
+ IMG_HANDLE hMappingInfo;
+
+
+ IMG_HANDLE hKernelMemInfo;
+
+
+ IMG_HANDLE hResItem;
+#endif
+
+#if defined(SUPPORT_MEMINFO_IDS)
+ #if !defined(USE_CODE)
+
+ IMG_UINT64 ui64Stamp;
+ #else
+ IMG_UINT32 dummy1;
+ IMG_UINT32 dummy2;
+ #endif
+#endif
+
+
+
+
+ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext;
+
+} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
+
+
+#define PVRSRV_MAX_CLIENT_HEAPS (32)
+typedef struct _PVRSRV_HEAP_INFO_
+{
+ IMG_UINT32 ui32HeapID;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap;
+#else
+ IMG_HANDLE hDevMemHeap;
+#endif
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_UINT32 ui32HeapByteSize;
+ IMG_UINT32 ui32Attribs;
+ IMG_UINT32 ui32XTileStride;
+}PVRSRV_HEAP_INFO;
+
+
+
+
+typedef struct _PVRSRV_EVENTOBJECT_
+{
+
+ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH];
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hOSEventKM;
+#else
+ IMG_HANDLE hOSEventKM;
+#endif
+
+} PVRSRV_EVENTOBJECT;
+
+typedef enum
+{
+ PVRSRV_MISC_INFO_CPUCACHEOP_NONE = 0,
+ PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN,
+ PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH
+} PVRSRV_MISC_INFO_CPUCACHEOP_TYPE;
+
+typedef struct _PVRSRV_MISC_INFO_
+{
+ IMG_UINT32 ui32StateRequest;
+ IMG_UINT32 ui32StatePresent;
+
+
+ IMG_VOID *pvSOCTimerRegisterKM;
+ IMG_VOID *pvSOCTimerRegisterUM;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSOCTimerRegisterOSMemHandle;
+ IMG_SID hSOCTimerRegisterMappingInfo;
+#else
+ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
+ IMG_HANDLE hSOCTimerRegisterMappingInfo;
+#endif
+
+
+ IMG_VOID *pvSOCClockGateRegs;
+ IMG_UINT32 ui32SOCClockGateRegsSize;
+
+
+ IMG_CHAR *pszMemoryStr;
+ IMG_UINT32 ui32MemoryStrLen;
+
+
+ PVRSRV_EVENTOBJECT sGlobalEventObject;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_EVENTSID hOSGlobalEvent;
+#else
+ IMG_HANDLE hOSGlobalEvent;
+#endif
+
+
+ IMG_UINT32 aui32DDKVersion[4];
+
+
+ struct
+ {
+
+ IMG_BOOL bDeferOp;
+
+
+ PVRSRV_MISC_INFO_CPUCACHEOP_TYPE eCacheOpType;
+
+
+#if !defined (SUPPORT_SID_INTERFACE)
+ union
+ {
+
+ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo;
+
+
+ struct _PVRSRV_KERNEL_MEM_INFO_ *psKernelMemInfo;
+ } u;
+#endif
+
+
+ IMG_VOID *pvBaseVAddr;
+
+
+ IMG_UINT32 ui32Length;
+ } sCacheOpCtl;
+
+
+ struct
+ {
+
+#if !defined(SUPPORT_SID_INTERFACE)
+ union
+ {
+
+ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo;
+
+
+ struct _PVRSRV_KERNEL_MEM_INFO_ *psKernelMemInfo;
+ } u;
+#endif
+
+
+ IMG_UINT32 ui32RefCount;
+ } sGetRefCountCtl;
+} PVRSRV_MISC_INFO;
+
+typedef struct _PVRSRV_SYNC_TOKEN_
+{
+
+
+ struct
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+ IMG_UINT32 ui32ReadOpsPendingSnapshot;
+ IMG_UINT32 ui32WriteOpsPendingSnapshot;
+ IMG_UINT32 ui32ReadOps2PendingSnapshot;
+ } sPrivate;
+} PVRSRV_SYNC_TOKEN;
+
+
+typedef enum _PVRSRV_CLIENT_EVENT_
+{
+ PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,
+} PVRSRV_CLIENT_EVENT;
+
+typedef IMG_VOID (*PFN_QUEUE_COMMAND_COMPLETE)(IMG_HANDLE hCallbackData);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent,
+ PVRSRV_DEV_DATA *psDevData,
+ IMG_PVOID pvData);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION **ppsConnection, IMG_UINT32 ui32SrvFlags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(IMG_CONST PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 *puiNumDevices,
+ PVRSRV_DEVICE_IDENTIFIER *puiDevIDs);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 uiDevIndex,
+ PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_DEVICE_TYPE eDeviceType);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+
+#if 1
+IMG_IMPORT
+IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+IMG_IMPORT
+IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hOSEvent,
+#else
+ IMG_HANDLE hOSEvent,
+#endif
+ volatile IMG_UINT32 *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Waitus,
+ IMG_UINT32 ui32Tries);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phDevMemContext,
+#else
+ IMG_HANDLE *phDevMemContext,
+#endif
+ IMG_UINT32 *pui32SharedHeapCount,
+ PVRSRV_HEAP_INFO *psHeapInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext
+#else
+ IMG_HANDLE hDevMemContext
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext,
+#else
+ IMG_HANDLE hDevMemContext,
+#endif
+ IMG_UINT32 *pui32SharedHeapCount,
+ PVRSRV_HEAP_INFO *psHeapInfo);
+
+#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
+ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
+ (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \
+ ": " logStr " (size = 0x%lx)", ui32Size)), \
+ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo))
+#else
+ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
+ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)
+#endif
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap,
+#else
+ IMG_HANDLE hDevMemHeap,
+#endif
+ IMG_UINT32 ui32Attribs,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap,
+#else
+ IMG_HANDLE hDevMemHeap,
+#endif
+ IMG_UINT32 ui32Attribs,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phMemInfo
+#else
+ IMG_HANDLE *phMemInfo
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemHeap,
+#else
+ IMG_HANDLE hDevMemHeap,
+#endif
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T ui32Size,
+ IMG_SIZE_T ui32Alignment,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo,
+ IMG_SID hDstDevMemHeap,
+#else
+ IMG_HANDLE hKernelMemInfo,
+ IMG_HANDLE hDstDevMemHeap,
+#endif
+ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_UINT32 ui32Flags);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Flags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext,
+#else
+ IMG_HANDLE hDevMemContext,
+#endif
+ IMG_SIZE_T ui32ByteSize,
+ IMG_SIZE_T ui32PageOffset,
+ IMG_BOOL bPhysContig,
+ IMG_SYS_PHYADDR *psSysPAddr,
+ IMG_VOID *pvLinAddr,
+ IMG_UINT32 ui32Flags,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo,
+ IMG_UINT32 ui32Attribs);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext,
+ IMG_SID hDeviceClassBuffer,
+#else
+ IMG_HANDLE hDevMemContext,
+ IMG_HANDLE hDeviceClassBuffer,
+#endif
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_SYS_PHYADDR sSysPhysAddr,
+ IMG_UINT32 uiSizeInBytes,
+ IMG_PVOID *ppvUserAddr,
+ IMG_UINT32 *puiActualSize,
+ IMG_PVOID *ppvProcess);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_PVOID pvUserAddr,
+ IMG_PVOID pvProcess);
+
+#if defined(LINUX)
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+ IMG_INT *iFd);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_INT iFd,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDstDevMemHeap,
+#else
+ IMG_HANDLE hDstDevMemHeap,
+#endif
+ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
+#endif
+
+#if defined(SUPPORT_ION)
+PVRSRV_ERROR PVRSRVMapIonHandle(const PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext,
+#else
+ IMG_HANDLE hDevMemContext,
+#endif
+ IMG_INT32 uiFD,
+ IMG_UINT32 uiSize,
+ IMG_UINT32 ui32Attribs,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+
+PVRSRV_ERROR PVRSRVUnmapIonHandle(const PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+#endif
+
+typedef enum _PVRSRV_SYNCVAL_MODE_
+{
+ PVRSRV_SYNCVAL_READ = IMG_TRUE,
+ PVRSRV_SYNCVAL_WRITE = IMG_FALSE,
+
+} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
+
+typedef IMG_UINT32 PVRSRV_SYNCVAL;
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+ PVRSRV_SYNCVAL_MODE eMode);
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ PVRSRV_DEVICE_CLASS DeviceClass,
+ IMG_UINT32 *pui32DevCount,
+ IMG_UINT32 *pui32DevID);
+
+IMG_IMPORT
+IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_UINT32 ui32DeviceID);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_FORMAT *psFormat);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
+ IMG_UINT32 *pui32Count,
+ DISPLAY_FORMAT *psFormat,
+ DISPLAY_DIMS *psDims);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phBuffer
+#else
+ IMG_HANDLE *phBuffer
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
+ DISPLAY_INFO* psDisplayInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice,
+ IMG_UINT32 ui32Flags,
+ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
+ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
+ IMG_UINT32 ui32BufferCount,
+ IMG_UINT32 ui32OEMFlags,
+ IMG_UINT32 *pui32SwapChainID,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phSwapChain
+#else
+ IMG_HANDLE *phSwapChain
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain
+#else
+ IMG_HANDLE hSwapChain
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+#else
+ IMG_HANDLE hSwapChain,
+#endif
+ IMG_RECT *psDstRect);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+#else
+ IMG_HANDLE hSwapChain,
+#endif
+ IMG_RECT *psSrcRect);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+#else
+ IMG_HANDLE hSwapChain,
+#endif
+ IMG_UINT32 ui32CKColour);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+#else
+ IMG_HANDLE hSwapChain,
+#endif
+ IMG_UINT32 ui32CKColour);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+ IMG_SID *phBuffer
+#else
+ IMG_HANDLE hSwapChain,
+ IMG_HANDLE *phBuffer
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers2(IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain,
+ IMG_SID *phBuffer,
+#else
+ IMG_HANDLE hSwapChain,
+ IMG_HANDLE *phBuffer,
+#endif
+ IMG_SYS_PHYADDR *psPhyAddr);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hBuffer,
+#else
+ IMG_HANDLE hBuffer,
+#endif
+ IMG_UINT32 ui32ClipRectCount,
+ IMG_RECT *psClipRect,
+ IMG_UINT32 ui32SwapInterval,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hPrivateTag
+#else
+ IMG_HANDLE hPrivateTag
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer2 (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hBuffer,
+#else
+ IMG_HANDLE hBuffer,
+#endif
+ IMG_UINT32 ui32SwapInterval,
+ PVRSRV_CLIENT_MEM_INFO **ppsMemInfos,
+ IMG_UINT32 ui32NumMemInfos,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSwapChain
+#else
+ IMG_HANDLE hSwapChain
+#endif
+ );
+
+IMG_IMPORT
+IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_UINT32 ui32DeviceID);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_HANDLE hDevice);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
+ BUFFER_INFO *psBuffer);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
+ IMG_UINT32 ui32BufferIndex,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phBuffer
+#else
+ IMG_HANDLE *phBuffer
+#endif
+ );
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo,
+#else
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+#endif
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32Flags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo,
+#else
+ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
+#endif
+ IMG_BOOL bIsRead,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol2(IMG_CONST PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfo,
+#else
+ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
+#endif
+ IMG_BOOL bIsRead);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_PVOID pvAltLinAddr,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes,
+ IMG_UINT32 ui32Flags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_PVOID pvAltLinAddr,
+ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Bytes);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_CHAR *pszRegRegion,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Flags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(const PVRSRV_DEV_DATA *psDevData,
+ IMG_CHAR *pszRegRegion,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(const PVRSRV_DEV_DATA *psDevData,
+ IMG_CHAR *pszRegRegion,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_PHYADDR sPDDevPAddr);
+
+#if !defined(USE_CODE)
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo,
+#else
+ IMG_HANDLE hKernelMemInfo,
+#endif
+ IMG_DEV_PHYADDR *pPages,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32Length,
+ IMG_UINT32 ui32Flags);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 ui32Frame);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_CONST IMG_CHAR *pszComment,
+ IMG_BOOL bContinuous);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_BOOL bContinuous,
+ IMG_CONST IMG_CHAR *pszFormat, ...)
+#if !defined(USE_CODE)
+ IMG_FORMAT_PRINTF(3, 4)
+#endif
+;
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 ui32Flags,
+ IMG_CONST IMG_CHAR *pszFormat, ...)
+#if !defined(USE_CODE)
+ IMG_FORMAT_PRINTF(3, 4)
+#endif
+;
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_CHAR *pszString,
+ IMG_BOOL bContinuous);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
+ IMG_BOOL *pbIsCapturing);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevMemContext,
+#else
+ IMG_HANDLE hDevMemContext,
+#endif
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ PDUMP_MEM_FORMAT eMemFormat,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_CONST IMG_CHAR *pszRegRegion,
+ IMG_CONST IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+IMG_IMPORT
+IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ IMG_UINT32 ui32RegOffset,
+ IMG_BOOL bLastFrame);
+
+IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName);
+IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
+IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
+
+IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
+IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
+IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
+IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void);
+IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale);
+
+
+
+
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
+ const IMG_CHAR *pszAppName,
+ IMG_VOID **ppvState);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
+ IMG_VOID *pvHintState);
+
+IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState,
+ const IMG_CHAR *pszHintName,
+ IMG_DATA_TYPE eDataType,
+ const IMG_VOID *pvDefault,
+ IMG_VOID *pvReturn);
+
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size);
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size);
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
+IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
+IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
+
+struct _PVRSRV_MUTEX_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE;
+
+
+
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockProcessGlobalMutex(void);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockProcessGlobalMutex(void);
+
+
+struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_ *PVRSRV_SEMAPHORE_HANDLE;
+
+
+ #define IMG_SEMAPHORE_WAIT_INFINITE ((IMG_UINT64)0xFFFFFFFFFFFFFFFFull)
+
+
+#if !defined(USE_CODE)
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVCreateSemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVCreateSemaphore(PVRSRV_SEMAPHORE_HANDLE *phSemaphore, IMG_INT iInitialCount)
+{
+ PVR_UNREFERENCED_PARAMETER(iInitialCount);
+ *phSemaphore = 0;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDestroySemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVDestroySemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore)
+{
+ PVR_UNREFERENCED_PARAMETER(hSemaphore);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVWaitSemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVWaitSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_UINT64 ui64TimeoutMicroSeconds)
+{
+ PVR_UNREFERENCED_PARAMETER(hSemaphore);
+ PVR_UNREFERENCED_PARAMETER(ui64TimeoutMicroSeconds);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPostSemaphore)
+#endif
+static INLINE IMG_VOID PVRSRVPostSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_INT iPostCount)
+{
+ PVR_UNREFERENCED_PARAMETER(hSemaphore);
+ PVR_UNREFERENCED_PARAMETER(iPostCount);
+}
+
+#endif
+
+
+#if (defined(DEBUG) && defined(__linux__))
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
+
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
+
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize,
+ IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
+#endif
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_EVENTSID hOSEvent
+#else
+ IMG_HANDLE hOSEvent
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateSyncInfoModObj(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phKernelSyncInfoModObj
+#else
+ IMG_HANDLE *phKernelSyncInfoModObj
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroySyncInfoModObj(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj
+#else
+ IMG_HANDLE hKernelSyncInfoModObj
+#endif
+ );
+
+
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj,
+#else
+ IMG_HANDLE hKernelSyncInfoModObj,
+#endif
+ PVRSRV_CLIENT_SYNC_INFO *psSyncInfo,
+ IMG_UINT32 ui32ModifyFlags,
+ IMG_UINT32 *pui32ReadOpsPending,
+ IMG_UINT32 *pui32WriteOpsPending);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj
+#else
+ IMG_HANDLE hKernelSyncInfoModObj
+#endif
+ );
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsTakeToken(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ const IMG_SID hKernelSyncInfo,
+#else
+ const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo,
+#endif
+ PVRSRV_SYNC_TOKEN *psSyncToken);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToToken(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ const IMG_SID hKernelSyncInfo,
+#else
+ const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo,
+#endif
+ const PVRSRV_SYNC_TOKEN *psSyncToken,
+ IMG_BOOL bWait);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToModObj(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelSyncInfoModObj,
+#else
+ IMG_HANDLE hKernelSyncInfoModObj,
+#endif
+ IMG_BOOL bWait);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToDelta(const PVRSRV_CONNECTION *psConnection,
+ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
+ IMG_UINT32 ui32Delta,
+ IMG_BOOL bWait);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_SYNC_INFO **ppsSyncInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
+ PVRSRV_CLIENT_SYNC_INFO *psSyncInfo);
+
+IMG_IMPORT
+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+
+
+#define TIME_NOT_PASSED_UINT32(a,b,c) (((a) - (b)) < (c))
+
+#if defined (__cplusplus)
+}
+#endif
+#endif
+
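For reference only (not part of this patch): a minimal user-mode sketch of how the connection and device-enumeration entry points declared in services.h above fit together. The helper name ExampleAcquireSGX is hypothetical, and PVRSRV_MAX_DEVICES is assumed to be provided by the platform sysinfo headers rather than by this hunk.

#include "services.h"

/* Hypothetical helper: connect to services and acquire the SGX device.
 * The connection must remain open for as long as psDevData is in use,
 * e.g. for later PVRSRVAllocDeviceMem() calls. */
static PVRSRV_ERROR ExampleAcquireSGX(PVRSRV_CONNECTION **ppsConnection,
                                      PVRSRV_DEV_DATA *psDevData)
{
	PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
	IMG_UINT32 ui32NumDevices, i;
	PVRSRV_ERROR eError;

	eError = PVRSRVConnect(ppsConnection, 0);
	if (eError != PVRSRV_OK)
		return eError;

	eError = PVRSRVEnumerateDevices(*ppsConnection, &ui32NumDevices, asDevID);
	if (eError != PVRSRV_OK)
		goto err_disconnect;

	for (i = 0; i < ui32NumDevices; i++)
	{
		if (asDevID[i].eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
		{
			/* Bind psDevData (including hDevCookie) to the SGX
			 * device for use by subsequent services calls. */
			return PVRSRVAcquireDeviceData(*ppsConnection,
						       asDevID[i].ui32DeviceIndex,
						       psDevData,
						       PVRSRV_DEVICE_TYPE_SGX);
		}
	}
	eError = PVRSRV_ERROR_INVALID_DEVICE;

err_disconnect:
	PVRSRVDisconnect(*ppsConnection);
	return eError;
}
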
diff --git a/drivers/gpu/pvr/services_headers.h b/drivers/gpu/pvr/services_headers.h
new file mode 100644
index 0000000..09ed87e
--- /dev/null
+++ b/drivers/gpu/pvr/services_headers.h
@@ -0,0 +1,50 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef SERVICES_HEADERS_H
+#define SERVICES_HEADERS_H
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG 1
+#endif
+
+#include "img_defs.h"
+#include "services.h"
+#include "servicesint.h"
+#include "power.h"
+#include "resman.h"
+#include "queue.h"
+#include "srvkm.h"
+#include "kerneldisplay.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "metrics.h"
+#include "osfunc.h"
+#include "refcount.h"
+
+#endif
+
diff --git a/drivers/gpu/pvr/servicesext.h b/drivers/gpu/pvr/servicesext.h
new file mode 100644
index 0000000..42558b9
--- /dev/null
+++ b/drivers/gpu/pvr/servicesext.h
@@ -0,0 +1,854 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+#define PVRSRV_LOCKFLG_READONLY (1)
+
+typedef enum _PVRSRV_ERROR_
+{
+ PVRSRV_OK = 0,
+ PVRSRV_ERROR_OUT_OF_MEMORY,
+ PVRSRV_ERROR_TOO_FEW_BUFFERS,
+ PVRSRV_ERROR_INVALID_PARAMS,
+ PVRSRV_ERROR_INIT_FAILURE,
+ PVRSRV_ERROR_CANT_REGISTER_CALLBACK,
+ PVRSRV_ERROR_INVALID_DEVICE,
+ PVRSRV_ERROR_NOT_OWNER,
+ PVRSRV_ERROR_BAD_MAPPING,
+ PVRSRV_ERROR_TIMEOUT,
+ PVRSRV_ERROR_FLIP_CHAIN_EXISTS,
+ PVRSRV_ERROR_INVALID_SWAPINTERVAL,
+ PVRSRV_ERROR_SCENE_INVALID,
+ PVRSRV_ERROR_STREAM_ERROR,
+ PVRSRV_ERROR_FAILED_DEPENDENCIES,
+ PVRSRV_ERROR_CMD_NOT_PROCESSED,
+ PVRSRV_ERROR_CMD_TOO_BIG,
+ PVRSRV_ERROR_DEVICE_REGISTER_FAILED,
+ PVRSRV_ERROR_TOOMANYBUFFERS,
+ PVRSRV_ERROR_NOT_SUPPORTED,
+ PVRSRV_ERROR_PROCESSING_BLOCKED,
+
+ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE,
+ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE,
+ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS,
+ PVRSRV_ERROR_RETRY,
+
+ PVRSRV_ERROR_DDK_VERSION_MISMATCH,
+ PVRSRV_ERROR_BUILD_MISMATCH,
+ PVRSRV_ERROR_CORE_REVISION_MISMATCH,
+
+ PVRSRV_ERROR_UPLOAD_TOO_BIG,
+
+ PVRSRV_ERROR_INVALID_FLAGS,
+ PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS,
+
+ PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY,
+ PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR,
+ PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED,
+
+ PVRSRV_ERROR_BRIDGE_CALL_FAILED,
+ PVRSRV_ERROR_IOCTL_CALL_FAILED,
+
+ PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND,
+ PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND,
+ PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT,
+
+ PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND,
+ PVRSRV_ERROR_PCI_CALL_FAILED,
+ PVRSRV_ERROR_PCI_REGION_TOO_SMALL,
+ PVRSRV_ERROR_PCI_REGION_UNAVAILABLE,
+ PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH,
+
+ PVRSRV_ERROR_REGISTER_BASE_NOT_SET,
+
+ PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE,
+
+ PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM,
+ PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY,
+ PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC,
+ PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR,
+
+ PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY,
+ PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY,
+
+ PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES,
+ PVRSRV_ERROR_FAILED_TO_FREE_PAGES,
+ PVRSRV_ERROR_FAILED_TO_COPY_PAGES,
+ PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES,
+ PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES,
+ PVRSRV_ERROR_STILL_MAPPED,
+ PVRSRV_ERROR_MAPPING_NOT_FOUND,
+ PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT,
+ PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE,
+
+ PVRSRV_ERROR_INVALID_SEGMENT_BLOCK,
+ PVRSRV_ERROR_INVALID_SGXDEVDATA,
+ PVRSRV_ERROR_INVALID_DEVINFO,
+ PVRSRV_ERROR_INVALID_MEMINFO,
+ PVRSRV_ERROR_INVALID_MISCINFO,
+ PVRSRV_ERROR_UNKNOWN_IOCTL,
+ PVRSRV_ERROR_INVALID_CONTEXT,
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT,
+ PVRSRV_ERROR_INVALID_HEAP,
+ PVRSRV_ERROR_INVALID_KERNELINFO,
+ PVRSRV_ERROR_UNKNOWN_POWER_STATE,
+ PVRSRV_ERROR_INVALID_HANDLE_TYPE,
+ PVRSRV_ERROR_INVALID_WRAP_TYPE,
+ PVRSRV_ERROR_INVALID_PHYS_ADDR,
+ PVRSRV_ERROR_INVALID_CPU_ADDR,
+ PVRSRV_ERROR_INVALID_HEAPINFO,
+ PVRSRV_ERROR_INVALID_PERPROC,
+ PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO,
+ PVRSRV_ERROR_INVALID_MAP_REQUEST,
+ PVRSRV_ERROR_INVALID_UNMAP_REQUEST,
+ PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP,
+ PVRSRV_ERROR_MAPPING_STILL_IN_USE,
+
+ PVRSRV_ERROR_EXCEEDED_HW_LIMITS,
+ PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED,
+
+ PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA,
+ PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT,
+ PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT,
+ PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT,
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT,
+ PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD,
+ PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD,
+ PVRSRV_ERROR_THREAD_READ_ERROR,
+ PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER,
+ PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR,
+ PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR,
+ PVRSRV_ERROR_ISR_ALREADY_INSTALLED,
+ PVRSRV_ERROR_ISR_NOT_INSTALLED,
+ PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT,
+ PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO,
+ PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT,
+ PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES,
+ PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT,
+ PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE,
+ PVRSRV_ERROR_UNABLE_TO_CLOSE_HANDLE,
+
+ PVRSRV_ERROR_INVALID_CCB_COMMAND,
+
+ PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE,
+ PVRSRV_ERROR_INVALID_LOCK_ID,
+ PVRSRV_ERROR_RESOURCE_NOT_LOCKED,
+
+ PVRSRV_ERROR_FLIP_FAILED,
+ PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED,
+
+ PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE,
+
+ PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED,
+ PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG,
+ PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG,
+ PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG,
+
+ PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID,
+
+ PVRSRV_ERROR_BLIT_SETUP_FAILED,
+
+ PVRSRV_ERROR_PDUMP_NOT_AVAILABLE,
+ PVRSRV_ERROR_PDUMP_BUFFER_FULL,
+ PVRSRV_ERROR_PDUMP_BUF_OVERFLOW,
+ PVRSRV_ERROR_PDUMP_NOT_ACTIVE,
+ PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES,
+
+ PVRSRV_ERROR_MUTEX_DESTROY_FAILED,
+ PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR,
+
+ PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE,
+ PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND,
+
+ PVRSRV_ERROR_PROCESS_NOT_INITIALISED,
+ PVRSRV_ERROR_PROCESS_NOT_FOUND,
+ PVRSRV_ERROR_SRV_CONNECT_FAILED,
+ PVRSRV_ERROR_SRV_DISCONNECT_FAILED,
+ PVRSRV_ERROR_DEINT_PHASE_FAILED,
+ PVRSRV_ERROR_INIT2_PHASE_FAILED,
+
+ PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE,
+
+ PVRSRV_ERROR_NO_DC_DEVICES_FOUND,
+ PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE,
+ PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE,
+ PVRSRV_ERROR_NO_DEVICEDATA_FOUND,
+ PVRSRV_ERROR_NO_DEVICENODE_FOUND,
+ PVRSRV_ERROR_NO_CLIENTNODE_FOUND,
+ PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE,
+
+ PVRSRV_ERROR_UNABLE_TO_INIT_TASK,
+ PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK,
+ PVRSRV_ERROR_UNABLE_TO_KILL_TASK,
+
+ PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER,
+ PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER,
+ PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER,
+
+ PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT,
+ PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION,
+
+ PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE,
+ PVRSRV_ERROR_HANDLE_NOT_ALLOCATED,
+ PVRSRV_ERROR_HANDLE_TYPE_MISMATCH,
+ PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE,
+ PVRSRV_ERROR_HANDLE_NOT_SHAREABLE,
+ PVRSRV_ERROR_HANDLE_NOT_FOUND,
+ PVRSRV_ERROR_INVALID_SUBHANDLE,
+ PVRSRV_ERROR_HANDLE_BATCH_IN_USE,
+ PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE,
+
+ PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE,
+ PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED,
+
+ PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE,
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP,
+
+ PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE,
+
+ PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE,
+ PVRSRV_ERROR_INVALID_DEVICEID,
+ PVRSRV_ERROR_DEVICEID_NOT_FOUND,
+
+ PVRSRV_ERROR_MEMORY_TEST_FAILED,
+ PVRSRV_ERROR_CPUPADDR_TEST_FAILED,
+ PVRSRV_ERROR_COPY_TEST_FAILED,
+
+ PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED,
+
+ PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK,
+ PVRSRV_ERROR_CLOCK_REQUEST_FAILED,
+ PVRSRV_ERROR_DISABLE_CLOCK_FAILURE,
+ PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE,
+ PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE,
+ PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK,
+ PVRSRV_ERROR_UNABLE_TO_GET_CLOCK,
+ PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK,
+ PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK,
+
+ PVRSRV_ERROR_UNKNOWN_SGL_ERROR,
+
+ PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE,
+ PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE,
+
+ PVRSRV_ERROR_BAD_SYNC_STATE,
+
+ PVRSRV_ERROR_CACHEOP_FAILED,
+
+ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
+
+
+typedef enum _PVRSRV_DEVICE_CLASS_
+{
+ PVRSRV_DEVICE_CLASS_3D = 0 ,
+ PVRSRV_DEVICE_CLASS_DISPLAY = 1 ,
+ PVRSRV_DEVICE_CLASS_BUFFER = 2 ,
+ PVRSRV_DEVICE_CLASS_VIDEO = 3 ,
+
+ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_DEVICE_CLASS;
+
+
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+ PVRSRV_SYS_POWER_STATE_Unspecified = -1,
+ PVRSRV_SYS_POWER_STATE_D0 = 0,
+ PVRSRV_SYS_POWER_STATE_D1 = 1,
+ PVRSRV_SYS_POWER_STATE_D2 = 2,
+ PVRSRV_SYS_POWER_STATE_D3 = 3,
+ PVRSRV_SYS_POWER_STATE_D4 = 4,
+
+ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE;
+
+
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+ PVRSRV_DEV_POWER_STATE_DEFAULT = -1,
+ PVRSRV_DEV_POWER_STATE_ON = 0,
+ PVRSRV_DEV_POWER_STATE_IDLE = 1,
+ PVRSRV_DEV_POWER_STATE_OFF = 2,
+
+ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;
+
+
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+
+typedef enum _PVRSRV_PIXEL_FORMAT_ {
+
+ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
+ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
+ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
+ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
+ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
+ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
+ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
+ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
+ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
+ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
+ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
+ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
+ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
+ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
+ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
+ PVRSRV_PIXEL_FORMAT_YV12 = 22,
+ PVRSRV_PIXEL_FORMAT_I420 = 23,
+ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
+ PVRSRV_PIXEL_FORMAT_XRGB8888 = 26,
+ PVRSRV_PIXEL_FORMAT_XBGR8888 = 27,
+ PVRSRV_PIXEL_FORMAT_BGRA8888 = 28,
+ PVRSRV_PIXEL_FORMAT_XRGB4444 = 29,
+ PVRSRV_PIXEL_FORMAT_ARGB8332 = 30,
+ PVRSRV_PIXEL_FORMAT_A2RGB10 = 31,
+ PVRSRV_PIXEL_FORMAT_A2BGR10 = 32,
+ PVRSRV_PIXEL_FORMAT_P8 = 33,
+ PVRSRV_PIXEL_FORMAT_L8 = 34,
+ PVRSRV_PIXEL_FORMAT_A8L8 = 35,
+ PVRSRV_PIXEL_FORMAT_A4L4 = 36,
+ PVRSRV_PIXEL_FORMAT_L16 = 37,
+ PVRSRV_PIXEL_FORMAT_L6V5U5 = 38,
+ PVRSRV_PIXEL_FORMAT_V8U8 = 39,
+ PVRSRV_PIXEL_FORMAT_V16U16 = 40,
+ PVRSRV_PIXEL_FORMAT_QWVU8888 = 41,
+ PVRSRV_PIXEL_FORMAT_XLVU8888 = 42,
+ PVRSRV_PIXEL_FORMAT_QWVU16 = 43,
+ PVRSRV_PIXEL_FORMAT_D16 = 44,
+ PVRSRV_PIXEL_FORMAT_D24S8 = 45,
+ PVRSRV_PIXEL_FORMAT_D24X8 = 46,
+
+
+ PVRSRV_PIXEL_FORMAT_ABGR16 = 47,
+ PVRSRV_PIXEL_FORMAT_ABGR16F = 48,
+ PVRSRV_PIXEL_FORMAT_ABGR32 = 49,
+ PVRSRV_PIXEL_FORMAT_ABGR32F = 50,
+ PVRSRV_PIXEL_FORMAT_B10GR11 = 51,
+ PVRSRV_PIXEL_FORMAT_GR88 = 52,
+ PVRSRV_PIXEL_FORMAT_BGR32 = 53,
+ PVRSRV_PIXEL_FORMAT_GR32 = 54,
+ PVRSRV_PIXEL_FORMAT_E5BGR9 = 55,
+
+
+ PVRSRV_PIXEL_FORMAT_RESERVED1 = 56,
+ PVRSRV_PIXEL_FORMAT_RESERVED2 = 57,
+ PVRSRV_PIXEL_FORMAT_RESERVED3 = 58,
+ PVRSRV_PIXEL_FORMAT_RESERVED4 = 59,
+ PVRSRV_PIXEL_FORMAT_RESERVED5 = 60,
+
+
+ PVRSRV_PIXEL_FORMAT_R8G8_B8G8 = 61,
+ PVRSRV_PIXEL_FORMAT_G8R8_G8B8 = 62,
+
+
+ PVRSRV_PIXEL_FORMAT_NV11 = 63,
+ PVRSRV_PIXEL_FORMAT_NV12 = 64,
+
+
+ PVRSRV_PIXEL_FORMAT_YUY2 = 65,
+ PVRSRV_PIXEL_FORMAT_YUV420 = 66,
+ PVRSRV_PIXEL_FORMAT_YUV444 = 67,
+ PVRSRV_PIXEL_FORMAT_VUY444 = 68,
+ PVRSRV_PIXEL_FORMAT_YUYV = 69,
+ PVRSRV_PIXEL_FORMAT_YVYU = 70,
+ PVRSRV_PIXEL_FORMAT_UYVY = 71,
+ PVRSRV_PIXEL_FORMAT_VYUY = 72,
+
+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY = 73,
+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV = 74,
+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU = 75,
+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY = 76,
+ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV = 77,
+
+
+ PVRSRV_PIXEL_FORMAT_A32B32G32R32 = 78,
+ PVRSRV_PIXEL_FORMAT_A32B32G32R32F = 79,
+ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT = 80,
+ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT = 81,
+
+
+ PVRSRV_PIXEL_FORMAT_B32G32R32 = 82,
+ PVRSRV_PIXEL_FORMAT_B32G32R32F = 83,
+ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT = 84,
+ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT = 85,
+
+
+ PVRSRV_PIXEL_FORMAT_G32R32 = 86,
+ PVRSRV_PIXEL_FORMAT_G32R32F = 87,
+ PVRSRV_PIXEL_FORMAT_G32R32_UINT = 88,
+ PVRSRV_PIXEL_FORMAT_G32R32_SINT = 89,
+
+
+ PVRSRV_PIXEL_FORMAT_D32F = 90,
+ PVRSRV_PIXEL_FORMAT_R32 = 91,
+ PVRSRV_PIXEL_FORMAT_R32F = 92,
+ PVRSRV_PIXEL_FORMAT_R32_UINT = 93,
+ PVRSRV_PIXEL_FORMAT_R32_SINT = 94,
+
+
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16 = 95,
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16F = 96,
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT = 97,
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM = 98,
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT = 99,
+ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM = 100,
+
+
+ PVRSRV_PIXEL_FORMAT_G16R16 = 101,
+ PVRSRV_PIXEL_FORMAT_G16R16F = 102,
+ PVRSRV_PIXEL_FORMAT_G16R16_UINT = 103,
+ PVRSRV_PIXEL_FORMAT_G16R16_UNORM = 104,
+ PVRSRV_PIXEL_FORMAT_G16R16_SINT = 105,
+ PVRSRV_PIXEL_FORMAT_G16R16_SNORM = 106,
+
+
+ PVRSRV_PIXEL_FORMAT_R16 = 107,
+ PVRSRV_PIXEL_FORMAT_R16F = 108,
+ PVRSRV_PIXEL_FORMAT_R16_UINT = 109,
+ PVRSRV_PIXEL_FORMAT_R16_UNORM = 110,
+ PVRSRV_PIXEL_FORMAT_R16_SINT = 111,
+ PVRSRV_PIXEL_FORMAT_R16_SNORM = 112,
+
+
+ PVRSRV_PIXEL_FORMAT_X8R8G8B8 = 113,
+ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM = 114,
+ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB = 115,
+
+ PVRSRV_PIXEL_FORMAT_A8R8G8B8 = 116,
+ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM = 117,
+ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB = 118,
+
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8 = 119,
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT = 120,
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM = 121,
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB = 122,
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT = 123,
+ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM = 124,
+
+
+ PVRSRV_PIXEL_FORMAT_G8R8 = 125,
+ PVRSRV_PIXEL_FORMAT_G8R8_UINT = 126,
+ PVRSRV_PIXEL_FORMAT_G8R8_UNORM = 127,
+ PVRSRV_PIXEL_FORMAT_G8R8_SINT = 128,
+ PVRSRV_PIXEL_FORMAT_G8R8_SNORM = 129,
+
+
+ PVRSRV_PIXEL_FORMAT_A8 = 130,
+ PVRSRV_PIXEL_FORMAT_R8 = 131,
+ PVRSRV_PIXEL_FORMAT_R8_UINT = 132,
+ PVRSRV_PIXEL_FORMAT_R8_UNORM = 133,
+ PVRSRV_PIXEL_FORMAT_R8_SINT = 134,
+ PVRSRV_PIXEL_FORMAT_R8_SNORM = 135,
+
+
+ PVRSRV_PIXEL_FORMAT_A2B10G10R10 = 136,
+ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM = 137,
+ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT = 138,
+
+
+ PVRSRV_PIXEL_FORMAT_B10G11R11 = 139,
+ PVRSRV_PIXEL_FORMAT_B10G11R11F = 140,
+
+
+ PVRSRV_PIXEL_FORMAT_X24G8R32 = 141,
+ PVRSRV_PIXEL_FORMAT_G8R24 = 142,
+ PVRSRV_PIXEL_FORMAT_X8R24 = 143,
+ PVRSRV_PIXEL_FORMAT_E5B9G9R9 = 144,
+ PVRSRV_PIXEL_FORMAT_R1 = 145,
+
+ PVRSRV_PIXEL_FORMAT_RESERVED6 = 146,
+ PVRSRV_PIXEL_FORMAT_RESERVED7 = 147,
+ PVRSRV_PIXEL_FORMAT_RESERVED8 = 148,
+ PVRSRV_PIXEL_FORMAT_RESERVED9 = 149,
+ PVRSRV_PIXEL_FORMAT_RESERVED10 = 150,
+ PVRSRV_PIXEL_FORMAT_RESERVED11 = 151,
+ PVRSRV_PIXEL_FORMAT_RESERVED12 = 152,
+ PVRSRV_PIXEL_FORMAT_RESERVED13 = 153,
+ PVRSRV_PIXEL_FORMAT_RESERVED14 = 154,
+ PVRSRV_PIXEL_FORMAT_RESERVED15 = 155,
+ PVRSRV_PIXEL_FORMAT_RESERVED16 = 156,
+ PVRSRV_PIXEL_FORMAT_RESERVED17 = 157,
+ PVRSRV_PIXEL_FORMAT_RESERVED18 = 158,
+ PVRSRV_PIXEL_FORMAT_RESERVED19 = 159,
+ PVRSRV_PIXEL_FORMAT_RESERVED20 = 160,
+
+
+ PVRSRV_PIXEL_FORMAT_UBYTE4 = 161,
+ PVRSRV_PIXEL_FORMAT_SHORT4 = 162,
+ PVRSRV_PIXEL_FORMAT_SHORT4N = 163,
+ PVRSRV_PIXEL_FORMAT_USHORT4N = 164,
+ PVRSRV_PIXEL_FORMAT_SHORT2N = 165,
+ PVRSRV_PIXEL_FORMAT_SHORT2 = 166,
+ PVRSRV_PIXEL_FORMAT_USHORT2N = 167,
+ PVRSRV_PIXEL_FORMAT_UDEC3 = 168,
+ PVRSRV_PIXEL_FORMAT_DEC3N = 169,
+ PVRSRV_PIXEL_FORMAT_F16_2 = 170,
+ PVRSRV_PIXEL_FORMAT_F16_4 = 171,
+
+
+ PVRSRV_PIXEL_FORMAT_L_F16 = 172,
+ PVRSRV_PIXEL_FORMAT_L_F16_REP = 173,
+ PVRSRV_PIXEL_FORMAT_L_F16_A_F16 = 174,
+ PVRSRV_PIXEL_FORMAT_A_F16 = 175,
+ PVRSRV_PIXEL_FORMAT_B16G16R16F = 176,
+
+ PVRSRV_PIXEL_FORMAT_L_F32 = 177,
+ PVRSRV_PIXEL_FORMAT_A_F32 = 178,
+ PVRSRV_PIXEL_FORMAT_L_F32_A_F32 = 179,
+
+
+ PVRSRV_PIXEL_FORMAT_PVRTC2 = 180,
+ PVRSRV_PIXEL_FORMAT_PVRTC4 = 181,
+ PVRSRV_PIXEL_FORMAT_PVRTCII2 = 182,
+ PVRSRV_PIXEL_FORMAT_PVRTCII4 = 183,
+ PVRSRV_PIXEL_FORMAT_PVRTCIII = 184,
+ PVRSRV_PIXEL_FORMAT_PVRO8 = 185,
+ PVRSRV_PIXEL_FORMAT_PVRO88 = 186,
+ PVRSRV_PIXEL_FORMAT_PT1 = 187,
+ PVRSRV_PIXEL_FORMAT_PT2 = 188,
+ PVRSRV_PIXEL_FORMAT_PT4 = 189,
+ PVRSRV_PIXEL_FORMAT_PT8 = 190,
+ PVRSRV_PIXEL_FORMAT_PTW = 191,
+ PVRSRV_PIXEL_FORMAT_PTB = 192,
+ PVRSRV_PIXEL_FORMAT_MONO8 = 193,
+ PVRSRV_PIXEL_FORMAT_MONO16 = 194,
+
+
+ PVRSRV_PIXEL_FORMAT_C0_YUYV = 195,
+ PVRSRV_PIXEL_FORMAT_C0_UYVY = 196,
+ PVRSRV_PIXEL_FORMAT_C0_YVYU = 197,
+ PVRSRV_PIXEL_FORMAT_C0_VYUY = 198,
+ PVRSRV_PIXEL_FORMAT_C1_YUYV = 199,
+ PVRSRV_PIXEL_FORMAT_C1_UYVY = 200,
+ PVRSRV_PIXEL_FORMAT_C1_YVYU = 201,
+ PVRSRV_PIXEL_FORMAT_C1_VYUY = 202,
+
+
+ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV = 203,
+ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU = 204,
+ PVRSRV_PIXEL_FORMAT_C0_YUV420_3P = 205,
+ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV = 206,
+ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU = 207,
+ PVRSRV_PIXEL_FORMAT_C1_YUV420_3P = 208,
+
+ PVRSRV_PIXEL_FORMAT_A2B10G10R10F = 209,
+ PVRSRV_PIXEL_FORMAT_B8G8R8_SINT = 210,
+ PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK = 211,
+
+ PVRSRV_PIXEL_FORMAT_ABGR4444 = 212,
+ PVRSRV_PIXEL_FORMAT_ABGR1555 = 213,
+ PVRSRV_PIXEL_FORMAT_BGR565 = 214,
+
+
+ PVRSRV_PIXEL_FORMAT_C0_4KYUV420_2P_UV = 215,
+ PVRSRV_PIXEL_FORMAT_C0_4KYUV420_2P_VU = 216,
+ PVRSRV_PIXEL_FORMAT_C1_4KYUV420_2P_UV = 217,
+ PVRSRV_PIXEL_FORMAT_C1_4KYUV420_2P_VU = 218,
+ PVRSRV_PIXEL_FORMAT_P208 = 219,
+ PVRSRV_PIXEL_FORMAT_A8P8 = 220,
+
+ PVRSRV_PIXEL_FORMAT_A4 = 221,
+ PVRSRV_PIXEL_FORMAT_AYUV8888 = 222,
+ PVRSRV_PIXEL_FORMAT_RAW256 = 223,
+ PVRSRV_PIXEL_FORMAT_RAW512 = 224,
+ PVRSRV_PIXEL_FORMAT_RAW1024 = 225,
+
+ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_PIXEL_FORMAT;
+
+typedef enum _PVRSRV_ALPHA_FORMAT_ {
+ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
+ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
+ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
+ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
+} PVRSRV_ALPHA_FORMAT;
+
+typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
+ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
+ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
+ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
+ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
+} PVRSRV_COLOURSPACE_FORMAT;
+
+
+typedef enum _PVRSRV_ROTATION_ {
+ PVRSRV_ROTATE_0 = 0,
+ PVRSRV_ROTATE_90 = 1,
+ PVRSRV_ROTATE_180 = 2,
+ PVRSRV_ROTATE_270 = 3,
+ PVRSRV_FLIP_Y
+
+} PVRSRV_ROTATION;
+
+#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
+#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
+#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
+
+typedef struct _PVRSRV_SYNC_DATA_
+{
+
+ IMG_UINT32 ui32WriteOpsPending;
+ volatile IMG_UINT32 ui32WriteOpsComplete;
+
+
+ IMG_UINT32 ui32ReadOpsPending;
+ volatile IMG_UINT32 ui32ReadOpsComplete;
+
+
+ IMG_UINT32 ui32ReadOps2Pending;
+ volatile IMG_UINT32 ui32ReadOps2Complete;
+
+
+ IMG_UINT32 ui32LastOpDumpVal;
+ IMG_UINT32 ui32LastReadOpDumpVal;
+
+
+ IMG_UINT64 ui64LastWrite;
+
+} PVRSRV_SYNC_DATA;
+
+typedef struct _PVRSRV_CLIENT_SYNC_INFO_
+{
+
+ PVRSRV_SYNC_DATA *psSyncData;
+
+
+
+
+
+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
+
+
+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
+
+
+ IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr;
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hMappingInfo;
+
+
+ IMG_SID hKernelSyncInfo;
+#else
+ IMG_HANDLE hMappingInfo;
+
+
+ IMG_HANDLE hKernelSyncInfo;
+#endif
+
+} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
+
+typedef struct PVRSRV_RESOURCE_TAG
+{
+ volatile IMG_UINT32 ui32Lock;
+ IMG_UINT32 ui32ID;
+}PVRSRV_RESOURCE;
+typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
+
+
+typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
+typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
+
+typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
+typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
+
+
+typedef struct _IMG_RECT_
+{
+ IMG_INT32 x0;
+ IMG_INT32 y0;
+ IMG_INT32 x1;
+ IMG_INT32 y1;
+}IMG_RECT;
+
+typedef struct _IMG_RECT_16_
+{
+ IMG_INT16 x0;
+ IMG_INT16 y0;
+ IMG_INT16 x1;
+ IMG_INT16 y1;
+}IMG_RECT_16;
+
+
+typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE,
+ IMG_HANDLE,
+ IMG_SYS_PHYADDR**,
+ IMG_SIZE_T*,
+ IMG_VOID**,
+ IMG_HANDLE*,
+ IMG_BOOL*,
+ IMG_UINT32*);
+
+
+typedef struct DISPLAY_DIMS_TAG
+{
+ IMG_UINT32 ui32ByteStride;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+} DISPLAY_DIMS;
+
+
+typedef struct DISPLAY_FORMAT_TAG
+{
+
+ PVRSRV_PIXEL_FORMAT pixelformat;
+} DISPLAY_FORMAT;
+
+typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
+{
+
+ PVRSRV_PIXEL_FORMAT pixelformat;
+
+ DISPLAY_DIMS sDims;
+} DISPLAY_SURF_ATTRIBUTES;
+
+
+typedef struct DISPLAY_MODE_INFO_TAG
+{
+
+ PVRSRV_PIXEL_FORMAT pixelformat;
+
+ DISPLAY_DIMS sDims;
+
+ IMG_UINT32 ui32RefreshHZ;
+
+ IMG_UINT32 ui32OEMFlags;
+} DISPLAY_MODE_INFO;
+
+
+
+#define MAX_DISPLAY_NAME_SIZE (50)
+
+typedef struct DISPLAY_INFO_TAG
+{
+
+ IMG_UINT32 ui32MaxSwapChains;
+
+ IMG_UINT32 ui32MaxSwapChainBuffers;
+
+ IMG_UINT32 ui32MinSwapInterval;
+
+ IMG_UINT32 ui32MaxSwapInterval;
+
+ IMG_UINT32 ui32PhysicalWidthmm;
+ IMG_UINT32 ui32PhysicalHeightmm;
+
+ IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE];
+#if defined(SUPPORT_HW_CURSOR)
+
+ IMG_UINT16 ui32CursorWidth;
+ IMG_UINT16 ui32CursorHeight;
+#endif
+} DISPLAY_INFO;
+
+typedef struct ACCESS_INFO_TAG
+{
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32FBPhysBaseAddress;
+ IMG_UINT32 ui32FBMemAvailable;
+ IMG_UINT32 ui32SysPhysBaseAddress;
+ IMG_UINT32 ui32SysSize;
+ IMG_UINT32 ui32DevIRQ;
+}ACCESS_INFO;
+
+
+
+#if defined(PDUMP_SUSPEND_IS_PER_THREAD)
+typedef struct {
+ IMG_UINT32 threadId;
+ IMG_INT suspendCount;
+} PVRSRV_THREAD_SUSPEND_COUNT;
+
+#define PVRSRV_PDUMP_SUSPEND_Q_NAME "PVRSRVPDumpSuspendMsgQ"
+#define PVRSRV_PDUMP_SUSPEND_Q_LENGTH 8
+
+#endif
+
+
+typedef struct _PVRSRV_REGISTRY_INFO_
+{
+ IMG_UINT32 ui32DevCookie;
+ IMG_PCHAR pszKey;
+ IMG_PCHAR pszValue;
+ IMG_PCHAR pszBuf;
+ IMG_UINT32 ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
+
+
+#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
+#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
+
+#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
+#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
+
+#define MAX_BUFFER_DEVICE_NAME_SIZE (50)
+
+typedef struct BUFFER_INFO_TAG
+{
+ IMG_UINT32 ui32BufferCount;
+ IMG_UINT32 ui32BufferDeviceID;
+ PVRSRV_PIXEL_FORMAT pixelformat;
+ IMG_UINT32 ui32ByteStride;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32Flags;
+ IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];
+} BUFFER_INFO;
+
+typedef enum _OVERLAY_DEINTERLACE_MODE_
+{
+ WEAVE=0x0,
+ BOB_ODD,
+ BOB_EVEN,
+ BOB_EVEN_NONINTERLEAVED
+} OVERLAY_DEINTERLACE_MODE;
+
+#endif
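As a hedged illustration (again, not part of the patch): the pending/complete counter pairs in PVRSRV_SYNC_DATA above appear to track outstanding versus retired operations, in the spirit of the PVRSRVTestOpsComplete() helpers declared in services.h. A trivial idle test under that assumption, with a hypothetical helper name:

/* Hypothetical helper: operations of the given kind are idle once the
 * (volatile) complete counter has caught up with the pending counter. */
static IMG_BOOL ExampleSyncOpsIdle(const PVRSRV_SYNC_DATA *psSyncData,
                                   PVRSRV_SYNCVAL_MODE eMode)
{
	if (eMode == PVRSRV_SYNCVAL_READ)
		return (psSyncData->ui32ReadOpsComplete ==
			psSyncData->ui32ReadOpsPending) ? IMG_TRUE : IMG_FALSE;

	return (psSyncData->ui32WriteOpsComplete ==
		psSyncData->ui32WriteOpsPending) ? IMG_TRUE : IMG_FALSE;
}
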
diff --git a/drivers/gpu/pvr/servicesint.h b/drivers/gpu/pvr/servicesint.h
new file mode 100644
index 0000000..afe2bcf
--- /dev/null
+++ b/drivers/gpu/pvr/servicesint.h
@@ -0,0 +1,377 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__SERVICESINT_H__)
+#define __SERVICESINT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "services.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+
+#define HWREC_DEFAULT_TIMEOUT (500)
+
+#define DRIVERNAME_MAXLENGTH (100)
+
+#define ALIGNSIZE(size, alignshift) (((size) + ((1UL << (alignshift))-1)) & ~((1UL << (alignshift))-1))
+
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#define MAX_CLEANUP_TIME_US (MAX_HW_TIME_US * 4)
+#define MAX_CLEANUP_TRYS 100
+#define MAX_CLEANUP_TIME_WAIT_US (MAX_CLEANUP_TIME_US/MAX_CLEANUP_TRYS)
+
+typedef enum _PVRSRV_MEMTYPE_
+{
+ PVRSRV_MEMTYPE_UNKNOWN = 0,
+ PVRSRV_MEMTYPE_DEVICE = 1,
+ PVRSRV_MEMTYPE_DEVICECLASS = 2,
+ PVRSRV_MEMTYPE_WRAPPED = 3,
+ PVRSRV_MEMTYPE_MAPPED = 4,
+} PVRSRV_MEMTYPE;
+
+typedef struct _PVRSRV_KERNEL_MEM_INFO_
+{
+
+ IMG_PVOID pvLinAddrKM;
+
+
+ IMG_DEV_VIRTADDR sDevVAddr;
+
+
+ IMG_UINT32 ui32Flags;
+
+
+ IMG_SIZE_T uAllocSize;
+
+
+ PVRSRV_MEMBLK sMemBlk;
+
+
+ IMG_PVOID pvSysBackupBuffer;
+
+
+ IMG_UINT32 ui32RefCount;
+
+
+ IMG_BOOL bPendingFree;
+
+
+#if defined(SUPPORT_MEMINFO_IDS)
+ #if !defined(USE_CODE)
+
+ IMG_UINT64 ui64Stamp;
+ #else
+ IMG_UINT32 dummy1;
+ IMG_UINT32 dummy2;
+ #endif
+#endif
+
+
+ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo;
+
+ PVRSRV_MEMTYPE memType;
+
+
+
+
+
+
+
+
+ struct {
+
+
+ IMG_BOOL bInUse;
+
+
+ IMG_HANDLE hDevCookieInt;
+
+
+ IMG_UINT32 ui32ShareIndex;
+
+
+
+ IMG_UINT32 ui32OrigReqAttribs;
+ IMG_UINT32 ui32OrigReqSize;
+ IMG_UINT32 ui32OrigReqAlignment;
+ } sShareMemWorkaround;
+} PVRSRV_KERNEL_MEM_INFO;
+
+
+typedef struct _PVRSRV_KERNEL_SYNC_INFO_
+{
+
+ PVRSRV_SYNC_DATA *psSyncData;
+
+
+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
+
+
+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
+
+
+ IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr;
+
+
+ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
+
+
+
+ IMG_PVOID pvRefCount;
+
+
+ IMG_HANDLE hResItem;
+
+
+ IMG_UINT32 ui32UID;
+} PVRSRV_KERNEL_SYNC_INFO;
+
+typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
+{
+
+ IMG_UINT32 ui32ReadOpsPendingVal;
+ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
+ IMG_UINT32 ui32WriteOpsPendingVal;
+ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
+ IMG_UINT32 ui32ReadOps2PendingVal;
+ IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr;
+} PVRSRV_DEVICE_SYNC_OBJECT;
+
+typedef struct _PVRSRV_SYNC_OBJECT
+{
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
+ IMG_UINT32 ui32WriteOpsPending;
+ IMG_UINT32 ui32ReadOpsPending;
+ IMG_UINT32 ui32ReadOps2Pending;
+
+}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
+
+typedef struct _PVRSRV_COMMAND
+{
+ IMG_SIZE_T uCmdSize;
+ IMG_UINT32 ui32DevIndex;
+ IMG_UINT32 CommandType;
+ IMG_UINT32 ui32DstSyncCount;
+ IMG_UINT32 ui32SrcSyncCount;
+ PVRSRV_SYNC_OBJECT *psDstSync;
+ PVRSRV_SYNC_OBJECT *psSrcSync;
+ IMG_SIZE_T uDataSize;
+ IMG_UINT32 ui32ProcessID;
+ IMG_VOID *pvData;
+ PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete;
+ IMG_HANDLE hCallbackData;
+}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
+
+
+typedef struct _PVRSRV_QUEUE_INFO_
+{
+ IMG_VOID *pvLinQueueKM;
+ IMG_VOID *pvLinQueueUM;
+ volatile IMG_SIZE_T ui32ReadOffset;
+ volatile IMG_SIZE_T ui32WriteOffset;
+ IMG_UINT32 *pui32KickerAddrKM;
+ IMG_UINT32 *pui32KickerAddrUM;
+ IMG_SIZE_T ui32QueueSize;
+
+ IMG_UINT32 ui32ProcessID;
+
+ IMG_HANDLE hMemBlock[2];
+
+ struct _PVRSRV_QUEUE_INFO_ *psNextKM;
+}PVRSRV_QUEUE_INFO;
+
+
+typedef struct _PVRSRV_HEAP_INFO_KM_
+{
+ IMG_UINT32 ui32HeapID;
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+
+ IMG_HANDLE hDevMemHeap;
+ IMG_UINT32 ui32HeapByteSize;
+ IMG_UINT32 ui32Attribs;
+ IMG_UINT32 ui32XTileStride;
+}PVRSRV_HEAP_INFO_KM;
+
+
+typedef struct _PVRSRV_EVENTOBJECT_KM_
+{
+
+ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH];
+
+ IMG_HANDLE hOSEventKM;
+
+} PVRSRV_EVENTOBJECT_KM;
+
+
+typedef struct _PVRSRV_MISC_INFO_KM_
+{
+ IMG_UINT32 ui32StateRequest;
+ IMG_UINT32 ui32StatePresent;
+
+
+ IMG_VOID *pvSOCTimerRegisterKM;
+ IMG_VOID *pvSOCTimerRegisterUM;
+ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
+ IMG_HANDLE hSOCTimerRegisterMappingInfo;
+
+
+ IMG_VOID *pvSOCClockGateRegs;
+ IMG_UINT32 ui32SOCClockGateRegsSize;
+
+
+ IMG_CHAR *pszMemoryStr;
+ IMG_UINT32 ui32MemoryStrLen;
+
+
+ PVRSRV_EVENTOBJECT_KM sGlobalEventObject;
+ IMG_HANDLE hOSGlobalEvent;
+
+
+ IMG_UINT32 aui32DDKVersion[4];
+
+
+ struct
+ {
+
+ IMG_BOOL bDeferOp;
+
+
+ PVRSRV_MISC_INFO_CPUCACHEOP_TYPE eCacheOpType;
+
+
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+
+ IMG_VOID *pvBaseVAddr;
+
+
+ IMG_UINT32 ui32Length;
+ } sCacheOpCtl;
+
+
+ struct
+ {
+
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+
+ IMG_UINT32 ui32RefCount;
+ } sGetRefCountCtl;
+} PVRSRV_MISC_INFO_KM;
+
+
+typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*,
+ PVRSRV_COMMAND**,
+ IMG_UINT32,
+ IMG_UINT16,
+ IMG_UINT32,
+ PVRSRV_KERNEL_SYNC_INFO*[],
+ IMG_UINT32,
+ PVRSRV_KERNEL_SYNC_INFO*[],
+ IMG_UINT32);
+typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
+
+
+typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
+{
+ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
+ IMG_HANDLE hDevMemContext;
+ IMG_HANDLE hExtDevice;
+ IMG_HANDLE hExtBuffer;
+ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+ IMG_UINT32 ui32MemMapRefCount;
+} PVRSRV_DEVICECLASS_BUFFER;
+
+
+typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDeviceKM;
+#else
+ IMG_HANDLE hDeviceKM;
+#endif
+ IMG_HANDLE hServices;
+} PVRSRV_CLIENT_DEVICECLASS_INFO;
+
+
+typedef enum
+{
+ PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR,
+ PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER,
+ PVRSRV_FREE_CALLBACK_ORIGIN_EXTERNAL,
+}
+PVRSRV_FREE_CALLBACK_ORIGIN;
+
+
+IMG_IMPORT
+PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32Param,
+ PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin);
+
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo,
+ PVRSRV_COMMAND *psCommand);
+
+
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
+PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
+ IMG_UINT32 ui32Flags,
+ IMG_SIZE_T ui32Size,
+ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
+
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
+PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
+ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
+ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
+
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
+PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo,
+#else
+ IMG_HANDLE hKernelMemInfo,
+#endif
+ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
+
+
+#if defined (__cplusplus)
+}
+#endif
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/bridged_sgx_bridge.c b/drivers/gpu/pvr/sgx/bridged_sgx_bridge.c
new file mode 100644
index 0000000..4e4cf24
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/bridged_sgx_bridge.c
@@ -0,0 +1,3764 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+
+
+#include <stddef.h>
+
+#include "img_defs.h"
+
+#if defined(SUPPORT_SGX)
+
+#include "services.h"
+#include "pvr_debug.h"
+#include "pvr_bridge.h"
+#include "sgx_bridge.h"
+#include "perproc.h"
+#include "power.h"
+#include "pvr_bridge_km.h"
+#include "sgx_bridge_km.h"
+#include "sgx_options.h"
+
+#if defined(SUPPORT_MSVDX)
+ #include "msvdx_bridge.h"
+#endif
+
+#include "bridged_pvr_bridge.h"
+#include "bridged_sgx_bridge.h"
+#include "sgxutils.h"
+#include "buffer_manager.h"
+#include "pdump_km.h"
+
+static IMG_INT
+SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
+ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
+
+ psGetClientInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psGetClientInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psGetClientInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psGetClientInfoOUT->eError =
+ SGXGetClientInfoKM(hDevCookieInt,
+ &psGetClientInfoOUT->sClientInfo);
+ return 0;
+}
+
+static IMG_INT
+SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_HANDLE hDevCookieInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psReleaseClientInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+
+ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
+
+
+ if (psDevInfo->ui32ClientRefCount > 0)
+ {
+ psDevInfo->ui32ClientRefCount--;
+ }
+
+ psRetOUT->eError = PVRSRV_OK;
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
+ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_INTERNAL_DEVINFO_KM sSGXInternalDevInfo;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
+
+ psSGXGetInternalDevInfoOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXGetInternalDevInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psSGXGetInternalDevInfoOUT->eError =
+ SGXGetInternalDevInfoKM(hDevCookieInt,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sSGXInternalDevInfo);
+#else
+ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
+#endif
+
+
+ psSGXGetInternalDevInfoOUT->eError =
+ PVRSRVAllocHandle(psPerProc->psHandleBase,
+ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
+#else
+ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXDoKickBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_UINT32 i;
+ IMG_INT ret = 0;
+ IMG_UINT32 ui32NumDstSyncs;
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_CCB_KICK_KM sCCBKickKM = {{0}};
+ IMG_HANDLE ahSyncInfoHandles[16];
+#else
+ IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psDoKickIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.hCCBKernelMemInfo,
+#else
+ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
+#endif
+ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psDoKickIN->sCCBKick.ui32NumDstSyncObjects > 16)
+ {
+ return 0;
+ }
+
+ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != 0)
+#else
+ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
+#endif
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.hTA3DSyncInfo,
+#else
+ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
+#endif
+ psDoKickIN->sCCBKick.hTA3DSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if(psDoKickIN->sCCBKick.hTASyncInfo != 0)
+#else
+ if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
+#endif
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.hTASyncInfo,
+#else
+ &psDoKickIN->sCCBKick.hTASyncInfo,
+#endif
+ psDoKickIN->sCCBKick.hTASyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+#if defined(FIX_HW_BRN_31620)
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &psDoKickIN->sCCBKick.hDevMemContext,
+ psDoKickIN->sCCBKick.hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if(psDoKickIN->sCCBKick.h3DSyncInfo != 0)
+#else
+ if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
+#endif
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.h3DSyncInfo,
+#else
+ &psDoKickIN->sCCBKick.h3DSyncInfo,
+#endif
+ psDoKickIN->sCCBKick.h3DSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.ui32NumTASrcSyncs = psDoKickIN->sCCBKick.ui32NumTASrcSyncs;
+#endif
+ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ahTASrcKernelSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.ui32NumTADstSyncs = psDoKickIN->sCCBKick.ui32NumTADstSyncs;
+#endif
+ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ahTADstKernelSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.ui32Num3DSrcSyncs = psDoKickIN->sCCBKick.ui32Num3DSrcSyncs;
+#endif
+ for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ah3DSrcKernelSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#else
+
+ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS_TA)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.ui32NumSrcSyncs = psDoKickIN->sCCBKick.ui32NumSrcSyncs;
+#endif
+ for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ahSrcKernelSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#endif
+
+ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
+ {
+ psRetOUT->eError =
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.asTAStatusUpdate[i].hKernelMemInfo,
+#else
+ &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
+#endif
+ psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.asTAStatusUpdate[i].sCtlStatus = psDoKickIN->sCCBKick.asTAStatusUpdate[i].sCtlStatus;
+#endif
+
+#else
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ahTAStatusSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+#endif
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+ for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
+ {
+ psRetOUT->eError =
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.as3DStatusUpdate[i].hKernelMemInfo,
+#else
+ &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
+#endif
+ psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.as3DStatusUpdate[i].sCtlStatus = psDoKickIN->sCCBKick.as3DStatusUpdate[i].sCtlStatus;
+#endif
+#else
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.ah3DStatusSyncInfo[i],
+#else
+ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
+#endif
+ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+#endif
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
+
+ if(ui32NumDstSyncs > 0)
+ {
+ if(!OSAccessOK(PVR_VERIFY_READ,
+ psDoKickIN->sCCBKick.pahDstSyncHandles,
+ ui32NumDstSyncs * sizeof(IMG_HANDLE)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:"
+ " Invalid pasDstSyncHandles pointer", __FUNCTION__));
+ return -EFAULT;
+ }
+
+ psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32NumDstSyncs * sizeof(IMG_HANDLE),
+ (IMG_VOID **)&phKernelSyncInfoHandles,
+ 0,
+ "Array of Synchronization Info Handles");
+ if (psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sCCBKickKM.pahDstSyncHandles = phKernelSyncInfoHandles;
+#else
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ phKernelSyncInfoHandles,
+ psDoKickIN->sCCBKick.pahDstSyncHandles,
+ ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ ret = -EFAULT;
+ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
+ }
+
+
+ psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles;
+#endif
+
+ for( i = 0; i < ui32NumDstSyncs; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.pahDstSyncHandles[i],
+#else
+ &psDoKickIN->sCCBKick.pahDstSyncHandles[i],
+#endif
+ psDoKickIN->sCCBKick.pahDstSyncHandles[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
+ }
+
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM.hKernelHWSyncListMemInfo,
+#else
+ &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
+#endif
+ psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
+ }
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSMemCopy(&sCCBKickKM.sCommand, &psDoKickIN->sCCBKick.sCommand, sizeof(sCCBKickKM.sCommand));
+
+ sCCBKickKM.ui32NumDstSyncObjects = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
+ sCCBKickKM.ui32NumTAStatusVals = psDoKickIN->sCCBKick.ui32NumTAStatusVals;
+ sCCBKickKM.ui32Num3DStatusVals = psDoKickIN->sCCBKick.ui32Num3DStatusVals;
+ sCCBKickKM.bFirstKickOrResume = psDoKickIN->sCCBKick.bFirstKickOrResume;
+ sCCBKickKM.ui32CCBOffset = psDoKickIN->sCCBKick.ui32CCBOffset;
+ sCCBKickKM.bTADependency = psDoKickIN->sCCBKick.bTADependency;
+
+#if (defined(NO_HARDWARE) || defined(PDUMP))
+ sCCBKickKM.bTerminateOrAbort = psDoKickIN->sCCBKick.bTerminateOrAbort;
+#endif
+#if defined(PDUMP)
+ sCCBKickKM.ui32CCBDumpWOff = psDoKickIN->sCCBKick.ui32CCBDumpWOff;
+#endif
+
+#if defined(NO_HARDWARE)
+ sCCBKickKM.ui32WriteOpsPendingVal = psDoKickIN->sCCBKick.ui32WriteOpsPendingVal;
+#endif
+#endif
+ psRetOUT->eError =
+ SGXDoKickKM(hDevCookieInt,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sCCBKickKM);
+#else
+ &psDoKickIN->sCCBKick);
+#endif
+
+PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:
+
+ if(phKernelSyncInfoHandles)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32NumDstSyncs * sizeof(IMG_HANDLE),
+ (IMG_VOID *)phKernelSyncInfoHandles,
+ 0);
+
+ }
+ return ret;
+}
+
+
+static IMG_INT
+SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psScheduleProcQIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
+
+ return 0;
+}
+
+
+#if defined(TRANSFER_QUEUE)
+static IMG_INT
+SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_TRANSFER_SGX_KICK *psKick;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_TRANSFER_SGX_KICK_KM sKickKM = {0};
+#endif
+ IMG_UINT32 i;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ psKick = &psSubmitTransferIN->sKick;
+
+#if defined(FIX_HW_BRN_31620)
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &psKick->hDevMemContext,
+ psKick->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+#endif
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSubmitTransferIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.hCCBMemInfo,
+#else
+ &psKick->hCCBMemInfo,
+#endif
+ psKick->hCCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.hTASyncInfo,
+#else
+ &psKick->hTASyncInfo,
+#endif
+ psKick->hTASyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.h3DSyncInfo,
+#else
+ &psKick->h3DSyncInfo,
+#endif
+ psKick->h3DSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.ahSrcSyncInfo[i],
+#else
+ &psKick->ahSrcSyncInfo[i],
+#endif
+ psKick->ahSrcSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+ for (i = 0; i < psKick->ui32NumDstSync; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.ahDstSyncInfo[i],
+#else
+ &psKick->ahDstSyncInfo[i],
+#endif
+ psKick->ahDstSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ sKickKM.sHWTransferContextDevVAddr = psKick->sHWTransferContextDevVAddr;
+ sKickKM.ui32SharedCmdCCBOffset = psKick->ui32SharedCmdCCBOffset;
+ sKickKM.ui32NumSrcSync = psKick->ui32NumSrcSync;
+ sKickKM.ui32NumDstSync = psKick->ui32NumDstSync;
+ sKickKM.ui32Flags = psKick->ui32Flags;
+ sKickKM.ui32PDumpFlags = psKick->ui32PDumpFlags;
+#if defined(PDUMP)
+ sKickKM.ui32CCBDumpWOff = psKick->ui32CCBDumpWOff;
+#endif
+
+ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, &sKickKM);
+#else
+ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
+#endif
+
+ return 0;
+}
+
+static IMG_INT
+SGXSetTransferContextPriorityBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY *psSGXSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hTransferContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXSetTransferContextPriorityIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hTransferContextInt,
+ psSGXSetTransferContextPriorityIN->hHWTransferContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXSetTransferContextPriorityKM(
+ hDevCookieInt,
+ hTransferContextInt,
+ psSGXSetTransferContextPriorityIN->ui32Priority,
+ psSGXSetTransferContextPriorityIN->ui32OffsetOfPriorityField);
+
+ return 0;
+}
+
+static IMG_INT
+SGXSetRenderContextPriorityBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY *psSGXSetRenderContextPriorityIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hRenderContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXSetRenderContextPriorityIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hRenderContextInt,
+ psSGXSetRenderContextPriorityIN->hHWRenderContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXSetRenderContextPriorityKM(
+ hDevCookieInt,
+ hRenderContextInt,
+ psSGXSetRenderContextPriorityIN->ui32Priority,
+ psSGXSetRenderContextPriorityIN->ui32OffsetOfPriorityField);
+
+ return 0;
+}
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+static IMG_INT
+SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_2D_SGX_KICK *psKick;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_2D_SGX_KICK_KM sKickKM;
+#endif
+ IMG_UINT32 i;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+
+ psKick = &psSubmit2DIN->sKick;
+
+#if defined(FIX_HW_BRN_31620)
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &psKick->hDevMemContext,
+ psKick->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+#endif
+
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSubmit2DIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.hCCBMemInfo,
+#else
+ &psKick->hCCBMemInfo,
+#endif
+ psKick->hCCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (psKick->hTASyncInfo != 0)
+#else
+ if (psKick->hTASyncInfo != IMG_NULL)
+#endif
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.hTASyncInfo,
+#else
+ &psKick->hTASyncInfo,
+#endif
+ psKick->hTASyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+ sKickKM.hTASyncInfo = IMG_NULL;
+ }
+#endif
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.h3DSyncInfo,
+#else
+ &psKick->h3DSyncInfo,
+#endif
+ psKick->h3DSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+ sKickKM.h3DSyncInfo = IMG_NULL;
+ }
+#endif
+
+ if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
+ {
+ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ for (i = 0; i < SGX_MAX_2D_SRC_SYNC_OPS; i++)
+ {
+ if (i < psKick->ui32NumSrcSync)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &sKickKM.ahSrcSyncInfo[i],
+ psKick->ahSrcSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ sKickKM.ahSrcSyncInfo[i] = IMG_NULL;
+ }
+ }
+#else
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &psKick->ahSrcSyncInfo[i],
+ psKick->ahSrcSyncInfo[i],
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#endif
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &sKickKM.hDstSyncInfo,
+#else
+ &psKick->hDstSyncInfo,
+#endif
+ psKick->hDstSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ else
+ {
+ sKickKM.hDstSyncInfo = IMG_NULL;
+ }
+
+
+ sKickKM.ui32SharedCmdCCBOffset = psKick->ui32SharedCmdCCBOffset;
+ sKickKM.ui32NumSrcSync = psKick->ui32NumSrcSync;
+ sKickKM.ui32PDumpFlags = psKick->ui32PDumpFlags;
+ sKickKM.sHW2DContextDevVAddr = psKick->sHW2DContextDevVAddr;
+#if defined(PDUMP)
+ sKickKM.ui32CCBDumpWOff = psKick->ui32CCBDumpWOff;
+#endif
+#endif
+
+ psRetOUT->eError =
+#if defined (SUPPORT_SID_INTERFACE)
+ SGXSubmit2DKM(hDevCookieInt, &sKickKM);
+#else
+ SGXSubmit2DKM(hDevCookieInt, psKick);
+#endif
+
+ return 0;
+}
+#endif
+#endif
+
+
+static IMG_INT
+SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hDevMemContextInt = 0;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ SGX_MISC_INFO sMiscInfo;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
+ PVRSRV_BRIDGE_SGX_GETMISCINFO);
+
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXGetMiscInfoIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+
+ if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
+ {
+ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+#endif
+
+ psDeviceNode = hDevCookieInt;
+ PVR_ASSERT(psDeviceNode != IMG_NULL);
+ if (psDeviceNode == IMG_NULL)
+ {
+ return -EFAULT;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+
+ psRetOUT->eError = CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ &sMiscInfo,
+ psSGXGetMiscInfoIN->psMiscInfo,
+ sizeof(SGX_MISC_INFO));
+ if (psRetOUT->eError != PVRSRV_OK)
+ {
+ return -EFAULT;
+ }
+
+ {
+ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);
+
+ if (psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+ }
+
+
+ psRetOUT->eError = CopyToUserWrapper(psPerProc,
+ ui32BridgeID,
+ psSGXGetMiscInfoIN->psMiscInfo,
+ &sMiscInfo,
+ sizeof(SGX_MISC_INFO));
+ if (psRetOUT->eError != PVRSRV_OK)
+ {
+ return -EFAULT;
+ }
+ return 0;
+}
+
+
+static IMG_INT
+SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
+ PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
+ IMG_HANDLE hAllocatedHandle;
+ IMG_UINT32 ui32AllocatedSize;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
+
+ psSGXReadHWPerfCBOUT->eError =PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXReadHWPerfCBIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
+ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
+ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32AllocatedSize,
+ (IMG_VOID **)&psAllocated,
+ &hAllocatedHandle,
+ "Array of Hardware Performance Circular Buffer Data"));
+
+ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
+ psSGXReadHWPerfCBIN->ui32ArraySize,
+ psAllocated,
+ &psSGXReadHWPerfCBOUT->ui32DataCount,
+ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
+ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
+ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
+ {
+ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
+ ui32BridgeID,
+ psSGXReadHWPerfCBIN->psHWPerfCBData,
+ psAllocated,
+ ui32AllocatedSize);
+ }
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32AllocatedSize,
+ psAllocated,
+ hAllocatedHandle);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
+ PVRSRV_BRIDGE_OUT_SGXDEVINITPART2 *psSGXDevInitPart2OUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#else
+ PVRSRV_ERROR eError;
+#endif
+ IMG_BOOL bDissociateFailed = IMG_FALSE;
+ IMG_BOOL bLookupFailed = IMG_FALSE;
+ IMG_BOOL bReleaseFailed = IMG_FALSE;
+ IMG_HANDLE hDummy;
+ IMG_UINT32 i;
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM asInitInfoKM = {0};
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
+
+
+ psSGXDevInitPart2OUT->ui32KMBuildOptions = SGX_BUILD_OPTIONS;
+
+ if(!psPerProc->bInitProcess)
+ {
+ psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED;
+ return 0;
+ }
+
+ psSGXDevInitPart2OUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXDevInitPart2IN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXDevInitPart2OUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(SUPPORT_SGX_HWPERF)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_29702)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(FIX_HW_BRN_29823)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+#else
+ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if (hHandle == 0)
+#else
+ if (hHandle == IMG_NULL)
+#endif
+ {
+ continue;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDummy,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+ }
+
+ if (bLookupFailed)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
+ psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED;
+ return 0;
+ }
+
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelCCBMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelCCBCtlMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelCCBEventKickerMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelSGXHostCtlMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelSGXTA3DCtlMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelSGXMiscMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelHWProfilingMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(SUPPORT_SGX_HWPERF)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelHWPerfCBMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelTASigBufferMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernel3DSigBufferMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_29702)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelCFIMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bLookupFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(FIX_HW_BRN_29823)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelDummyTermStreamMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAIndexStreamMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAPDSMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAUSEMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAParamMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAPMPTMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWATPCMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelClearClipWAPSGRgnHdrMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+ &psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+ &psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+ &psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo,
+ psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asInitInfoKM.hKernelEDMStatusBufferMemInfo,
+#else
+ &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
+#endif
+ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+#endif
+
+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+ IMG_HANDLE *phHandleKM = &asInitInfoKM.asInitMemHandles[i];
+
+ if (hHandle == 0)
+#else
+ IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+
+ if (*phHandle == IMG_NULL)
+#endif
+ continue;
+
+ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
+#if defined (SUPPORT_SID_INTERFACE)
+ phHandleKM,
+ hHandle,
+#else
+ phHandle,
+ *phHandle,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if (eError != PVRSRV_OK)
+ {
+ bReleaseFailed = IMG_TRUE;
+ }
+ }
+
+ if (bReleaseFailed)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
+ psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED;
+
+ PVR_DBG_BREAK;
+ return 0;
+ }
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBCtlMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBEventKickerMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXHostCtlMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXTA3DCtlMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#endif
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXMiscMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelHWProfilingMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#endif
+
+#if defined(SUPPORT_SGX_HWPERF)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelHWPerfCBMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelTASigBufferMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernel3DSigBufferMemInfo);
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+
+#if defined(FIX_HW_BRN_29702)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCFIMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#endif
+
+#if defined(FIX_HW_BRN_29823)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelDummyTermStreamMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#endif
+
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAIndexStreamMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPDSMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAUSEMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAParamMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPMPTMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWATPCMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPSGRgnHdrMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#endif
+
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+#if defined (SUPPORT_SID_INTERFACE)
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelEDMStatusBufferMemInfo);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+#else
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
+ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
+#endif
+#endif
+
+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE hHandle = asInitInfoKM.asInitMemHandles[i];
+#else
+ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+#endif
+
+ if (hHandle == IMG_NULL)
+ continue;
+
+ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
+ if (eError != PVRSRV_OK)
+ {
+ bDissociateFailed = IMG_TRUE;
+ }
+ }
+
+
+ if(bDissociateFailed)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBCtlMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXHostCtlMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXTA3DCtlMemInfo);
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo);
+#endif
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXMiscMemInfo);
+#else
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo);
+#endif
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
+#endif
+
+ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE hHandle = asInitInfoKM.asInitMemHandles[i];
+
+ if (hHandle == 0)
+#else
+ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
+
+ if (hHandle == IMG_NULL)
+#endif
+ continue;
+
+ PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle);
+
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
+
+ psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED;
+
+
+ PVR_DBG_BREAK;
+ return 0;
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ asInitInfoKM.sScripts = psSGXDevInitPart2IN->sInitInfo.sScripts;
+ asInitInfoKM.ui32ClientBuildOptions = psSGXDevInitPart2IN->sInitInfo.ui32ClientBuildOptions;
+ asInitInfoKM.sSGXStructSizes = psSGXDevInitPart2IN->sInitInfo.sSGXStructSizes;
+ asInitInfoKM.ui32CacheControl = psSGXDevInitPart2IN->sInitInfo.ui32CacheControl;
+ asInitInfoKM.ui32EDMTaskReg0 = psSGXDevInitPart2IN->sInitInfo.ui32EDMTaskReg0;
+ asInitInfoKM.ui32EDMTaskReg1 = psSGXDevInitPart2IN->sInitInfo.ui32EDMTaskReg1;
+ asInitInfoKM.ui32ClkGateStatusReg = psSGXDevInitPart2IN->sInitInfo.ui32ClkGateStatusReg;
+ asInitInfoKM.ui32ClkGateStatusMask = psSGXDevInitPart2IN->sInitInfo.ui32ClkGateStatusMask;
+
+ OSMemCopy(&asInitInfoKM.asInitDevData ,
+ &psSGXDevInitPart2IN->sInitInfo.asInitDevData,
+ sizeof(asInitInfoKM.asInitDevData));
+ OSMemCopy(&asInitInfoKM.aui32HostKickAddr,
+ &psSGXDevInitPart2IN->sInitInfo.aui32HostKickAddr,
+ sizeof(asInitInfoKM.aui32HostKickAddr));
+
+ psSGXDevInitPart2OUT->eError =
+ DevInitSGXPart2KM(psPerProc,
+ hDevCookieInt,
+ &asInitInfoKM);
+#else
+ psSGXDevInitPart2OUT->eError =
+ DevInitSGXPart2KM(psPerProc,
+ hDevCookieInt,
+ &psSGXDevInitPart2IN->sInitInfo);
+#endif
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
+ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hHWRenderContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);
+
+ psSGXRegHWRenderContextOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXRegHWRenderContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ hHWRenderContextInt =
+ SGXRegisterHWRenderContextKM(hDevCookieInt,
+ psSGXRegHWRenderContextIN->pHWRenderContextCpuVAddr,
+ psSGXRegHWRenderContextIN->ui32HWRenderContextSize,
+ psSGXRegHWRenderContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHWRenderContextIN->hDevMemContext,
+ &psSGXRegHWRenderContextOUT->sHWRenderContextDevVAddr,
+ psPerProc);
+
+ if (hHWRenderContextInt == IMG_NULL)
+ {
+ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psSGXRegHWRenderContextOUT->hHWRenderContext,
+ hHWRenderContextInt,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hHWRenderContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hHWRenderContextInt,
+ psSGXUnregHWRenderContextIN->hHWRenderContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt,
+ psSGXUnregHWRenderContextIN->bForceCleanup);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXUnregHWRenderContextIN->hHWRenderContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
+ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hHWTransferContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);
+
+ psSGXRegHWTransferContextOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXRegHWTransferContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ hHWTransferContextInt =
+ SGXRegisterHWTransferContextKM(hDevCookieInt,
+ psSGXRegHWTransferContextIN->pHWTransferContextCpuVAddr,
+ psSGXRegHWTransferContextIN->ui32HWTransferContextSize,
+ psSGXRegHWTransferContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHWTransferContextIN->hDevMemContext,
+ &psSGXRegHWTransferContextOUT->sHWTransferContextDevVAddr,
+ psPerProc);
+
+ if (hHWTransferContextInt == IMG_NULL)
+ {
+ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psSGXRegHWTransferContextOUT->hHWTransferContext,
+ hHWTransferContextInt,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE hHWTransferContextInt = 0;
+#else
+ IMG_HANDLE hHWTransferContextInt;
+#endif
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hHWTransferContextInt,
+ psSGXUnregHWTransferContextIN->hHWTransferContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt,
+ psSGXUnregHWTransferContextIN->bForceCleanup);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXUnregHWTransferContextIN->hHWTransferContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+
+ return 0;
+}
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+static IMG_INT
+SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
+ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_HANDLE hHW2DContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);
+
+ psSGXRegHW2DContextOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXRegHW2DContextIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ hHW2DContextInt =
+ SGXRegisterHW2DContextKM(hDevCookieInt,
+ psSGXRegHW2DContextIN->pHW2DContextCpuVAddr,
+ psSGXRegHW2DContextIN->ui32HW2DContextSize,
+ psSGXRegHW2DContextIN->ui32OffsetToPDDevPAddr,
+ psSGXRegHW2DContextIN->hDevMemContext,
+ &psSGXRegHW2DContextOUT->sHW2DContextDevVAddr,
+ psPerProc);
+
+ if (hHW2DContextInt == IMG_NULL)
+ {
+ psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
+ return 0;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psSGXRegHW2DContextOUT->hHW2DContext,
+ hHW2DContextInt,
+ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hHW2DContextInt;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hHW2DContextInt,
+ psSGXUnregHW2DContextIN->hHW2DContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt,
+ psSGXUnregHW2DContextIN->bForceCleanup);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXUnregHW2DContextIN->hHW2DContext,
+ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+
+ return 0;
+}
+#endif
+
+static IMG_INT
+SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXFlushHWRenderTargetIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError = SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr, IMG_FALSE);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_VOID *pvSyncInfo;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ ps2DQueryBltsCompleteIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvSyncInfo,
+ ps2DQueryBltsCompleteIN->hKernSyncInfo,
+ PVRSRV_HANDLE_TYPE_SYNC_INFO);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+
+ psRetOUT->eError =
+ SGX2DQueryBlitsCompleteKM(psDevInfo,
+ (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
+ ps2DQueryBltsCompleteIN->bWaitForComplete);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
+ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
+ IMG_UINT32 i;
+ IMG_HANDLE hSharedPBDesc = IMG_NULL;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4);
+
+ psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
+
+ psSGXFindSharedPBDescOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXFindSharedPBDescIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
+
+ psSGXFindSharedPBDescOUT->eError =
+ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
+ psSGXFindSharedPBDescIN->bLockOnFailure,
+ psSGXFindSharedPBDescIN->ui32TotalPBSize,
+ &hSharedPBDesc,
+ &psSharedPBDescKernelMemInfo,
+ &psHWPBDescKernelMemInfo,
+ &psBlockKernelMemInfo,
+ &psHWBlockKernelMemInfo,
+ &ppsSharedPBDescSubKernelMemInfos,
+ &ui32SharedPBDescSubKernelMemInfosCount);
+ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
+
+ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
+ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
+
+ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
+ ui32SharedPBDescSubKernelMemInfosCount;
+
+ if(hSharedPBDesc == IMG_NULL)
+ {
+ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
+
+ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
+ hSharedPBDesc,
+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
+ psSharedPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psSGXFindSharedPBDescOUT->hSharedPBDesc);
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
+ psHWPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psSGXFindSharedPBDescOUT->hSharedPBDesc);
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
+ psBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psSGXFindSharedPBDescOUT->hSharedPBDesc);
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle,
+ psHWBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psSGXFindSharedPBDescOUT->hSharedPBDesc);
+
+
+ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
+ {
+ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
+ psSGXFindSharedPBDescOUT;
+
+ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
+ &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
+ ppsSharedPBDescSubKernelMemInfos[i],
+ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
+ }
+
+PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
+ if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
+ ppsSharedPBDescSubKernelMemInfos,
+ IMG_NULL);
+ }
+
+ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
+ {
+ if(hSharedPBDesc != IMG_NULL)
+ {
+ SGXUnrefSharedPBDescKM(hSharedPBDesc);
+ }
+ }
+ else
+ {
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc);
+ }
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
+ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hSharedPBDesc;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
+
+ psSGXUnrefSharedPBDescOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hSharedPBDesc,
+ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
+ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psSGXUnrefSharedPBDescOUT->eError =
+ SGXUnrefSharedPBDescKM(hSharedPBDesc);
+
+ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psSGXUnrefSharedPBDescOUT->eError =
+ PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
+
+ return 0;
+}
+
+
+static IMG_INT
+SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
+ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
+ IMG_UINT32 ui32KernelMemInfoHandlesCount =
+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
+ IMG_INT ret = 0;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *phKernelMemInfoHandles = 0;
+#else
+ IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
+#endif
+ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hSharedPBDesc = IMG_NULL;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);
+
+ psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
+
+ PVR_ASSERT(ui32KernelMemInfoHandlesCount
+ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevCookieInt,
+ psSGXAddSharedPBDescIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psSharedPBDescKernelMemInfo,
+ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psHWPBDescKernelMemInfo,
+ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psBlockKernelMemInfo,
+ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&psHWBlockKernelMemInfo,
+ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+
+ if(!OSAccessOK(PVR_VERIFY_READ,
+ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
+ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
+ " Invalid phKernelMemInfos pointer", __FUNCTION__));
+ ret = -EFAULT;
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
+ (IMG_VOID **)&phKernelMemInfoHandles,
+ 0,
+ "Array of Handles");
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ phKernelMemInfoHandles,
+ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
+ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
+ != PVRSRV_OK)
+ {
+ ret = -EFAULT;
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
+ (IMG_VOID **)&ppsKernelMemInfos,
+ 0,
+ "Array of pointers to Kernel Memory Info");
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
+ {
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID **)&ppsKernelMemInfos[i],
+ phKernelMemInfoHandles[i],
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ if(eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+ }
+
+
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
+ {
+
+ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+ phKernelMemInfoHandles[i],
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
+ psSharedPBDescKernelMemInfo,
+ psHWPBDescKernelMemInfo,
+ psBlockKernelMemInfo,
+ psHWBlockKernelMemInfo,
+ psSGXAddSharedPBDescIN->ui32TotalPBSize,
+ &hSharedPBDesc,
+ ppsKernelMemInfos,
+ ui32KernelMemInfoHandlesCount,
+ psSGXAddSharedPBDescIN->sHWPBDescDevVAddr);
+
+
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
+ }
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
+ hSharedPBDesc,
+ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+
+PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
+
+ if(phKernelMemInfoHandles)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
+ (IMG_VOID *)phKernelMemInfoHandles,
+ 0);
+ }
+ if(ppsKernelMemInfos)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
+ (IMG_VOID *)ppsKernelMemInfos,
+ 0);
+ }
+
+ if(ret == 0 && eError == PVRSRV_OK)
+ {
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc);
+ }
+
+ psSGXAddSharedPBDescOUT->eError = eError;
+
+ return ret;
+}
+
+static IMG_INT
+SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
+ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_HANDLE hDevCookieInt;
+ IMG_UINT32 i;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+#endif
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
+
+ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
+
+ if(!psPerProc->bInitProcess)
+ {
+ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED;
+ return 0;
+ }
+
+ psSGXInfoForSrvinitOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
+ psSGXInfoForSrvinitIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+
+ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psSGXInfoForSrvinitOUT->eError =
+ SGXGetInfoForSrvinitKM(hDevCookieInt,
+#if defined (SUPPORT_SID_INTERFACE)
+ &asHeapInfo[0],
+ &psSGXInfoForSrvinitOUT->sInitInfo.sPDDevPAddr);
+#else
+ &psSGXInfoForSrvinitOUT->sInitInfo);
+#endif
+
+ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
+ {
+ PVRSRV_HEAP_INFO *psHeapInfo;
+
+ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if ((asHeapInfo[i].ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID) &&
+ (asHeapInfo[i].hDevMemHeap != IMG_NULL))
+ {
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &psHeapInfo->hDevMemHeap,
+ asHeapInfo[i].hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ }
+ else
+ {
+ psHeapInfo->hDevMemHeap = 0;
+ }
+
+ psHeapInfo->ui32HeapID = asHeapInfo[i].ui32HeapID;
+ psHeapInfo->sDevVAddrBase = asHeapInfo[i].sDevVAddrBase;
+ psHeapInfo->ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize;
+ psHeapInfo->ui32Attribs = asHeapInfo[i].ui32Attribs;
+ psHeapInfo->ui32XTileStride = asHeapInfo[i].ui32XTileStride;
+#else
+ if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
+ {
+ IMG_HANDLE hDevMemHeapExt;
+
+ if (psHeapInfo->hDevMemHeap != IMG_NULL)
+ {
+
+ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
+ &hDevMemHeapExt,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
+ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
+ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
+ }
+ }
+#endif
+ }
+
+ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
+
+ return 0;
+}
+
+#if defined(PDUMP)
+static IMG_VOID
+DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ PSGX_KICKTA_DUMP_BUFFER_KM psBufferArray,
+#else
+ PSGX_KICKTA_DUMP_BUFFER psBufferArray,
+#endif
+ IMG_UINT32 ui32BufferArrayLength,
+ IMG_BOOL bDumpPolls)
+{
+ IMG_UINT32 i;
+
+ for (i=0; i<ui32BufferArrayLength; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ PSGX_KICKTA_DUMP_BUFFER_KM psBuffer;
+#else
+ PSGX_KICKTA_DUMP_BUFFER psBuffer;
+#endif
+ PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
+ IMG_CHAR * pszName;
+ IMG_HANDLE hUniqueTag;
+ IMG_UINT32 ui32Offset;
+
+ psBuffer = &psBufferArray[i];
+ pszName = psBuffer->pszName;
+ if (!pszName)
+ {
+ pszName = "Nameless buffer";
+ }
+
+ hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
+
+ #if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo);
+ ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr;
+ #else
+ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
+ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
+ #endif
+
+ if (psBuffer->ui32Start <= psBuffer->ui32End)
+ {
+ if (bDumpPolls)
+ {
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
+ PDUMPCBP(psCtrlMemInfoKM,
+ ui32Offset,
+ psBuffer->ui32Start,
+ psBuffer->ui32SpaceUsed,
+ psBuffer->ui32BufferSize,
+ 0,
+ MAKEUNIQUETAG(psCtrlMemInfoKM));
+ }
+
+ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
+ PDUMPMEMUM(psPerProc,
+ IMG_NULL,
+ psBuffer->pvLinAddr,
+ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
+ psBuffer->ui32Start,
+ psBuffer->ui32End - psBuffer->ui32Start,
+ 0,
+ hUniqueTag);
+ }
+ else
+ {
+
+
+ if (bDumpPolls)
+ {
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
+ PDUMPCBP(psCtrlMemInfoKM,
+ ui32Offset,
+ psBuffer->ui32Start,
+ psBuffer->ui32BackEndLength,
+ psBuffer->ui32BufferSize,
+ 0,
+ MAKEUNIQUETAG(psCtrlMemInfoKM));
+ }
+ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
+ PDUMPMEMUM(psPerProc,
+ IMG_NULL,
+ psBuffer->pvLinAddr,
+ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
+ psBuffer->ui32Start,
+ psBuffer->ui32BackEndLength,
+ 0,
+ hUniqueTag);
+
+ if (bDumpPolls)
+ {
+ PDUMPMEMPOL(psCtrlMemInfoKM,
+ ui32Offset,
+ 0,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_NOTEQUAL,
+ 0,
+ MAKEUNIQUETAG(psCtrlMemInfoKM));
+
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
+ PDUMPCBP(psCtrlMemInfoKM,
+ ui32Offset,
+ 0,
+ psBuffer->ui32End,
+ psBuffer->ui32BufferSize,
+ 0,
+ MAKEUNIQUETAG(psCtrlMemInfoKM));
+ }
+ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
+ PDUMPMEMUM(psPerProc,
+ IMG_NULL,
+ psBuffer->pvLinAddr,
+ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
+ 0,
+ psBuffer->ui32End,
+ 0,
+ hUniqueTag);
+ }
+ }
+}
+static IMG_INT
+SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 i;
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_KICKTA_DUMP_BUFFER *psUMPtr;
+ SGX_KICKTA_DUMP_BUFFER_KM *psKickTADumpBufferKM, *psKMPtr;
+#else
+ SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
+#endif
+ IMG_UINT32 ui32BufferArrayLength =
+ psPDumpBufferArrayIN->ui32BufferArrayLength;
+ IMG_UINT32 ui32BufferArraySize =
+ ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER);
+ PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
+
+#if defined (SUPPORT_SID_INTERFACE)
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32BufferArraySize,
+ (IMG_PVOID *)&psKickTADumpBufferKM, 0,
+ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
+#else
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32BufferArraySize,
+ (IMG_PVOID *)&psKickTADumpBuffer, 0,
+ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
+#endif
+ {
+ return -ENOMEM;
+ }
+
+#if !defined (SUPPORT_SID_INTERFACE)
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ psKickTADumpBuffer,
+ psPDumpBufferArrayIN->psBufferArray,
+ ui32BufferArraySize) != PVRSRV_OK)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
+
+ return -EFAULT;
+ }
+#endif
+
+ for(i = 0; i < ui32BufferArrayLength; i++)
+ {
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_VOID *pvMemInfo = IMG_NULL;
+ psUMPtr = &psPDumpBufferArrayIN->psBufferArray[i];
+ psKMPtr = &psKickTADumpBufferKM[i];
+#else
+ IMG_VOID *pvMemInfo;
+#endif
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUMPtr->hKernelMemInfo,
+#else
+ psKickTADumpBuffer[i].hKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
+ "PVRSRVLookupHandle failed (%d)", eError));
+ break;
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ psKMPtr->hKernelMemInfo = pvMemInfo;
+#else
+ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
+#endif
+
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &pvMemInfo,
+#if defined (SUPPORT_SID_INTERFACE)
+ psUMPtr->hCtrlKernelMemInfo,
+#else
+ psKickTADumpBuffer[i].hCtrlKernelMemInfo,
+#endif
+ PVRSRV_HANDLE_TYPE_MEM_INFO);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
+ "PVRSRVLookupHandle failed (%d)", eError));
+ break;
+ }
+#if defined (SUPPORT_SID_INTERFACE)
+ psKMPtr->hCtrlKernelMemInfo = pvMemInfo;
+ psKMPtr->sCtrlDevVAddr = psUMPtr->sCtrlDevVAddr;
+#else
+ psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo;
+#endif
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+ psKMPtr->ui32SpaceUsed = psUMPtr->ui32SpaceUsed;
+ psKMPtr->ui32Start = psUMPtr->ui32Start;
+ psKMPtr->ui32End = psUMPtr->ui32End;
+ psKMPtr->ui32BufferSize = psUMPtr->ui32BufferSize;
+ psKMPtr->ui32BackEndLength = psUMPtr->ui32BackEndLength;
+ psKMPtr->uiAllocIndex = psUMPtr->uiAllocIndex;
+ psKMPtr->pvLinAddr = psUMPtr->pvLinAddr;
+ psKMPtr->pszName = psUMPtr->pszName;
+#endif
+ }
+
+ if(eError == PVRSRV_OK)
+ {
+ DumpBufferArray(psPerProc,
+#if defined (SUPPORT_SID_INTERFACE)
+ psKickTADumpBufferKM,
+#else
+ psKickTADumpBuffer,
+#endif
+ ui32BufferArrayLength,
+ psPDumpBufferArrayIN->bDumpPolls);
+ }
+
+#if defined (SUPPORT_SID_INTERFACE)
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBufferKM, 0);
+#else
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
+#endif
+
+
+ return 0;
+}
+
+static IMG_INT
+SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
+ IMG_UINT32 *pui32Registers = IMG_NULL;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+ IMG_UINT32 ui32RegVal = 0;
+#endif
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContextInt = 0;
+ IMG_UINT32 ui32MMUContextID;
+ IMG_INT ret = -EFAULT;
+
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
+
+ if (ui32RegisterArraySize == 0)
+ {
+ goto ExitNoError;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psDeviceNode,
+ psPDump3DSignatureRegistersIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
+ goto Exit;
+ }
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
+#if defined(PDUMP)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
+ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+#endif
+#endif
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32RegisterArraySize,
+ (IMG_PVOID *)&pui32Registers, 0,
+ "Array of Registers") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed"));
+ goto Exit;
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ pui32Registers,
+ psPDump3DSignatureRegistersIN->pui32Registers,
+ ui32RegisterArraySize) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed"));
+ goto Exit;
+ }
+
+ PDump3DSignatureRegisters(&psDeviceNode->sDevId,
+ psPDump3DSignatureRegistersIN->ui32DumpFrameNum,
+ psPDump3DSignatureRegistersIN->bLastFrame,
+ pui32Registers,
+ psPDump3DSignatureRegistersIN->ui32NumRegisters);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle( psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psPDump3DSignatureRegistersIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL)
+ ui32MMUContextID = psDeviceNode->pfnMMUGetContextID((IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext);
+
+ PDumpSignatureBuffer(&psDeviceNode->sDevId,
+ "out.tasig", "TA", 0,
+ psDevInfo->psKernelTASigBufferMemInfo->sDevVAddr,
+ (IMG_UINT32)psDevInfo->psKernelTASigBufferMemInfo->uAllocSize,
+ ui32MMUContextID,
+ 0 );
+ PDumpSignatureBuffer(&psDeviceNode->sDevId,
+ "out.3dsig", "3D", 0,
+ psDevInfo->psKernel3DSigBufferMemInfo->sDevVAddr,
+ (IMG_UINT32)psDevInfo->psKernel3DSigBufferMemInfo->uAllocSize,
+ ui32MMUContextID,
+ 0 );
+
+ExitNoError:
+ psRetOUT->eError = PVRSRV_OK;
+ ret = 0;
+Exit:
+ if (pui32Registers != IMG_NULL)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
+ }
+
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+ if (psDevInfo != IMG_NULL)
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
+#if defined(PDUMP)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal,
+ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+#endif
+ }
+#endif
+
+ return ret;
+}
+
+static IMG_INT
+SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
+ IMG_VOID *psBridgeOut,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
+ IMG_UINT32 *pui32Registers = IMG_NULL;
+ PVRSRV_DEVICE_NODE *psDeviceNode ;
+ IMG_INT ret = -EFAULT;
+
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
+
+ if (ui32RegisterArraySize == 0)
+ {
+ goto ExitNoError;
+ }
+
+ if(PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psDeviceNode,
+ psPDumpCounterRegistersIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXPDumpCounterRegistersBW: hDevCookie lookup failed"));
+ ret = -EFAULT;
+ goto Exit;
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32RegisterArraySize,
+ (IMG_PVOID *)&pui32Registers, 0,
+ "Array of Registers") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
+ ret = -ENOMEM;
+ goto Exit;
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ pui32Registers,
+ psPDumpCounterRegistersIN->pui32Registers,
+ ui32RegisterArraySize) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
+ goto Exit;
+ }
+
+ PDumpCounterRegisters(&psDeviceNode->sDevId,
+ psPDumpCounterRegistersIN->ui32DumpFrameNum,
+ psPDumpCounterRegistersIN->bLastFrame,
+ pui32Registers,
+ psPDumpCounterRegistersIN->ui32NumRegisters);
+
+ExitNoError:
+ ret = 0;
+Exit:
+ if (pui32Registers != IMG_NULL)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
+ }
+
+ return ret;
+}
+
+static IMG_INT
+SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
+ IMG_UINT32 *pui32Registers = IMG_NULL;
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
+ IMG_UINT32 ui32RegVal = 0;
+#endif
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_INT ret = -EFAULT;
+
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
+
+ if (ui32RegisterArraySize == 0)
+ {
+ goto ExitNoError;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode,
+ psPDumpTASignatureRegistersIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
+ goto Exit;
+ }
+
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
+#if defined(PDUMP)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
+ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+#endif
+#endif
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32RegisterArraySize,
+ (IMG_PVOID *)&pui32Registers, 0,
+ "Array of Registers") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed"));
+ ret = -ENOMEM;
+ goto Exit;
+ }
+
+ if(CopyFromUserWrapper(psPerProc,
+ ui32BridgeID,
+ pui32Registers,
+ psPDumpTASignatureRegistersIN->pui32Registers,
+ ui32RegisterArraySize) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed"));
+ goto Exit;
+ }
+
+ PDumpTASignatureRegisters(&psDeviceNode->sDevId,
+ psPDumpTASignatureRegistersIN->ui32DumpFrameNum,
+ psPDumpTASignatureRegistersIN->ui32TAKickCount,
+ psPDumpTASignatureRegistersIN->bLastFrame,
+ pui32Registers,
+ psPDumpTASignatureRegistersIN->ui32NumRegisters);
+
+ExitNoError:
+ psRetOUT->eError = PVRSRV_OK;
+ ret = 0;
+Exit:
+ if (pui32Registers != IMG_NULL)
+ {
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
+ }
+
+#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
+ if (psDevInfo != IMG_NULL)
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
+#if defined(PDUMP)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal,
+ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+#endif
+ }
+#endif
+
+ return ret;
+}
+static IMG_INT
+SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+#if defined(SUPPORT_SGX_HWPERF)
+#if defined(__linux__)
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContextInt = 0;
+ IMG_UINT32 ui32MMUContextID = 0;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode,
+ psPDumpHWPerfCBIN->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle( psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psPDumpHWPerfCBIN->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL)
+ ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt);
+
+ PDumpHWPerfCBKM(&psDeviceNode->sDevId,
+ &psPDumpHWPerfCBIN->szFileName[0],
+ psPDumpHWPerfCBIN->ui32FileOffset,
+ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
+ psDevInfo->psKernelHWPerfCBMemInfo->uAllocSize,
+ ui32MMUContextID,
+ psPDumpHWPerfCBIN->ui32PDumpFlags);
+
+ return 0;
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ return 0;
+#endif
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
+ PVR_UNREFERENCED_PARAMETER(psRetOUT);
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ return -EFAULT;
+#endif
+}
+
+
+static IMG_INT
+SGXPDumpSaveMemBW(IMG_UINT32 ui32BridgeID,
+ PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM *psPDumpSaveMem,
+ PVRSRV_BRIDGE_RETURN *psRetOUT,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_HANDLE hDevMemContextInt = 0;
+ IMG_UINT32 ui32MMUContextID;
+
+ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM);
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle(psPerProc->psHandleBase,
+ (IMG_VOID**)&psDeviceNode,
+ psPDumpSaveMem->hDevCookie,
+ PVRSRV_HANDLE_TYPE_DEV_NODE);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+ psRetOUT->eError =
+ PVRSRVLookupHandle( psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ psPDumpSaveMem->hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+ if(psRetOUT->eError != PVRSRV_OK)
+ {
+ return 0;
+ }
+
+
+ PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL)
+ ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt);
+
+ PDumpSaveMemKM(&psDeviceNode->sDevId,
+ &psPDumpSaveMem->szFileName[0],
+ psPDumpSaveMem->ui32FileOffset,
+ psPDumpSaveMem->sDevVAddr,
+ psPDumpSaveMem->ui32Size,
+ ui32MMUContextID,
+ psPDumpSaveMem->ui32PDumpFlags);
+ return 0;
+}
+
+#endif
+
+
+
+IMG_VOID SetSGXDispatchTableEntry(IMG_VOID)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
+
+#if defined(TRANSFER_QUEUE)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
+#endif
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT, SGXGetInfoForSrvinitBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
+#endif
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY, SGXSetRenderContextPriorityBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY, SGXSetTransferContextPriorityBW);
+
+#if defined(PDUMP)
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM, SGXPDumpSaveMemBW);
+#endif
+}
+
+
+#endif
diff --git a/drivers/gpu/pvr/sgx/bridged_sgx_bridge.h b/drivers/gpu/pvr/sgx/bridged_sgx_bridge.h
new file mode 100644
index 0000000..204450c
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/bridged_sgx_bridge.h
@@ -0,0 +1,42 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __BRIDGED_SGX_BRIDGE_H__
+#define __BRIDGED_SGX_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+IMG_VOID SetSGXDispatchTableEntry(IMG_VOID);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/mmu.c b/drivers/gpu/pvr/sgx/mmu.c
new file mode 100644
index 0000000..9d124f5
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/mmu.c
@@ -0,0 +1,3933 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "hash.h"
+#include "ra.h"
+#include "pdump_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "mmu.h"
+#include "sgxconfig.h"
+#include "sgx_bridge_km.h"
+#include "pdump_osfunc.h"
+
+#define UINT32_MAX_VALUE 0xFFFFFFFFUL
+
+#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
+
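+/* Geometry for the BRN31620 workaround: page-directory entries are handled
+ * in cache-line-sized groups, and a dummy page plus dummy page table are
+ * kept wired at a fixed index within each group (see the BRN31620* helpers
+ * further down). */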
+#if defined(FIX_HW_BRN_31620)
+#define SGX_MMU_PDE_DUMMY_PAGE (0)
+#define SGX_MMU_PTE_DUMMY_PAGE (0)
+
+#define BRN31620_PT_ADDRESS_RANGE_SHIFT 22
+#define BRN31620_PT_ADDRESS_RANGE_SIZE (1 << BRN31620_PT_ADDRESS_RANGE_SHIFT)
+
+#define BRN31620_PDE_CACHE_FILL_SHIFT 26
+#define BRN31620_PDE_CACHE_FILL_SIZE (1 << BRN31620_PDE_CACHE_FILL_SHIFT)
+#define BRN31620_PDE_CACHE_FILL_MASK (BRN31620_PDE_CACHE_FILL_SIZE - 1)
+
+#define BRN31620_PDES_PER_CACHE_LINE_SHIFT (BRN31620_PDE_CACHE_FILL_SHIFT - BRN31620_PT_ADDRESS_RANGE_SHIFT)
+#define BRN31620_PDES_PER_CACHE_LINE_SIZE (1 << BRN31620_PDES_PER_CACHE_LINE_SHIFT)
+#define BRN31620_PDES_PER_CACHE_LINE_MASK (BRN31620_PDES_PER_CACHE_LINE_SIZE - 1)
+
+#define BRN31620_DUMMY_PAGE_OFFSET (1 * SGX_MMU_PAGE_SIZE)
+#define BRN31620_DUMMY_PDE_INDEX (BRN31620_DUMMY_PAGE_OFFSET / BRN31620_PT_ADDRESS_RANGE_SIZE)
+#define BRN31620_DUMMY_PTE_INDEX ((BRN31620_DUMMY_PAGE_OFFSET - (BRN31620_DUMMY_PDE_INDEX * BRN31620_PT_ADDRESS_RANGE_SIZE))/SGX_MMU_PAGE_SIZE)
+
+#define BRN31620_CACHE_FLUSH_SHIFT (32 - BRN31620_PDE_CACHE_FILL_SHIFT)
+#define BRN31620_CACHE_FLUSH_SIZE (1 << BRN31620_CACHE_FLUSH_SHIFT)
+
+#define BRN31620_CACHE_FLUSH_BITS_SHIFT 5
+#define BRN31620_CACHE_FLUSH_BITS_SIZE (1 << BRN31620_CACHE_FLUSH_BITS_SHIFT)
+#define BRN31620_CACHE_FLUSH_BITS_MASK (BRN31620_CACHE_FLUSH_BITS_SIZE - 1)
+
+#define BRN31620_CACHE_FLUSH_INDEX_BITS (BRN31620_CACHE_FLUSH_SHIFT - BRN31620_CACHE_FLUSH_BITS_SHIFT)
+#define BRN31620_CACHE_FLUSH_INDEX_SIZE (1 << BRN31620_CACHE_FLUSH_INDEX_BITS)
+
+#define BRN31620_DUMMY_PAGE_SIGNATURE 0xFEEBEE01
+#endif
+
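+/* Per-page-table bookkeeping: the OS memory handle and kernel mapping of
+ * the page-table page, plus a count of the valid PTEs it currently holds. */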
+typedef struct _MMU_PT_INFO_
+{
+
+ IMG_VOID *hPTPageOSMemHandle;
+ IMG_CPU_VIRTADDR PTPageCpuVAddr;
+
+
+ IMG_UINT32 ui32ValidPTECount;
+} MMU_PT_INFO;
+
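+/* Per-memory-context state: the page directory (CPU/device addresses and OS
+ * handle), the page-table info list indexed by PD entry, optional PDump and
+ * BRN31620 state, and a link to the next context on the device-wide list. */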
+struct _MMU_CONTEXT_
+{
+
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+
+ IMG_CPU_VIRTADDR pvPDCpuVAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+
+ IMG_VOID *hPDOSMemHandle;
+
+
+ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
+
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+
+#if defined(PDUMP)
+ IMG_UINT32 ui32PDumpMMUContextID;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ IMG_BOOL bPDumpActive;
+#endif
+#endif
+
+#if defined (FIX_HW_BRN_31620)
+ IMG_UINT32 ui32PDChangeMask[BRN31620_CACHE_FLUSH_INDEX_SIZE];
+ IMG_UINT32 ui32PDCacheRangeRefCount[BRN31620_CACHE_FLUSH_SIZE];
+ MMU_PT_INFO *apsPTInfoListSave[SGX_MAX_PD_ENTRIES];
+#endif
+ struct _MMU_CONTEXT_ *psNext;
+};
+
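+/* Per-heap MMU state: the owning context, the heap's PD base index and
+ * page-table count, data-page and page-table/page-directory geometry, and
+ * the arenas the heap allocates device virtual addresses from. */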
+struct _MMU_HEAP_
+{
+
+ MMU_CONTEXT *psMMUContext;
+
+
+
+
+ IMG_UINT32 ui32PDBaseIndex;
+
+ IMG_UINT32 ui32PageTableCount;
+
+ IMG_UINT32 ui32PTETotalUsable;
+
+ IMG_UINT32 ui32PDEPageSizeCtrl;
+
+
+
+
+ IMG_UINT32 ui32DataPageSize;
+
+ IMG_UINT32 ui32DataPageBitWidth;
+
+ IMG_UINT32 ui32DataPageMask;
+
+
+
+
+ IMG_UINT32 ui32PTShift;
+
+ IMG_UINT32 ui32PTBitWidth;
+
+ IMG_UINT32 ui32PTMask;
+
+ IMG_UINT32 ui32PTSize;
+
+ IMG_UINT32 ui32PTNumEntriesAllocated;
+
+ IMG_UINT32 ui32PTNumEntriesUsable;
+
+
+
+
+ IMG_UINT32 ui32PDShift;
+
+ IMG_UINT32 ui32PDBitWidth;
+
+ IMG_UINT32 ui32PDMask;
+
+
+
+ RA_ARENA *psVMArena;
+ DEV_ARENA_DESCRIPTOR *psDevArena;
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+};
+
+
+
+#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
+#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
+#endif
+
+static IMG_VOID
+_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT);
+
+#if defined(PDUMP)
+static IMG_VOID
+MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_BOOL bForUnmap,
+ IMG_HANDLE hUniqueTag);
+#endif
+
+#define PAGE_TEST 0
+#if PAGE_TEST
+static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
+#endif
+
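+/* Debug switches: DumpPT() prints an entire page table; CheckPT() (PT_DEBUG
+ * builds) recounts the valid PTEs and BUG()s if the total disagrees with
+ * ui32ValidPTECount. */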
+#define PT_DUMP 1
+
+#define PT_DEBUG 0
+#if (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF)
+static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
+{
+ IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
+ IMG_UINT32 i;
+
+
+ for(i = 0; i < 1024; i += 8)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%08X %08X %08X %08X %08X %08X %08X %08X\n",
+ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
+ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
+ }
+}
+#else
+static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
+{
+ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
+}
+#endif
+
+#if PT_DEBUG
+static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
+{
+ IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
+ IMG_UINT32 i, ui32Count = 0;
+
+
+ for(i = 0; i < 1024; i++)
+ if(p[i] & SGX_MMU_PTE_VALID)
+ ui32Count++;
+
+ if(psPTInfoList->ui32ValidPTECount != ui32Count)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ValidPTECount: %u ui32Count: %u\n",
+ psPTInfoList->ui32ValidPTECount, ui32Count));
+ DumpPT(psPTInfoList);
+ BUG();
+ }
+}
+#else
+static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
+{
+ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
+}
+#endif
+
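+/* With PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND, page-table pages are kept
+ * read-only in the kernel linear map and are temporarily remapped
+ * read/write around each update; otherwise these helpers are no-ops. */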
+#if defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND)
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#else
+#include <generated/autoconf.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
+{
+ pgd_t *psPGD;
+ pud_t *psPUD;
+ pmd_t *psPMD;
+ pte_t *psPTE;
+ pte_t ptent;
+ IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;
+
+ psPGD = pgd_offset_k(ui32CPUVAddr);
+ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPUD = pud_offset(psPGD, ui32CPUVAddr);
+ if (pud_none(*psPUD) || pud_bad(*psPUD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPMD = pmd_offset(psPUD, ui32CPUVAddr);
+ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
+ {
+ PVR_ASSERT(0);
+ }
+ psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);
+
+ ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
+ ptent = pte_mkwrite(ptent);
+ ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);
+
+ flush_tlb_all();
+}
+
+static IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
+{
+ pgd_t *psPGD;
+ pud_t *psPUD;
+ pmd_t *psPMD;
+ pte_t *psPTE;
+ pte_t ptent;
+ IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;
+
+ OSWriteMemoryBarrier();
+
+ psPGD = pgd_offset_k(ui32CPUVAddr);
+ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPUD = pud_offset(psPGD, ui32CPUVAddr);
+ if (pud_none(*psPUD) || pud_bad(*psPUD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPMD = pmd_offset(psPUD, ui32CPUVAddr);
+ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
+ {
+ PVR_ASSERT(0);
+ }
+
+ psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);
+
+ ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
+ ptent = pte_wrprotect(ptent);
+ ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);
+
+ flush_tlb_all();
+
+}
+
+#else
+
+static INLINE IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
+}
+
+static INLINE IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
+}
+
+#endif
+
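+/* Shared and shared-exported heaps have their page tables mapped into every
+ * MMU context; per-context and kernel heaps belong to a single context. */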
+IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMUHeap)
+{
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ return IMG_TRUE;
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ return IMG_FALSE;
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_IsHeapShared: ERROR invalid heap type"));
+ return IMG_FALSE;
+ }
+ }
+}
+
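+/* With SUPPORT_SGX_MMU_BYPASS, the host sets and clears the MMU bypass bit
+ * in EUR_CR_BIF_CTRL around page-directory updates. */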
+#ifdef SUPPORT_SGX_MMU_BYPASS
+IMG_VOID
+EnableHostAccess (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 ui32RegVal;
+ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
+
+
+
+
+ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
+
+ OSWriteHWReg(pvRegsBaseKM,
+ EUR_CR_BIF_CTRL,
+ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+}
+
+IMG_VOID
+DisableHostAccess (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 ui32RegVal;
+ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
+
+
+	/* Read-modify-write EUR_CR_BIF_CTRL to clear the host MMU bypass bit */
+	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
+
+	OSWriteHWReg(pvRegsBaseKM,
+				EUR_CR_BIF_CTRL,
+				ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
+
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0);
+}
+#endif
+
+
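+/* Cache invalidation is deferred: these helpers only set SGXMKIF_CC_INVAL_*
+ * flags in psDevInfo->ui32CacheControl; the actual invalidation is performed
+ * later. */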
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ #if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
+ #else
+
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif
+}
+#endif
+
+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ MMU_InvalidateSystemLevelCache(psDevInfo);
+ #endif
+}
+
+
+static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ MMU_InvalidateSystemLevelCache(psDevInfo);
+ #endif
+}
+
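+/* BRN31620 helpers: invalidated PTEs are pointed at the dummy page instead
+ * of being zeroed, and page tables are only genuinely freed once every PD
+ * entry in the same cache-line range has been released, at which point the
+ * dummy PT is re-wired into the affected page directories. */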
+#if defined(FIX_HW_BRN_31620)
+static IMG_VOID BRN31620InvalidatePageTableEntry(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32PDIndex, IMG_UINT32 ui32PTIndex, IMG_UINT32 *pui32PTE)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+
+
+ if (((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
+ && (ui32PTIndex == BRN31620_DUMMY_PTE_INDEX))
+ {
+ *pui32PTE = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ }
+ else
+ {
+ *pui32PTE = 0;
+ }
+}
+
+static IMG_BOOL BRN31620FreePageTable(MMU_HEAP *psMMUHeap, IMG_UINT32 ui32PDIndex)
+{
+ MMU_CONTEXT *psMMUContext = psMMUHeap->psMMUContext;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+ IMG_UINT32 ui32PDCacheLine = ui32PDIndex >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+	IMG_BOOL bFreePTs = IMG_FALSE;
+ IMG_UINT32 *pui32Tmp;
+
+ PVR_ASSERT(psMMUHeap != IMG_NULL);
+
+
+ PVR_ASSERT(psMMUContext->apsPTInfoListSave[ui32PDIndex] == IMG_NULL);
+
+ psMMUContext->apsPTInfoListSave[ui32PDIndex] = psMMUContext->apsPTInfoList[ui32PDIndex];
+ psMMUContext->apsPTInfoList[ui32PDIndex] = IMG_NULL;
+
+
+ if (--psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine] == 0)
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndexStart = ui32PDCacheLine * BRN31620_PDES_PER_CACHE_LINE_SIZE;
+ IMG_UINT32 ui32PDIndexEnd = ui32PDIndexStart + BRN31620_PDES_PER_CACHE_LINE_SIZE;
+ IMG_UINT32 ui32PDBitMaskIndex, ui32PDBitMaskShift;
+
+
+ for (i=ui32PDIndexStart;i<ui32PDIndexEnd;i++)
+ {
+
+ psMMUContext->apsPTInfoList[i] = psMMUContext->apsPTInfoListSave[i];
+ psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
+ _DeferredFreePageTable(psMMUHeap, i - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
+ }
+
+ ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
+ ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
+
+
+ if (MMU_IsHeapShared(psMMUHeap))
+ {
+
+ MMU_CONTEXT *psMMUContextWalker = (MMU_CONTEXT*) psMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContextWalker)
+ {
+ psMMUContextWalker->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+
+ MakeKernelPageReadWrite(psMMUContextWalker->pvPDCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psMMUContextWalker->pvPDCpuVAddr;
+ pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContextWalker->pvPDCpuVAddr);
+
+ PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContextWalker->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ psMMUContextWalker = psMMUContextWalker->psNext;
+ }
+ }
+ else
+ {
+ psMMUContext->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
+ pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+
+ PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+
+ bFreePTs = IMG_TRUE;
+ }
+
+ return bFreePTs;
+}
+#endif
+
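+/* Allocate and initialise one page-table page, either from OS pages (UMA)
+ * or from the local device memory arena (LMA), and return its device
+ * physical address. */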
+static IMG_BOOL
+_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
+ MMU_PT_INFO *psPTInfoList,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+
+
+
+ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
+ {
+
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ pMMUHeap->ui32PTSize,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
+ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
+ return IMG_FALSE;
+ }
+
+
+
+
+ MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);
+
+
+ if(psPTInfoList->PTPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
+ psPTInfoList->PTPageCpuVAddr);
+ }
+ else
+ {
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
+ }
+
+ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+
+
+
+
+ if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
+ return IMG_FALSE;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+
+ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psPTInfoList->hPTPageOSMemHandle);
+ if(!psPTInfoList->PTPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
+ return IMG_FALSE;
+ }
+
+
+ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ #if PAGE_TEST
+ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
+ #endif
+ }
+
+ MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ {
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 i;
+
+ pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
+
+ for(i=0; i<pMMUHeap->ui32PTNumEntriesUsable; i++)
+ {
+ pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ }
+
+ for(; i<pMMUHeap->ui32PTNumEntriesAllocated; i++)
+ {
+ pui32Tmp[i] = 0;
+ }
+ }
+#else
+
+ OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
+#endif
+ MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+
+ PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
+
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+
+
+ *psDevPAddr = sDevPAddr;
+
+ return IMG_TRUE;
+}
+
+
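+/* Return a page-table page to the allocator it came from (OS pages or the
+ * local device memory arena). */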
+static IMG_VOID
+_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
+{
+
+
+
+
+ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
+ {
+
+ MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
+
+
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ pMMUHeap->ui32PTSize,
+ psPTInfoList->PTPageCpuVAddr,
+ psPTInfoList->hPTPageOSMemHandle);
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+
+ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
+ psPTInfoList->PTPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
+
+
+
+ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psPTInfoList->hPTPageOSMemHandle);
+
+
+
+
+ RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+}
+
+
+
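+/* Disconnect one of the heap's page tables from the page directory (every
+ * context's PD for shared heaps), zero its PTEs and, when bOSFreePT is set,
+ * free the page-table page and its MMU_PT_INFO. */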
+static IMG_VOID
+_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
+{
+ IMG_UINT32 *pui32PDEntry;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ SYS_DATA *psSysData;
+ MMU_PT_INFO **ppsPTInfoList;
+
+ SysAcquireData(&psSysData);
+
+
+ ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ {
+#if PT_DEBUG
+ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
+ {
+ DumpPT(ppsPTInfoList[ui32PTIndex]);
+
+ }
+#endif
+
+
+ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
+ }
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+
+ PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
+ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
+ {
+ PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
+ }
+ }
+#endif
+
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ {
+
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+#else
+
+ if(bOSFreePT)
+ {
+ pui32PDEntry[ui32PTIndex] = 0;
+ }
+#endif
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ #if defined(PDUMP)
+
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif
+ {
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif
+
+ psMMUContext = psMMUContext->psNext;
+ }
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ {
+ MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+
+ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+#else
+
+ if(bOSFreePT)
+ {
+ pui32PDEntry[ui32PTIndex] = 0;
+ }
+#endif
+ MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+
+
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
+ return;
+ }
+ }
+
+
+ if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
+ {
+ if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
+ {
+ IMG_PUINT32 pui32Tmp;
+
+ MakeKernelPageReadWrite(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
+
+
+ for(i=0;
+ (i<pMMUHeap->ui32PTETotalUsable) && (i<pMMUHeap->ui32PTNumEntriesUsable);
+ i++)
+ {
+
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
+
+
+
+ if(bOSFreePT)
+ {
+ _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
+ }
+
+
+
+
+ pMMUHeap->ui32PTETotalUsable -= i;
+ }
+ else
+ {
+
+ pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
+ }
+
+ if(bOSFreePT)
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(MMU_PT_INFO),
+ ppsPTInfoList[ui32PTIndex],
+ IMG_NULL);
+ ppsPTInfoList[ui32PTIndex] = IMG_NULL;
+ }
+ }
+ else
+ {
+
+ pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
+ }
+
+ PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
+}
+
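+/* Free every page table owned by the heap; with FIX_HW_BRN_31620 the PTEs
+ * are first redirected to the dummy page and the tables are released per
+ * PD cache-line range. */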
+static IMG_VOID
+_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
+{
+ IMG_UINT32 i;
+#if defined(FIX_HW_BRN_31620)
+ MMU_CONTEXT *psMMUContext = pMMUHeap->psMMUContext;
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 j;
+#endif
+#if defined(PDUMP)
+ PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)",
+ pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ pMMUHeap->ui32PDBaseIndex,
+ pMMUHeap->ui32PageTableCount);
+#endif
+#if defined(FIX_HW_BRN_31620)
+ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
+ {
+ ui32PDIndex = (pMMUHeap->ui32PDBaseIndex + i);
+
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+
+ for (j=0;j<SGX_MMU_PT_SIZE;j++)
+ {
+ pui32Tmp = (IMG_UINT32 *) psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ BRN31620InvalidatePageTableEntry(psMMUContext, ui32PDIndex, j, &pui32Tmp[j]);
+ }
+ }
+
+ if (BRN31620FreePageTable(pMMUHeap, ui32PDIndex) == IMG_TRUE)
+ {
+ bInvalidateDirectoryCache = IMG_TRUE;
+ }
+ }
+ }
+
+
+ if (bInvalidateDirectoryCache)
+ {
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+ }
+ else
+ {
+ MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
+ }
+#else
+ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
+ {
+ _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
+ }
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+#endif
+}
+
+
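+/* Ensure page tables exist for the device-virtual range starting at
+ * DevVAddr of length ui32Size: allocate any missing MMU_PT_INFO structures
+ * and page-table pages and wire them into the page directory of every
+ * context that shares the heap. */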
+static IMG_BOOL
+_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
+{
+ IMG_UINT32 ui32PageTableCount;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 i;
+ IMG_UINT32 *pui32PDEntry;
+ MMU_PT_INFO **ppsPTInfoList;
+ SYS_DATA *psSysData;
+ IMG_DEV_VIRTADDR sHighDevVAddr;
+#if defined(FIX_HW_BRN_31620)
+ IMG_BOOL bFlushSystemCache = IMG_FALSE;
+ IMG_BOOL bSharedPT = IMG_FALSE;
+ IMG_DEV_VIRTADDR sDevVAddrRequestStart;
+ IMG_DEV_VIRTADDR sDevVAddrRequestEnd;
+ IMG_UINT32 ui32PDRequestStart;
+ IMG_UINT32 ui32PDRequestEnd;
+ IMG_UINT32 ui32ModifiedCachelines[BRN31620_CACHE_FLUSH_INDEX_SIZE];
+#endif
+
+
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
+ PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
+#endif
+
+
+ SysAcquireData(&psSysData);
+
+
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+
+ if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
+ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
+ {
+
+ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
+ }
+ else
+ {
+ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
+ + ui32Size
+ + pMMUHeap->ui32DataPageMask
+ + pMMUHeap->ui32PTMask;
+ }
+
+ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ if (ui32PageTableCount == 0)
+ ui32PageTableCount = 1024;
+
+#if defined(FIX_HW_BRN_31620)
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ ui32ModifiedCachelines[i] = 0;
+ }
+
+
+
+
+ sDevVAddrRequestStart = DevVAddr;
+ ui32PDRequestStart = ui32PDIndex;
+ sDevVAddrRequestEnd = sHighDevVAddr;
+ ui32PDRequestEnd = ui32PageTableCount - 1;
+
+
+ DevVAddr.uiAddr = DevVAddr.uiAddr & (~BRN31620_PDE_CACHE_FILL_MASK);
+
+
+ sHighDevVAddr.uiAddr = ((sHighDevVAddr.uiAddr + (BRN31620_PDE_CACHE_FILL_SIZE - 1)) & (~BRN31620_PDE_CACHE_FILL_MASK));
+
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ if (ui32PageTableCount == 0)
+ ui32PageTableCount = 1024;
+#endif
+
+ ui32PageTableCount -= ui32PDIndex;
+
+
+ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32Flags = 0;
+
+
+ if( MMU_IsHeapShared(pMMUHeap) )
+ {
+ ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
+ }
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x)",
+ pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ pMMUHeap->ui32PDBaseIndex,
+ ui32Size);
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc page table (page count == %08X)", ui32PageTableCount);
+ PDUMPCOMMENTWITHFLAGS(ui32Flags, "Page directory mods (page count == %08X)", ui32PageTableCount);
+ }
+#endif
+
+ for(i=0; i<ui32PageTableCount; i++)
+ {
+ if(ppsPTInfoList[i] == IMG_NULL)
+ {
+#if defined(FIX_HW_BRN_31620)
+
+ if (pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i])
+ {
+
+ if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
+ {
+ IMG_UINT32 ui32PDCacheLine = (ui32PDIndex + i) >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+
+ ppsPTInfoList[i] = pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i];
+ pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = IMG_NULL;
+
+ pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
+ }
+ }
+ else
+ {
+#endif
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_PT_INFO),
+ (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
+ "MMU Page Table Info");
+ if (ppsPTInfoList[i] == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
+ return IMG_FALSE;
+ }
+ OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
+#if defined(FIX_HW_BRN_31620)
+ }
+#endif
+ }
+#if defined(FIX_HW_BRN_31620)
+
+ if (ppsPTInfoList[i])
+ {
+#endif
+ if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
+ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
+ {
+ IMG_DEV_PHYADDR sDevPAddr;
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 j;
+#else
+#if !defined(FIX_HW_BRN_31620)
+
+ PVR_ASSERT(pui32PDEntry[i] == 0);
+#endif
+#endif
+ if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
+ return IMG_FALSE;
+ }
+#if defined(FIX_HW_BRN_31620)
+ bFlushSystemCache = IMG_TRUE;
+
+ {
+ IMG_UINT32 ui32PD;
+ IMG_UINT32 ui32PDCacheLine;
+ IMG_UINT32 ui32PDBitMaskIndex;
+ IMG_UINT32 ui32PDBitMaskShift;
+
+ ui32PD = ui32PDIndex + i;
+ ui32PDCacheLine = ui32PD >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
+ ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
+ ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
+ ui32ModifiedCachelines[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
+
+
+ if ((pMMUHeap->ui32PDBaseIndex + pMMUHeap->ui32PageTableCount) < (ui32PD + 1))
+ {
+ pMMUHeap->ui32PageTableCount = (ui32PD + 1) - pMMUHeap->ui32PDBaseIndex;
+ }
+
+ if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
+ {
+ pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
+ }
+ }
+#endif
+ switch(pMMUHeap->psDevArena->DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_SHARED :
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
+ {
+
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+
+ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
+ pui32PDEntry += ui32PDIndex;
+
+
+ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | pMMUHeap->ui32PDEPageSizeCtrl
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ #if defined(PDUMP)
+
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif
+ {
+
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif
+
+ psMMUContext = psMMUContext->psNext;
+ }
+#if defined(FIX_HW_BRN_31620)
+ bSharedPT = IMG_TRUE;
+#endif
+ break;
+ }
+ case DEVICE_MEMORY_HEAP_PERCONTEXT :
+ case DEVICE_MEMORY_HEAP_KERNEL :
+ {
+ MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+
+ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | pMMUHeap->ui32PDEPageSizeCtrl
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
+
+
+ PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
+ return IMG_FALSE;
+ }
+ }
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+
+
+
+
+ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
+#endif
+#if defined(FIX_HW_BRN_31620)
+
+ if (((ui32PDIndex + i) < ui32PDRequestStart) || ((ui32PDIndex + i) > ui32PDRequestEnd))
+ {
+ pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = ppsPTInfoList[i];
+ ppsPTInfoList[i] = IMG_NULL;
+ }
+#endif
+ }
+ else
+ {
+#if !defined(FIX_HW_BRN_31620)
+
+ PVR_ASSERT(pui32PDEntry[i] != 0);
+#endif
+ }
+#if defined(FIX_HW_BRN_31620)
+ }
+#endif
+ }
+
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(FIX_HW_BRN_31620)
+
+ if (bFlushSystemCache)
+ {
+ #endif
+
+ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
+ #endif
+ #if defined(FIX_HW_BRN_31620)
+ }
+
+
+ sHighDevVAddr.uiAddr = sHighDevVAddr.uiAddr - 1;
+
+
+ if (bFlushSystemCache)
+ {
+ MMU_CONTEXT *psMMUContext;
+
+ if (bSharedPT)
+ {
+ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
+
+ while(psMMUContext)
+ {
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
+ }
+
+
+ psMMUContext = psMMUContext->psNext;
+ }
+ }
+ else
+ {
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ pMMUHeap->psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
+ }
+ }
+
+
+ psMMUContext = pMMUHeap->psMMUContext;
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ IMG_UINT32 j;
+
+ for(j=0;j<BRN31620_CACHE_FLUSH_BITS_SIZE;j++)
+ {
+ if (ui32ModifiedCachelines[i] & (1 << j))
+ {
+ PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
+ MMU_PT_INFO *psTempPTInfo = IMG_NULL;
+ IMG_UINT32 *pui32Tmp;
+
+ ui32PDIndex = (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j) * BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX;
+
+
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ psTempPTInfo = psMMUContext->apsPTInfoList[ui32PDIndex];
+ }
+ else
+ {
+ psTempPTInfo = psMMUContext->apsPTInfoListSave[ui32PDIndex];
+ }
+
+ PVR_ASSERT(psTempPTInfo != IMG_NULL);
+
+ MakeKernelPageReadWrite(psTempPTInfo->PTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *) psTempPTInfo->PTPageCpuVAddr;
+ PVR_ASSERT(pui32Tmp != IMG_NULL);
+ pui32Tmp[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(psTempPTInfo->PTPageCpuVAddr);
+ PDUMPCOMMENT("BRN31620 Dump PTE for dummy page after wireing up new PT");
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psTempPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32Tmp[BRN31620_DUMMY_PTE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ }
+ }
+ }
+ #endif
+
+ return IMG_TRUE;
+}
+
+
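+/* PDump-only helpers: look up a context's PDump MMU context ID and fill in
+ * the PDUMP_MMU_ATTRIB block describing the PTE/PDE layout. */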
+#if defined(PDUMP)
+IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext)
+{
+ BM_CONTEXT *pBMContext = hDevMemContext;
+ PVR_ASSERT(pBMContext);
+
+ return pBMContext->psMMUContext->ui32PDumpMMUContextID;
+}
+
+static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DataPageMask,
+ IMG_UINT32 ui32PTSize)
+{
+
+ psMMUAttrib->sDevId = psDeviceNode->sDevId;
+
+ psMMUAttrib->pszPDRegRegion = IMG_NULL;
+ psMMUAttrib->ui32DataPageMask = ui32DataPageMask;
+
+ psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID;
+ psMMUAttrib->ui32PTSize = ui32PTSize;
+ psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT;
+
+ psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK;
+ psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT;
+}
+#endif
+
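+/* Create a new MMU context: allocate and initialise a page directory (zeroed,
+ * or pointing at the dummy PT when SUPPORT_SGX_MMU_DUMMY_PAGE is set), set up
+ * the device-wide dummy pages on first use, register the context with PDump
+ * and link it onto the device's context list. */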
+PVRSRV_ERROR
+MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
+{
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 i;
+ IMG_CPU_VIRTADDR pvPDCpuVAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ MMU_CONTEXT *psMMUContext;
+ IMG_HANDLE hPDOSMemHandle;
+ SYS_DATA *psSysData;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
+
+ SysAcquireData(&psSysData);
+#if defined(PDUMP)
+
+
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_CONTEXT),
+ (IMG_VOID **)&psMMUContext, IMG_NULL,
+ "MMU Context");
+ if (psMMUContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
+
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ psMMUContext->psDevInfo = psDevInfo;
+
+
+ psMMUContext->psDeviceNode = psDeviceNode;
+
+
+ if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
+ {
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ &pvPDCpuVAddr,
+ &hPDOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(pvPDCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle,
+ pvPDCpuVAddr);
+ }
+ else
+ {
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
+ }
+ sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ #if PAGE_TEST
+ PageTest(pvPDCpuVAddr, sPDDevPAddr);
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ &psDevInfo->pvDummyPTPageCpuVAddr,
+ &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(psDevInfo->pvDummyPTPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
+ psDevInfo->pvDummyPTPageCpuVAddr);
+ }
+ else
+ {
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
+ }
+ psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ &psDevInfo->pvDummyDataPageCpuVAddr,
+ &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+ if(psDevInfo->pvDummyDataPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
+ psDevInfo->pvDummyDataPageCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
+ }
+ psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+ IMG_UINT32 j;
+
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ &psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ &psDevInfo->hBRN31620DummyPageOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+
+ if(psDevInfo->pvBRN31620DummyPageCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPageOSMemHandle, 0);
+ }
+
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
+ for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
+ {
+ pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
+ }
+
+ psDevInfo->sBRN31620DummyPageDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+
+ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ &psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ &psDevInfo->hBRN31620DummyPTOSMemHandle) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+
+
+ if(psDevInfo->pvBRN31620DummyPTCpuVAddr)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ }
+ else
+ {
+ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPTOSMemHandle, 0);
+ }
+
+ OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);
+ psDevInfo->sBRN31620DummyPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+
+
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &hPDOSMemHandle);
+ if(!pvPDCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ #if PAGE_TEST
+ PageTest(pvPDCpuVAddr, sPDDevPAddr);
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hDummyPTPageOSMemHandle);
+ if(!psDevInfo->pvDummyPTPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hDummyDataPageOSMemHandle);
+ if(!psDevInfo->pvDummyDataPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+ IMG_UINT32 j;
+
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sBRN31620DummyPageDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvBRN31620DummyPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hBRN31620DummyPageOSMemHandle);
+ if(!psDevInfo->pvBRN31620DummyPageCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
+ for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
+ {
+ pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+
+ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sSysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
+ psDevInfo->sBRN31620DummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
+ psDevInfo->pvBRN31620DummyPTCpuVAddr = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &psDevInfo->hBRN31620DummyPTOSMemHandle);
+
+ if(!psDevInfo->pvBRN31620DummyPTCpuVAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+ }
+
+ OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+ }
+
+#if defined(FIX_HW_BRN_31620)
+ if (!psDevInfo->pvMMUContextList)
+ {
+
+ psDevInfo->hKernelMMUContext = psMMUContext;
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: saving kernel mmu context: %p", psMMUContext));
+ }
+#endif
+
+#if defined(PDUMP)
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc == IMG_NULL)
+ {
+
+ psMMUContext->bPDumpActive = IMG_TRUE;
+ }
+ else
+ {
+ psMMUContext->bPDumpActive = psPerProc->bPDumpActive;
+ }
+ }
+#endif
+
+#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
+ PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x%08x)",
+ sPDDevPAddr.uiAddr);
+#else
+ PDUMPCOMMENT("Alloc page directory for new MMU context, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
+ sPDDevPAddr.uiHighAddr, sPDDevPAddr.uiAddr);
+#endif
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG);
+#endif
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(psMMUContext);
+#endif
+
+ if (pvPDCpuVAddr)
+ {
+ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
+ return PVRSRV_ERROR_INVALID_CPU_ADDR;
+ }
+
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+ pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+ }
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+
+ if(!psDevInfo->pvMMUContextList)
+ {
+
+
+
+ MakeKernelPageReadWrite(psDevInfo->pvDummyPTPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
+ for(i=0; i<SGX_MMU_PT_SIZE; i++)
+ {
+ pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvDummyPTPageCpuVAddr);
+
+ PDUMPCOMMENT("Dummy Page table contents");
+	PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+
+
+
+ MakeKernelPageReadWrite(psDevInfo->pvDummyDataPageCpuVAddr);
+ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
+ for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
+ {
+ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
+ }
+ MakeKernelPageReadOnly(psDevInfo->pvDummyDataPageCpuVAddr);
+
+ PDUMPCOMMENT("Dummy Data Page contents");
+	PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#else
+
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+#endif
+
+#if defined(PDUMP)
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+#endif
+ {
+
+ PDUMPCOMMENT("Page directory contents");
+ PDUMPPDENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDCount = 0;
+ IMG_UINT32 *pui32PT;
+ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
+
+ PDUMPCOMMENT("BRN31620 Set up dummy PT");
+
+ MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ pui32PT = (IMG_UINT32 *) psDevInfo->pvBRN31620DummyPTCpuVAddr;
+ pui32PT[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_DUMMY_PAGE
+ | SGX_MMU_PTE_READONLY
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPTCpuVAddr);
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENT("BRN31620 Dump dummy PT contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ PDUMPCOMMENT("BRN31620 Dump dummy page contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+
+
+ for(i=0;i<SGX_MMU_PT_SIZE;i++)
+ {
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, &pui32PT[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+ PDUMPCOMMENT("BRN31620 Dump PDE wire up");
+
+ for(i=0;i<SGX_MMU_PD_SIZE;i++)
+ {
+ pui32Tmp[i] = 0;
+
+ if (ui32PDCount == BRN31620_DUMMY_PDE_INDEX)
+ {
+ MakeKernelPageReadWrite(pvPDCpuVAddr);
+ pui32Tmp[i] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_DUMMY_PAGE
+ | SGX_MMU_PDE_VALID;
+ MakeKernelPageReadOnly(pvPDCpuVAddr);
+ }
+ PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, (IMG_VOID *) &pui32Tmp[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ ui32PDCount++;
+ if (ui32PDCount == BRN31620_PDES_PER_CACHE_LINE_SIZE)
+ {
+
+ ui32PDCount = 0;
+ }
+ }
+
+
+
+ PDUMPCOMMENT("BRN31620 dummy Page table contents");
+ PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+#endif
+#if defined(PDUMP)
+
+ {
+ PVRSRV_ERROR eError;
+
+ IMG_UINT32 ui32MMUType = 1;
+
+ #if defined(SGX_FEATURE_36BIT_MMU)
+ ui32MMUType = 3;
+ #else
+ #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+ ui32MMUType = 2;
+ #endif
+ #endif
+
+ eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
+ psDeviceNode->sDevId.pszPDumpDevName,
+ &psMMUContext->ui32PDumpMMUContextID,
+ ui32MMUType,
+ PDUMP_PT_UNIQUETAG,
+ hPDOSMemHandle,
+ pvPDCpuVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
+ return eError;
+ }
+ }
+
+
+ PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ for(i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ psMMUContext->ui32PDChangeMask[i] = 0;
+ }
+
+ for(i=0;i<BRN31620_CACHE_FLUSH_SIZE;i++)
+ {
+ psMMUContext->ui32PDCacheRangeRefCount[i] = 0;
+ }
+
+ for(i=0;i<SGX_MAX_PD_ENTRIES;i++)
+ {
+ psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
+ }
+#endif
+
+ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
+ psMMUContext->sPDDevPAddr = sPDDevPAddr;
+ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
+
+
+ *ppsMMUContext = psMMUContext;
+
+
+ *psPDDevPAddr = sPDDevPAddr;
+
+
+ psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
+ psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(psMMUContext);
+#endif
+
+ return PVRSRV_OK;
+}
+
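+/* Tear down an MMU context: clear and free its page directory (and the
+ * device-wide dummy pages when the last context goes away), then unlink it
+ * from the device's context list. */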
+IMG_VOID
+MMU_Finalise (MMU_CONTEXT *psMMUContext)
+{
+ IMG_UINT32 *pui32Tmp, i;
+ SYS_DATA *psSysData;
+ MMU_CONTEXT **ppsMMUContext;
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined(FIX_HW_BRN_31620)
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
+ MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
+#endif
+
+ SysAcquireData(&psSysData);
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2);
+
+
+#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
+ PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x%08x)",
+ psMMUContext->sPDDevPAddr.uiAddr);
+#else
+ PDUMPCOMMENT("Free page directory, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)",
+ psMMUContext->sPDDevPAddr.uiHighAddr, psMMUContext->sPDDevPAddr.uiAddr);
+#endif
+#endif
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+#endif
+
+ pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
+
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+
+ for(i=0; i<SGX_MMU_PD_SIZE; i++)
+ {
+
+ pui32Tmp[i] = 0;
+ }
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+
+
+
+
+
+ if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
+ {
+#if defined(FIX_HW_BRN_31620)
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
+#endif
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psMMUContext->pvPDCpuVAddr,
+ psMMUContext->hPDOSMemHandle);
+
+#if defined(FIX_HW_BRN_31620)
+
+ if (!psMMUContextList->psNext)
+ {
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ psDevInfo->hBRN31620DummyPageOSMemHandle);
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ psDevInfo->hBRN31620DummyPTOSMemHandle);
+
+ }
+#endif
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ if(!psMMUContextList->psNext)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvDummyPTPageCpuVAddr,
+ psDevInfo->hDummyPTPageOSMemHandle);
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pvDummyDataPageCpuVAddr,
+ psDevInfo->hDummyDataPageOSMemHandle);
+ }
+#endif
+ }
+ else
+ {
+ IMG_SYS_PHYADDR sSysPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+
+
+ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle,
+ psMMUContext->pvPDCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+
+ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psMMUContext->hPDOSMemHandle);
+
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ if(!psMMUContextList->psNext)
+ {
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
+ psDevInfo->pvDummyPTPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+
+ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hDummyPTPageOSMemHandle);
+
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
+ psDevInfo->pvDummyDataPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+
+ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hDummyDataPageOSMemHandle);
+
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+
+ if(!psMMUContextList->psNext)
+ {
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
+ psDevInfo->pvBRN31620DummyPageCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+
+ OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPageCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBRN31620DummyPageOSMemHandle);
+
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+
+
+ PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
+ psDevInfo->pvBRN31620DummyPTCpuVAddr);
+ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
+
+
+ OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPTCpuVAddr,
+ SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBRN31620DummyPTOSMemHandle);
+
+ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
+ }
+#endif
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
+
+
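+	/* Unlink this context from the device's MMU context list */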
+ ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
+ while(*ppsMMUContext)
+ {
+ if(*ppsMMUContext == psMMUContext)
+ {
+
+ *ppsMMUContext = psMMUContext->psNext;
+ break;
+ }
+
+
+ ppsMMUContext = &((*ppsMMUContext)->psNext);
+ }
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
+
+}
+
+
+IMG_VOID
+MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
+{
+ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
+ IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
+ IMG_UINT32 ui32PDEntry;
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+#endif
+
+
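+	/* Advance both PD pointers to the entries covering the heap's base device virtual address */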
+ pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+ pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+
+
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Page directory shared heap range copy");
+ PDUMPCOMMENT(" (Source heap MMU Context ID == %u, PT count == 0x%x)",
+ psMMUHeap->psMMUContext->ui32PDumpMMUContextID,
+ psMMUHeap->ui32PageTableCount);
+ PDUMPCOMMENT(" (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
+#endif
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(psMMUContext);
+#endif
+
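+	/* Copy the source context's PD entries for this shared heap into the destination context */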
+ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
+ {
+#if (!defined(SUPPORT_SGX_MMU_DUMMY_PAGE)) && (!defined(FIX_HW_BRN_31620))
+
+ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
+#endif
+ MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
+
+ pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
+ MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
+ if (pui32PDCpuVAddr[ui32PDEntry])
+ {
+
+ #if defined(PDUMP)
+
+ #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ if(psMMUContext->bPDumpActive)
+ #endif
+ {
+ PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ }
+ #endif
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ bInvalidateDirectoryCache = IMG_TRUE;
+#endif
+ }
+ }
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(psMMUContext);
+#endif
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ if (bInvalidateDirectoryCache)
+ {
+
+
+
+
+ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
+ }
+#endif
+}
+
+
+static IMG_VOID
+MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32Tmp;
+ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ sTmpDevVAddr = sDevVAddr;
+
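+	/* Invalidate the PTE for each data page, freeing any page table that ends up with no valid entries */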
+ for(i=0; i<ui32PageCount; i++)
+ {
+ MMU_PT_INFO **ppsPTInfoList;
+
+
+ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+ {
+
+ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
+
+
+ if (!ppsPTInfoList[0])
+ {
+				PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i, ui32PDIndex, ui32PTIndex));
+
+
+ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
+
+
+ continue;
+ }
+
+
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+
+ if (!pui32Tmp)
+ {
+ continue;
+ }
+
+ CheckPT(ppsPTInfoList[0]);
+
+
+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
+ {
+ ppsPTInfoList[0]->ui32ValidPTECount--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
+ }
+
+
+ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+#else
+
+#if defined(FIX_HW_BRN_31620)
+ BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
+#else
+ pui32Tmp[ui32PTIndex] = 0;
+#endif
+#endif
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+ CheckPT(ppsPTInfoList[0]);
+ }
+
+
+
+		if (ppsPTInfoList[0] && (ppsPTInfoList[0]->ui32ValidPTECount == 0))
+ {
+#if defined(FIX_HW_BRN_31620)
+ if (BRN31620FreePageTable(psMMUHeap, ui32PDIndex) == IMG_TRUE)
+ {
+ bInvalidateDirectoryCache = IMG_TRUE;
+ }
+#else
+ _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
+ bInvalidateDirectoryCache = IMG_TRUE;
+#endif
+ }
+
+
+ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
+ }
+
+ if(bInvalidateDirectoryCache)
+ {
+ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
+ }
+ else
+ {
+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables(psMMUHeap,
+ sDevVAddr,
+ psMMUHeap->ui32DataPageSize * ui32PageCount,
+ IMG_TRUE,
+ hUniqueTag);
+#endif
+}
+
+
+static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
+ IMG_SIZE_T ui32Start,
+ IMG_SIZE_T ui32End,
+ IMG_HANDLE hUniqueTag)
+{
+ MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
+ IMG_DEV_VIRTADDR Start;
+
+ Start.uiAddr = (IMG_UINT32)ui32Start;
+
+ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (IMG_UINT32)((ui32End - ui32Start) >> pMMUHeap->ui32PTShift), hUniqueTag);
+}
+
+MMU_HEAP *
+MMU_Create (MMU_CONTEXT *psMMUContext,
+ DEV_ARENA_DESCRIPTOR *psDevArena,
+ RA_ARENA **ppsVMArena,
+ PDUMP_MMU_ATTRIB **ppsMMUAttrib)
+{
+ MMU_HEAP *pMMUHeap;
+ IMG_UINT32 ui32ScaleSize;
+
+ PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib);
+
+ PVR_ASSERT (psDevArena != IMG_NULL);
+
+ if (psDevArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
+ return IMG_NULL;
+ }
+
+ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof (MMU_HEAP),
+ (IMG_VOID **)&pMMUHeap, IMG_NULL,
+ "MMU Heap");
+ if (pMMUHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
+ return IMG_NULL;
+ }
+
+ pMMUHeap->psMMUContext = psMMUContext;
+ pMMUHeap->psDevArena = psDevArena;
+
+
+
+
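+	/* Derive the page table geometry and PDE page-size control from the heap's data page size */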
+ switch(pMMUHeap->psDevArena->ui32DataPageSize)
+ {
+ case 0x1000:
+ ui32ScaleSize = 0;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
+ break;
+#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
+ case 0x4000:
+ ui32ScaleSize = 2;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
+ break;
+ case 0x10000:
+ ui32ScaleSize = 4;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
+ break;
+ case 0x40000:
+ ui32ScaleSize = 6;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
+ break;
+ case 0x100000:
+ ui32ScaleSize = 8;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
+ break;
+ case 0x400000:
+ ui32ScaleSize = 10;
+ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
+ break;
+#endif
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
+ goto ErrorFreeHeap;
+ }
+
+
+ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
+ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
+ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
+
+ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
+ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
+ pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
+ pMMUHeap->ui32PTSize = (IMG_UINT32)(1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
+
+
+ if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
+ {
+ pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
+ }
+ pMMUHeap->ui32PTNumEntriesAllocated = pMMUHeap->ui32PTSize >> 2;
+
+
+ pMMUHeap->ui32PTNumEntriesUsable = (IMG_UINT32)(1UL << pMMUHeap->ui32PTBitWidth);
+
+
+ pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
+ pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
+ pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
+
+
+#if !defined (SUPPORT_EXTERNAL_SYSTEM_CACHE)
+
+
+
+
+ if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
+ {
+
+
+
+ PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
+ & (pMMUHeap->ui32DataPageMask
+ | pMMUHeap->ui32PTMask)) == 0);
+ }
+#endif
+
+ pMMUHeap->ui32PTETotalUsable = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
+
+
+ pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
+
+
+
+
+ pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotalUsable + pMMUHeap->ui32PTNumEntriesUsable - 1)
+ >> pMMUHeap->ui32PTBitWidth;
+ PVR_ASSERT(pMMUHeap->ui32PageTableCount > 0);
+
+
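+	/* Create the arena that hands out device virtual address space for this heap */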
+ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
+ psDevArena->BaseDevVAddr.uiAddr,
+ psDevArena->ui32Size,
+ IMG_NULL,
+ MAX(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize),
+ IMG_NULL,
+ IMG_NULL,
+ &MMU_FreePageTables,
+ pMMUHeap);
+
+ if (pMMUHeap->psVMArena == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
+ goto ErrorFreePagetables;
+ }
+
+#if defined(PDUMP)
+
+ MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib,
+ psMMUContext->psDeviceNode,
+ pMMUHeap->ui32DataPageMask,
+ pMMUHeap->ui32PTSize);
+ *ppsMMUAttrib = &pMMUHeap->sMMUAttrib;
+
+ PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)",
+ psDevArena->pszName,
+ psDevArena->ui32Size,
+ pMMUHeap->ui32DataPageSize,
+ psDevArena->BaseDevVAddr.uiAddr);
+#endif
+
+#if 0
+
+ if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
+ {
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32XTileStride;
+
+
+
+
+
+
+ ui32XTileStride = 2;
+
+ ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
+ & ((psDevArena->BaseDevVAddr.uiAddr>>20)
+ << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
+ |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
+ & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
+ << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
+ |(EUR_CR_BIF_TILE0_CFG_MASK
+ & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_TILE0, ui32RegVal);
+ }
+#endif
+
+
+
+ *ppsVMArena = pMMUHeap->psVMArena;
+
+ return pMMUHeap;
+
+
+ErrorFreePagetables:
+ _DeferredFreePageTables (pMMUHeap);
+
+ErrorFreeHeap:
+ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
+
+
+ return IMG_NULL;
+}
+
+IMG_VOID
+MMU_Delete (MMU_HEAP *pMMUHeap)
+{
+ if (pMMUHeap != IMG_NULL)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
+
+ if(pMMUHeap->psVMArena)
+ {
+ RA_Delete (pMMUHeap->psVMArena);
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->BaseDevVAddr.uiAddr,
+ pMMUHeap->ui32PageTableCount);
+#endif
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(pMMUHeap->psMMUContext);
+#endif
+ _DeferredFreePageTables (pMMUHeap);
+#ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(pMMUHeap->psMMUContext);
+#endif
+
+ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
+
+ }
+}
+
+IMG_BOOL
+MMU_Alloc (MMU_HEAP *pMMUHeap,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ IMG_BOOL bStatus;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
+ uSize, uFlags, uDevVAddrAlignment));
+
+
+
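+	/* Unless the caller supplied a device virtual address, allocate one from the heap's VM arena */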
+ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
+ {
+ IMG_UINTPTR_T uiAddr;
+
+ bStatus = RA_Alloc (pMMUHeap->psVMArena,
+ uSize,
+ pActualSize,
+ IMG_NULL,
+ 0,
+ uDevVAddrAlignment,
+ 0,
+ IMG_NULL,
+ 0,
+ &uiAddr);
+ if(!bStatus)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+ return bStatus;
+ }
+
+ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
+ }
+
+ #ifdef SUPPORT_SGX_MMU_BYPASS
+ EnableHostAccess(pMMUHeap->psMMUContext);
+ #endif
+
+
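+	/* Make sure page tables are allocated for the whole range */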
+ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, (IMG_UINT32)uSize);
+
+ #ifdef SUPPORT_SGX_MMU_BYPASS
+ DisableHostAccess(pMMUHeap->psMMUContext);
+ #endif
+
+ if (!bStatus)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d",
+ psDevVAddr->uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
+ {
+
+ RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
+ }
+ }
+
+ return bStatus;
+}
+
+IMG_VOID
+MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
+{
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+ if (pMMUHeap == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d",
+ DevVAddr.uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+
+ if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
+ (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
+ {
+ RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
+ return;
+ }
+
+	PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap)",
+ DevVAddr.uiAddr,
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID));
+}
+
+IMG_VOID
+MMU_Enable (MMU_HEAP *pMMUHeap)
+{
+ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
+
+}
+
+IMG_VOID
+MMU_Disable (MMU_HEAP *pMMUHeap)
+{
+ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
+
+}
+
+#if defined(FIX_HW_BRN_31620)
+IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask)
+{
+ IMG_UINT32 i;
+
+ for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
+ {
+ pui32RangeMask[i] = pMMUContext->ui32PDChangeMask[i];
+
+
+ pMMUContext->ui32PDChangeMask[i] = 0;
+ }
+}
+
+IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr)
+{
+ *psDevPAddr = pMMUContext->sPDDevPAddr;
+}
+
+#endif
+#if defined(PDUMP)
+static IMG_VOID
+MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_BOOL bForUnmap,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 ui32NumPTEntries;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32PTEntry;
+
+ MMU_PT_INFO **ppsPTInfoList;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTDumpCount;
+
+
+ ui32NumPTEntries = (IMG_UINT32)((uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift);
+
+
+ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+
+ ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+
+ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
+
+
+ while(ui32NumPTEntries > 0)
+ {
+ MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
+
+ if(ui32NumPTEntries <= pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex)
+ {
+ ui32PTDumpCount = ui32NumPTEntries;
+ }
+ else
+ {
+ ui32PTDumpCount = pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex;
+ }
+
+ if (psPTInfo)
+ {
+ IMG_UINT32 ui32Flags = 0;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+ pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
+ PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
+ }
+
+
+ ui32NumPTEntries -= ui32PTDumpCount;
+
+
+ ui32PTIndex = 0;
+ }
+
+ PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
+}
+#endif
+
+
+static IMG_VOID
+MMU_MapPage (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_DEV_PHYADDR DevPAddr,
+ IMG_UINT32 ui32MemFlags)
+{
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 *pui32Tmp;
+ IMG_UINT32 ui32MMUFlags = 0;
+ MMU_PT_INFO **ppsPTInfoList;
+
+
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+
+
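+	/* Convert the services memory flags into PTE protection bits */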
+ if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
+ {
+
+ ui32MMUFlags = 0;
+ }
+ else if(PVRSRV_MEM_READ & ui32MemFlags)
+ {
+
+ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
+ }
+ else if(PVRSRV_MEM_WRITE & ui32MemFlags)
+ {
+
+ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
+ }
+
+
+ if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
+ {
+ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
+ }
+
+#if !defined(FIX_HW_BRN_25503)
+
+ if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
+ {
+ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
+ }
+#endif
+
+
+
+
+
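+	/* Locate the page table for this address and the PTE index within it */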
+ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
+
+ CheckPT(ppsPTInfoList[0]);
+
+
+ ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+ {
+ IMG_UINT32 uTmp = pui32Tmp[ui32Index];
+
+
+#if defined(FIX_HW_BRN_31620)
+ if ((uTmp & SGX_MMU_PTE_VALID) && ((DevVAddr.uiAddr & BRN31620_PDE_CACHE_FILL_MASK) != BRN31620_DUMMY_PAGE_OFFSET))
+#else
+ if ((uTmp & SGX_MMU_PTE_VALID) != 0)
+#endif
+
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u",
+ DevVAddr.uiAddr,
+ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
+ ui32Index ));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08X", DevPAddr.uiAddr));
+#if PT_DUMP
+ DumpPT(ppsPTInfoList[0]);
+#endif
+ }
+#if !defined(FIX_HW_BRN_31620)
+ PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0);
+#endif
+ }
+#endif
+
+
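+	/* Write the new PTE and account for it in the page table's valid-entry count */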
+ ppsPTInfoList[0]->ui32ValidPTECount++;
+
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+
+ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
+ | SGX_MMU_PTE_VALID
+ | ui32MMUFlags;
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+ CheckPT(ppsPTInfoList[0]);
+}
+
+
+IMG_VOID
+MMU_MapScatter (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+#if defined(PDUMP)
+ IMG_DEV_VIRTADDR MapBaseDevVAddr;
+#endif
+ IMG_UINT32 uCount, i;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+#if defined(PDUMP)
+ MapBaseDevVAddr = DevVAddr;
+#else
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
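+	/* Map each (potentially non-contiguous) system page in turn */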
+ for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
+ {
+ IMG_SYS_PHYADDR sSysAddr;
+
+ sSysAddr = psSysAddr[i];
+
+
+
+ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
+
+ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
+ DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
+ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
+#endif
+}
+
+IMG_VOID
+MMU_MapPages (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_DEV_PHYADDR DevPAddr;
+#if defined(PDUMP)
+ IMG_DEV_VIRTADDR MapBaseDevVAddr;
+#endif
+ IMG_UINT32 uCount;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+
+ PVR_ASSERT (pMMUHeap != IMG_NULL);
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, size=0x%x",
+ pMMUHeap->psDevArena->pszName,
+ pMMUHeap->psDevArena->ui32HeapID,
+ DevVAddr.uiAddr,
+ SysPAddr.uiAddr,
+ uSize));
+
+
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+#if defined(PDUMP)
+ MapBaseDevVAddr = DevVAddr;
+#else
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
+
+
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+#if defined(FIX_HW_BRN_23281)
+ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
+ {
+ ui32VAdvance *= 2;
+ }
+#endif
+
+
+
+
+ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
+ {
+ ui32PAdvance = 0;
+ }
+
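+	/* Map the range one data page at a time; for dummy allocations the physical address does not advance */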
+ for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
+ {
+ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
+ DevVAddr.uiAddr += ui32VAdvance;
+ DevPAddr.uiAddr += ui32PAdvance;
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
+#endif
+}
+
+IMG_VOID
+MMU_MapShadow (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_SIZE_T uByteSize,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR *pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 uOffset = 0;
+ IMG_DEV_VIRTADDR MapDevVAddr;
+ IMG_UINT32 ui32VAdvance;
+ IMG_UINT32 ui32PAdvance;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%x, CPUVAddr:%08X",
+ MapBaseDevVAddr.uiAddr,
+ uByteSize,
+ (IMG_UINTPTR_T)CpuVAddr));
+
+
+ ui32VAdvance = pMMUHeap->ui32DataPageSize;
+ ui32PAdvance = pMMUHeap->ui32DataPageSize;
+
+
+ PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
+ PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
+ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
+
+#if defined(FIX_HW_BRN_23281)
+ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
+ {
+ ui32VAdvance *= 2;
+ }
+#endif
+
+
+
+
+ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
+ {
+ ui32PAdvance = 0;
+ }
+
+
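+	/* Walk the CPU mapping page by page, translating each CPU physical page to a device physical address */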
+ MapDevVAddr = MapBaseDevVAddr;
+ for (i=0; i<uByteSize; i+=ui32VAdvance)
+ {
+ IMG_CPU_PHYADDR CpuPAddr;
+ IMG_DEV_PHYADDR DevPAddr;
+
+ if(CpuVAddr)
+ {
+ CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
+ (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
+ }
+ else
+ {
+ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
+ }
+ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
+
+
+ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
+ uOffset,
+ (IMG_UINTPTR_T)CpuVAddr + uOffset,
+ CpuPAddr.uiAddr,
+ MapDevVAddr.uiAddr,
+ DevPAddr.uiAddr));
+
+ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
+
+
+ MapDevVAddr.uiAddr += ui32VAdvance;
+ uOffset += ui32PAdvance;
+ }
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
+#endif
+}
+
+
+IMG_VOID
+MMU_UnmapPages (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag)
+{
+ IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 *pui32Tmp;
+
+#if !defined (PDUMP)
+ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
+#endif
+
+
+ sTmpDevVAddr = sDevVAddr;
+
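+	/* Invalidate the PTE for each page in the range; the page tables themselves remain allocated */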
+ for(i=0; i<ui32PageCount; i++)
+ {
+ MMU_PT_INFO **ppsPTInfoList;
+
+
+ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
+
+
+ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
+
+
+ if (!ppsPTInfoList[0])
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
+ sTmpDevVAddr.uiAddr,
+ sDevVAddr.uiAddr,
+ i,
+ ui32PDIndex,
+ ui32PTIndex));
+
+
+ sTmpDevVAddr.uiAddr += uPageSize;
+
+
+ continue;
+ }
+
+ CheckPT(ppsPTInfoList[0]);
+
+
+ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+
+ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
+ {
+ ppsPTInfoList[0]->ui32ValidPTECount--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
+ sTmpDevVAddr.uiAddr,
+ sDevVAddr.uiAddr,
+ i,
+ ui32PDIndex,
+ ui32PTIndex));
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex]));
+ }
+
+
+ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
+
+ MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+#else
+
+#if defined(FIX_HW_BRN_31620)
+ BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
+#else
+ pui32Tmp[ui32PTIndex] = 0;
+#endif
+#endif
+ MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
+
+ CheckPT(ppsPTInfoList[0]);
+
+
+ sTmpDevVAddr.uiAddr += uPageSize;
+ }
+
+ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
+
+#if defined(PDUMP)
+ MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
+#endif
+}
+
+
+IMG_DEV_PHYADDR
+MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
+{
+ IMG_UINT32 *pui32PageTable;
+ IMG_UINT32 ui32Index;
+ IMG_DEV_PHYADDR sDevPAddr;
+ MMU_PT_INFO **ppsPTInfoList;
+
+
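+	/* Look up the PDE, then the PTE, and rebuild the device physical address from it */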
+ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
+
+
+ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
+ if (!ppsPTInfoList[0])
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
+ sDevPAddr.uiAddr = 0;
+ return sDevPAddr;
+ }
+
+
+ ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
+
+
+ pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
+
+
+ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
+
+
+ sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
+
+
+ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
+
+ return sDevPAddr;
+}
+
+
+IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
+{
+ return (pMMUContext->sPDDevPAddr);
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR *pDevPAddr,
+ IMG_CPU_PHYADDR *pCpuPAddr)
+{
+ MMU_HEAP *pMMUHeap;
+ IMG_DEV_PHYADDR DevPAddr;
+
+
+
+ pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
+
+ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
+ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
+ pDevPAddr->uiAddr = DevPAddr.uiAddr;
+
+ return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+
+PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_PHYADDR *psPDDevPAddr)
+{
+ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ IMG_HANDLE hOSMemHandle = IMG_NULL;
+ IMG_BYTE *pui8MemBlock = IMG_NULL;
+ IMG_SYS_PHYADDR sMemBlockSysPAddr;
+ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+
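+	/* The PD/PT/data page triple comes from the OS when there is no local device memory arena, otherwise it is carved out of the arena and mapped into the kernel */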
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+
+ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ 3 * SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ (IMG_VOID **)&pui8MemBlock,
+ &hOSMemHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
+ return eError;
+ }
+
+
+ if(pui8MemBlock)
+ {
+ sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle,
+ pui8MemBlock);
+ }
+ else
+ {
+
+ sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
+ }
+ }
+ else
+ {
+
+
+ if(RA_Alloc(psLocalDevMemArena,
+ 3 * SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
+ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
+ SGX_MMU_PAGE_SIZE * 3,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &hOSMemHandle);
+ if(!pui8MemBlock)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ }
+
+ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
+ psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
+ psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
+ psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
+
+
+ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
+ psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
+
+
+ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
+ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
+
+ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
+
+ return PVRSRV_OK;
+}
+
+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ IMG_SYS_PHYADDR sPDSysPAddr;
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ 3 * SGX_MMU_PAGE_SIZE,
+ psDevInfo->pui32BIFResetPD,
+ psDevInfo->hBIFResetPDOSMemHandle);
+ }
+ else
+ {
+ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
+ 3 * SGX_MMU_PAGE_SIZE,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBIFResetPDOSMemHandle);
+
+ sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
+ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
+ }
+}
+
+IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32FaultAddr)
+{
+ MMU_CONTEXT *psMMUContext = psDevInfo->pvMMUContextList;
+
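+	/* Find the MMU context whose page directory matches the faulting directory base address */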
+ while (psMMUContext && (psMMUContext->sPDDevPAddr.uiAddr != ui32PDDevPAddr))
+ {
+ psMMUContext = psMMUContext->psNext;
+ }
+
+ if (psMMUContext)
+ {
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 ui32PDIndex;
+
+ PVR_LOG(("Found MMU context for page fault 0x%08x", ui32FaultAddr));
+
+ ui32PTIndex = (ui32FaultAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+ ui32PDIndex = (ui32FaultAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PT_SHIFT + SGX_MMU_PAGE_SHIFT);
+
+ if (psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+ IMG_UINT32 *pui32Ptr = psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ IMG_UINT32 ui32PTE = pui32Ptr[ui32PTIndex];
+
+ PVR_LOG(("PDE valid: PTE = 0x%08x (PhysAddr = 0x%08x, %s)",
+ ui32PTE,
+ ui32PTE & SGX_MMU_PTE_ADDR_MASK,
+ ui32PTE & SGX_MMU_PTE_VALID?"valid":"Invalid"));
+ }
+ else
+ {
+ PVR_LOG(("Found PT info but no CPU address"));
+ }
+ }
+ else
+ {
+ PVR_LOG(("No PDE found"));
+ }
+ }
+}
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
+ IMG_HANDLE hPDPageOSMemHandle = IMG_NULL;
+ IMG_UINT32 *pui32PD = IMG_NULL;
+ IMG_UINT32 *pui32PT = IMG_NULL;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sPTDevPAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32PDOffset;
+ IMG_UINT32 ui32PTOffset;
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+
+ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ (IMG_VOID **)&pui32PT,
+ &hPTPageOSMemHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
+ return eError;
+ }
+ ui32PTOffset = 0;
+
+ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ SGX_MMU_PAGE_SIZE,
+ IMG_NULL,
+ 0,
+ (IMG_VOID **)&pui32PD,
+ &hPDPageOSMemHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
+ return eError;
+ }
+ ui32PDOffset = 0;
+
+
+ if(pui32PT)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(hPTPageOSMemHandle,
+ pui32PT);
+ }
+ else
+ {
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
+ }
+ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ if(pui32PD)
+ {
+ sCpuPAddr = OSMapLinToCPUPhys(hPDPageOSMemHandle,
+ pui32PD);
+ }
+ else
+ {
+
+ sCpuPAddr = OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
+ }
+ sPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ }
+ else
+ {
+
+
+ if(RA_Alloc(psLocalDevMemArena,
+ SGX_MMU_PAGE_SIZE * 2,
+ IMG_NULL,
+ IMG_NULL,
+ 0,
+ SGX_MMU_PAGE_SIZE,
+ 0,
+ IMG_NULL,
+ 0,
+ &(psDevInfo->sBRN22997SysPAddr.uiAddr))!= IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+
+ sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
+ pui32PT = OSMapPhysToLin(sCpuPAddr,
+ SGX_MMU_PAGE_SIZE * 2,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ &hPTPageOSMemHandle);
+ if(!pui32PT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR failed to map page tables"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ ui32PTOffset = 0;
+
+
+ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
+
+ pui32PD = pui32PT + SGX_MMU_PAGE_SIZE/sizeof(IMG_UINT32);
+ ui32PDOffset = SGX_MMU_PAGE_SIZE;
+ hPDPageOSMemHandle = hPTPageOSMemHandle;
+ sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
+ }
+
+ OSMemSet(pui32PD, 0, SGX_MMU_PAGE_SIZE);
+ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
+
+
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDPageOSMemHandle, ui32PDOffset, pui32PD, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG);
+ PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPTPageOSMemHandle, ui32PTOffset, pui32PT, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, hPDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, hPTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
+
+ psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
+ psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
+ psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
+ psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
+ psDevInfo->pui32BRN22997PD = pui32PD;
+ psDevInfo->pui32BRN22997PT = pui32PT;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 *pui32PD = psDevInfo->pui32BRN22997PD;
+ IMG_UINT32 *pui32PT = psDevInfo->pui32BRN22997PT;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ volatile IMG_UINT32 *pui32HostPort;
+ IMG_UINT32 ui32BIFCtrl;
+
+
+
+
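+	/* Set up a temporary PD/PT mapping of the host port alias so a dummy read can be issued through it */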
+ pui32HostPort = (volatile IMG_UINT32*)(((IMG_UINT8*)psDevInfo->pvHostPortBaseKM) + SYS_SGX_HOSTPORT_BRN23030_OFFSET);
+
+
+ sDevVAddr.uiAddr = SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
+
+ ui32PDIndex = (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+
+ pui32PD[ui32PDIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_VALID;
+
+ pui32PT[ui32PTIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
+
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
+ psDevInfo->sBRN22997PDDevPAddr.uiAddr);
+ PDUMPPDREG(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
+
+
+ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+
+
+ if (pui32HostPort)
+ {
+
+ IMG_UINT32 ui32Tmp;
+ ui32Tmp = *pui32HostPort;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Host Port not present for BRN22997 workaround"));
+ }
+
+
+
+
+
+
+
+ PDUMPCOMMENT("RDW :SGXMEM:v4:%08X\r\n", sDevVAddr.uiAddr);
+
+ PDUMPCOMMENT("SAB :SGXMEM:v4:%08X 4 0 hostport.bin", sDevVAddr.uiAddr);
+
+
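+	/* Remove the temporary mapping and invalidate the directory cache again */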
+ pui32PD[ui32PDIndex] = 0;
+ pui32PT[ui32PTIndex] = 0;
+
+
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PDPageOSMemHandle, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+ PDUMPMEMPTENTRIES(&psDevInfo->sMMUAttrib, psDevInfo->hBRN22997PTPageOSMemHandle, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+}
+
+
+IMG_VOID WorkaroundBRN22997Free(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+ PDUMPFREEPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN22997PDPageOSMemHandle, psDevInfo->pui32BRN22997PD, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG);
+ PDUMPFREEPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN22997PTPageOSMemHandle, psDevInfo->pui32BRN22997PT, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
+
+
+ if(psLocalDevMemArena == IMG_NULL)
+ {
+ if (psDevInfo->pui32BRN22997PD != IMG_NULL)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pui32BRN22997PD,
+ psDevInfo->hBRN22997PDPageOSMemHandle);
+ }
+
+ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
+ {
+ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
+ SGX_MMU_PAGE_SIZE,
+ psDevInfo->pui32BRN22997PT,
+ psDevInfo->hBRN22997PTPageOSMemHandle);
+ }
+ }
+ else
+ {
+ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
+ {
+ OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
+ SGX_MMU_PAGE_SIZE * 2,
+ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
+ psDevInfo->hBRN22997PTPageOSMemHandle);
+
+
+ RA_Free(psLocalDevMemArena, psDevInfo->sBRN22997SysPAddr.uiAddr, IMG_FALSE);
+ }
+ }
+}
+#endif
+
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT32 *pui32PT;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+ sMMUAttrib = psDevInfo->sMMUAttrib;
+#if defined(PDUMP)
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+
+#if defined(PDUMP)
+ {
+ IMG_CHAR szScript[128];
+
+ sprintf(szScript, "MALLOC :EXTSYSCACHE:PA_%08X%08X %u %u 0x%08X\r\n", 0, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr);
+ PDumpOSWriteString2(szScript, PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
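+	/* Write a PTE for the external system cache register page into the kernel context's page table at its fixed device virtual address */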
+ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+ pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+
+ MakeKernelPageReadWrite(pui32PT);
+
+ pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+ MakeKernelPageReadOnly(pui32PT);
+#if defined(PDUMP)
+
+ {
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_UINT32 ui32PageMask;
+ IMG_UINT32 ui32PTE;
+ PVRSRV_ERROR eErr;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+
+ ui32PageMask = sMMUAttrib.ui32PTSize - 1;
+ sCpuPAddr = OSMapLinToCPUPhys(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->hPTPageOSMemHandle, &pui32PT[ui32PTIndex]);
+ sDevPAddr = SysCpuPAddrToDevPAddr(sMMUAttrib.sDevId.eDeviceType, sCpuPAddr);
+ ui32PTE = *((IMG_UINT32 *) (&pui32PT[ui32PTIndex]));
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n",
+ sMMUAttrib.sDevId.pszPDumpDevName,
+ (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PT_UNIQUETAG,
+ (sDevPAddr.uiAddr) & ~ui32PageMask,
+ (sDevPAddr.uiAddr) & ui32PageMask,
+ "EXTSYSCACHE",
+ (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PD_UNIQUETAG,
+ (ui32PTE & sMMUAttrib.ui32PDEMask) << sMMUAttrib.ui32PTEAlignShift,
+ ui32PTE & ~sMMUAttrib.ui32PDEMask);
+ if(eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ SYS_DATA *psSysData;
+ RA_ARENA *psLocalDevMemArena;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+	IMG_UINT32 *pui32PT = IMG_NULL;
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+
+ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+
+ sMMUAttrib = psDevInfo->sMMUAttrib;
+
+#if defined(PDUMP)
+ MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
+ SGX_MMU_PAGE_MASK,
+ SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
+#endif
+ SysAcquireData(&psSysData);
+
+ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
+
+
+ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+
+ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex])
+ {
+ if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
+ {
+ pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
+ }
+ }
+
+	if (pui32PT != IMG_NULL)
+	{
+		MakeKernelPageReadWrite(pui32PT);
+		pui32PT[ui32PTIndex] = 0;
+		MakeKernelPageReadOnly(pui32PT);
+
+		PDUMPMEMPTENTRIES(&sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, &pui32PT[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
+	}
+
+ return PVRSRV_OK;
+}
+#endif
+
+
+#if PAGE_TEST
+static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
+{
+ volatile IMG_UINT32 ui32WriteData;
+ volatile IMG_UINT32 ui32ReadData;
+ volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
+ IMG_INT n;
+ IMG_BOOL bOK=IMG_TRUE;
+
+ ui32WriteData = 0xffffffff;
+
+ for (n=0; n<1024; n++)
+ {
+ pMem32[n] = ui32WriteData;
+ ui32ReadData = pMem32[n];
+
+ if (ui32WriteData != ui32ReadData)
+ {
+
+ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
+ PVR_DBG_BREAK;
+ bOK = IMG_FALSE;
+ }
+ }
+
+ ui32WriteData = 0;
+
+ for (n=0; n<1024; n++)
+ {
+ pMem32[n] = ui32WriteData;
+ ui32ReadData = pMem32[n];
+
+ if (ui32WriteData != ui32ReadData)
+ {
+
+ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
+ PVR_DBG_BREAK;
+ bOK = IMG_FALSE;
+ }
+ }
+
+ if (bOK)
+ {
+ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
+ }
+ else
+ {
+ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
+ }
+}
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/mmu.h b/drivers/gpu/pvr/sgx/mmu.h
new file mode 100644
index 0000000..dd92bf0
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/mmu.h
@@ -0,0 +1,156 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _MMU_H_
+#define _MMU_H_
+
+#include "sgxinfokm.h"
+
+PVRSRV_ERROR
+MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
+
+IMG_VOID
+MMU_Finalise (MMU_CONTEXT *psMMUContext);
+
+
+IMG_VOID
+MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
+
+MMU_HEAP *
+MMU_Create (MMU_CONTEXT *psMMUContext,
+ DEV_ARENA_DESCRIPTOR *psDevArena,
+ RA_ARENA **ppsVMArena,
+ PDUMP_MMU_ATTRIB **ppsMMUAttrib);
+
+IMG_VOID
+MMU_Delete (MMU_HEAP *pMMUHeap);
+
+IMG_BOOL
+MMU_Alloc (MMU_HEAP *pMMUHeap,
+ IMG_SIZE_T uSize,
+ IMG_SIZE_T *pActualSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
+IMG_VOID
+MMU_Free (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_UINT32 ui32Size);
+
+IMG_VOID
+MMU_Enable (MMU_HEAP *pMMUHeap);
+
+IMG_VOID
+MMU_Disable (MMU_HEAP *pMMUHeap);
+
+IMG_VOID
+MMU_MapPages (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR SysPAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+IMG_VOID
+MMU_MapShadow (MMU_HEAP * pMMUHeap,
+ IMG_DEV_VIRTADDR MapBaseDevVAddr,
+ IMG_SIZE_T uByteSize,
+ IMG_CPU_VIRTADDR CpuVAddr,
+ IMG_HANDLE hOSMemHandle,
+ IMG_DEV_VIRTADDR * pDevVAddr,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+IMG_VOID
+MMU_UnmapPages (MMU_HEAP *psMMUHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_HANDLE hUniqueTag);
+
+IMG_VOID
+MMU_MapScatter (MMU_HEAP *pMMUHeap,
+ IMG_DEV_VIRTADDR DevVAddr,
+ IMG_SYS_PHYADDR *psSysAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 ui32MemFlags,
+ IMG_HANDLE hUniqueTag);
+
+
+IMG_DEV_PHYADDR
+MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
+
+
+IMG_DEV_PHYADDR
+MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
+
+
+#ifdef SUPPORT_SGX_MMU_BYPASS
+IMG_VOID
+EnableHostAccess (MMU_CONTEXT *psMMUContext);
+
+
+IMG_VOID
+DisableHostAccess (MMU_CONTEXT *psMMUContext);
+#endif
+
+IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+IMG_VOID WorkaroundBRN22997Free(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMU_Heap);
+
+#if defined(FIX_HW_BRN_31620)
+IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask);
+
+IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr);
+
+#endif
+
+
+IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32RegVal);
+
+#if defined(PDUMP)
+IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext);
+#endif
+
+#endif
diff --git a/drivers/gpu/pvr/sgx/pb.c b/drivers/gpu/pvr/sgx/pb.c
new file mode 100644
index 0000000..ab6523a
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/pb.c
@@ -0,0 +1,466 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+
+#include "services_headers.h"
+#include "sgx_bridge_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "pvr_bridge_km.h"
+#include "pdump_km.h"
+#include "sgxutils.h"
+
+#ifndef __linux__
+#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
+#endif
+
+#include "lists.h"
+
+static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
+
+static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL;
+static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL;
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy);
+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy);
+
+IMG_EXPORT PVRSRV_ERROR
+SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ IMG_BOOL bLockOnFailure,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc;
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
+ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
+ PVRSRV_ERROR eError;
+
+ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+ if (psStubPBDesc != IMG_NULL)
+ {
+ IMG_UINT32 i;
+ PRESMAN_ITEM psResItem;
+
+ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
+ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
+ }
+
+ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *)
+ * psStubPBDesc->ui32SubKernelMemInfosCount,
+ (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
+ IMG_NULL,
+ "Array of Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
+
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ExitNotFound;
+ }
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount,
+ ppsSharedPBDescSubKernelMemInfos,
+ 0);
+
+
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
+
+ eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
+ goto ExitNotFound;
+ }
+
+ *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
+ *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
+ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
+ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
+
+ *ui32SharedPBDescSubKernelMemInfosCount =
+ psStubPBDesc->ui32SubKernelMemInfosCount;
+
+ *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
+
+ for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
+ {
+ ppsSharedPBDescSubKernelMemInfos[i] =
+ psStubPBDesc->ppsSubKernelMemInfos[i];
+ }
+
+ psStubPBDesc->ui32RefCount++;
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+ return PVRSRV_OK;
+ }
+
+ eError = PVRSRV_OK;
+ if (bLockOnFailure)
+ {
+ if (psResItemCreateSharedPB == IMG_NULL)
+ {
+ psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
+ psPerProc,
+ 0,
+ &SGXCleanupSharedPBDescCreateLockCallback);
+
+ if (psResItemCreateSharedPB == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
+
+ eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
+ goto ExitNotFound;
+ }
+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
+ psPerProcCreateSharedPB = psPerProc;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
+ }
+ }
+ExitNotFound:
+ *phSharedPBDesc = IMG_NULL;
+
+ return eError;
+}
+
+
+static PVRSRV_ERROR
+SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
+{
+
+ IMG_UINT32 i;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie;
+
+
+
+
+ psStubPBDescIn->ui32RefCount--;
+ if (psStubPBDescIn->ui32RefCount == 0)
+ {
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr = psStubPBDescIn->sHWPBDescDevVAddr;
+ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
+ for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++)
+ {
+
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
+ psStubPBDescIn->ppsSubKernelMemInfos[i]);
+ }
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount,
+ psStubPBDescIn->ppsSubKernelMemInfos,
+ 0);
+ psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL;
+
+ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo);
+
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo);
+
+ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo);
+
+ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo);
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ psStubPBDescIn,
+ 0);
+
+
+
+ SGXCleanupRequest(psDeviceNode,
+ &sHWPBDescDevVAddr,
+ PVRSRV_CLEANUPCMD_PB,
+ CLEANUP_WITH_POLL);
+ }
+ return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ return SGXCleanupSharedPBDescKM(psStubPBDesc);
+}
+
+static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy)
+{
+#ifdef DEBUG
+ PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
+ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
+#else
+ PVR_UNREFERENCED_PARAMETER(pvParam);
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+ PVR_UNREFERENCED_PARAMETER(bDummy);
+
+ psPerProcCreateSharedPB = IMG_NULL;
+ psResItemCreateSharedPB = IMG_NULL;
+
+ return PVRSRV_OK;
+}
+
+
+IMG_EXPORT PVRSRV_ERROR
+SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
+{
+ PVR_ASSERT(hSharedPBDesc != IMG_NULL);
+
+ return ResManFreeResByPtr(hSharedPBDesc, CLEANUP_WITH_POLL);
+}
+
+
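+/*
+ * SGXAddSharedPBDescKM: publish a newly created shared PB as the device-wide
+ * stub descriptor.  The caller must hold the create lock taken in
+ * SGXFindSharedPBDescKM (psPerProcCreateSharedPB is checked and the lock
+ * resource is released here).  The supplied mem-infos are dissociated from
+ * the caller's RESMAN context so their lifetime follows the stub's reference
+ * count; when they are not adopted (an error occurs, or an existing shared
+ * PB is reused) the NoAdd/NoAddKeepPB paths free them explicitly.
+ */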
+IMG_EXPORT PVRSRV_ERROR
+SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount,
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr)
+{
+ PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
+ PVRSRV_ERROR eRet = PVRSRV_ERROR_INVALID_PERPROC;
+ IMG_UINT32 i;
+ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
+ PRESMAN_ITEM psResItem;
+
+
+ if (psPerProcCreateSharedPB != psPerProc)
+ {
+ goto NoAdd;
+ }
+ else
+ {
+ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL);
+
+ ResManFreeResByPtr(psResItemCreateSharedPB, CLEANUP_WITH_POLL);
+
+ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL);
+ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
+ }
+
+ psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+ if (psStubPBDesc != IMG_NULL)
+ {
+ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
+ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
+
+ }
+
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "SGXAddSharedPBDescKM: "
+ "Failed to register existing shared "
+ "PBDesc with the resource manager"));
+ goto NoAddKeepPB;
+ }
+
+
+ psStubPBDesc->ui32RefCount++;
+
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+ eRet = PVRSRV_OK;
+ goto NoAddKeepPB;
+ }
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ (IMG_VOID **)&psStubPBDesc,
+ 0,
+ "Stub Parameter Buffer Description") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
+ "StubPBDesc"));
+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto NoAdd;
+ }
+
+
+ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+
+ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *)
+ * ui32SharedPBDescSubKernelMemInfosCount,
+ (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
+ 0,
+ "Array of Kernel Memory Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+ "Failed to alloc "
+ "StubPBDesc->ppsSubKernelMemInfos"));
+ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
+ != PVRSRV_OK)
+ {
+ goto NoAdd;
+ }
+
+ psStubPBDesc->ui32RefCount = 1;
+ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
+ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
+ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
+ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
+ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
+
+ psStubPBDesc->ui32SubKernelMemInfosCount =
+ ui32SharedPBDescSubKernelMemInfosCount;
+ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
+ {
+ psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
+ if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
+ != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+ "Failed to dissociate shared PBDesc "
+ "from process"));
+ goto NoAdd;
+ }
+ }
+
+ psStubPBDesc->sHWPBDescDevVAddr = sHWPBDescDevVAddr;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_SHARED_PB_DESC,
+ psStubPBDesc,
+ 0,
+ &SGXCleanupSharedPBDescCallback);
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
+ "Failed to register shared PBDesc "
+ "with the resource manager"));
+ goto NoAdd;
+ }
+ psStubPBDesc->hDevCookie = hDevCookie;
+
+
+ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
+ psStubPBDesc);
+
+ *phSharedPBDesc = (IMG_HANDLE)psResItem;
+
+ return PVRSRV_OK;
+
+NoAdd:
+ if(psStubPBDesc)
+ {
+ if(psStubPBDesc->ppsSubKernelMemInfos)
+ {
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
+ psStubPBDesc->ppsSubKernelMemInfos,
+ 0);
+ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+ }
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_STUB_PBDESC),
+ psStubPBDesc,
+ 0);
+
+ }
+
+NoAddKeepPB:
+ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
+ {
+ PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]);
+ }
+
+ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
+
+ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
+ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
+
+ return eRet;
+}
+
diff --git a/drivers/gpu/pvr/sgx/sgx_bridge_km.h b/drivers/gpu/pvr/sgx/sgx_bridge_km.h
new file mode 100644
index 0000000..8fb3002
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgx_bridge_km.h
@@ -0,0 +1,160 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SGX_BRIDGE_KM_H__)
+#define __SGX_BRIDGE_KM_H__
+
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#include "sgx_bridge.h"
+#include "pvr_bridge.h"
+#include "perproc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+IMG_IMPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick);
+#else
+PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
+#endif
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+IMG_IMPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick);
+#else
+PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
+#endif
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_CCB_KICK_KM *psCCBKick);
+#else
+ SGX_CCB_KICK *psCCBKick);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR *pDevPAddr,
+ IMG_CPU_PHYADDR *pCpuPAddr);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_PHYADDR *psPDDevPAddr);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
+ SGX_CLIENT_INFO* psClientInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ SGX_MISC_INFO *psMiscInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ArraySize,
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
+ IMG_UINT32 *pui32DataCount,
+ IMG_UINT32 *pui32ClockSpeed,
+ IMG_UINT32 *pui32HostTimeStamp);
+
+IMG_IMPORT
+PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_BOOL bWaitForComplete);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_HEAP_INFO_KM *pasHeapInfo,
+ IMG_DEV_PHYADDR *psPDDevPAddr);
+#else
+ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
+#endif
+
+IMG_IMPORT
+PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo);
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo);
+#endif
+
+IMG_IMPORT PVRSRV_ERROR
+SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ IMG_BOOL bLockOnFailure,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
+
+IMG_IMPORT PVRSRV_ERROR
+SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
+
+IMG_IMPORT PVRSRV_ERROR
+SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
+ IMG_UINT32 ui32TotalPBSize,
+ IMG_HANDLE *phSharedPBDesc,
+ PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount,
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr);
+
+
+IMG_IMPORT PVRSRV_ERROR
+SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo);
+#else
+ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/sgxconfig.h b/drivers/gpu/pvr/sgx/sgxconfig.h
new file mode 100644
index 0000000..c5cb093
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxconfig.h
@@ -0,0 +1,401 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __SGXCONFIG_H__
+#define __SGXCONFIG_H__
+
+#include "sgxdefs.h"
+
+#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
+#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
+
+#define DEV_MAJOR_VERSION 1
+#define DEV_MINOR_VERSION 0
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00001000
+#else
+#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00000000
+#endif
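+
+/*
+ * Heap layout note: each *_HEAP_SIZE below is written as
+ * (next heap base - this heap base - 0x00001000), leaving a 4KB gap unmapped
+ * between adjacent heaps.  The #error checks at the end of this file verify
+ * that the resulting ranges do not overlap or wrap.
+ */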
+
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
+#if defined(FIX_HW_BRN_31620)
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #define SGX_2D_HEAP_BASE 0x04000000
+ #define SGX_2D_HEAP_SIZE (0x08000000-0x04000000-0x00001000)
+ #endif
+
+ #define SGX_GENERAL_HEAP_BASE 0x08000000
+ #define SGX_GENERAL_HEAP_SIZE (0xB8000000-0x00001000)
+
+
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000
+
+
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
+
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xC0000000
+
+
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+
+
+ #define SGX_TADATA_HEAP_BASE 0xD0000000
+ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0xE0000000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xE4000000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0xE8000000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xEC000000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0xF0000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x03000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+
+ #define SGX_PIXELSHADER_HEAP_BASE 0xF4000000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0xFC000000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000)
+#else
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #define SGX_2D_HEAP_BASE 0x00100000
+ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
+ #else
+ #if defined(FIX_HW_BRN_26915)
+ #define SGX_CGBUFFER_HEAP_BASE 0x00100000
+ #define SGX_CGBUFFER_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
+ #endif
+ #endif
+
+ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
+ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
+ #endif
+
+ #if !defined(SUPPORT_MEMORY_TILING)
+ #define SGX_GENERAL_HEAP_BASE 0x10000000
+ #define SGX_GENERAL_HEAP_SIZE (0xC2000000-0x00001000)
+ #else
+ #include <sgx_msvdx_defs.h>
+
+
+ #define SGX_GENERAL_HEAP_BASE 0x10000000
+ #define SGX_GENERAL_HEAP_SIZE (0xB5000000-0x00001000)
+
+ #define SGX_VPB_TILED_HEAP_STRIDE TILING_TILE_STRIDE_2K
+ #define SGX_VPB_TILED_HEAP_BASE 0xC5000000
+ #define SGX_VPB_TILED_HEAP_SIZE (0x0D000000-0x00001000)
+
+
+ #if((SGX_VPB_TILED_HEAP_BASE & SGX_BIF_TILING_ADDR_INV_MASK) != 0)
+ #error "sgxconfig.h: SGX_VPB_TILED_HEAP has insufficient alignment"
+ #endif
+
+ #endif
+
+
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000
+
+
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
+
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xD2000000
+
+
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+
+
+ #define SGX_TADATA_HEAP_BASE 0xE2000000
+ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0xEF000000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF0000000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0xF2000000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF2400000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0xF4000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+
+ #define SGX_PIXELSHADER_HEAP_BASE 0xF9000000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0xFE000000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000)
+#endif
+
+ #define SGX_CORE_IDENTIFIED
+#endif
+
+#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
+ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
+
+ #define SGX_GENERAL_HEAP_BASE 0x01800000
+ #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000)
+
+#else
+ #define SGX_GENERAL_HEAP_BASE 0x00001000
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_GENERAL_HEAP_SIZE (0x0B800000-0x00001000-0x00001000)
+#else
+ #define SGX_GENERAL_HEAP_SIZE (0x08800000-0x00001000-0x00001000)
+#endif
+#endif
+
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x01000000
+#else
+ #define SGX_3DPARAMETERS_HEAP_SIZE 0x04000000
+#endif
+
+
+#if !defined(HYBRID_SHARED_PB_SIZE)
+ #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1)
+#endif
+#if defined(SUPPORT_HYBRID_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000)
+#else
+#if defined(SUPPORT_PERCONTEXT_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE 0
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+#endif
+#if defined(SUPPORT_SHARED_PB)
+ #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE
+ #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000)
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0
+#endif
+#endif
+
+#if defined(SUPPORT_LARGE_GENERAL_HEAP)
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x0B800000
+#else
+ #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x08800000
+#endif
+
+
+
+ #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE)
+
+
+ #define SGX_TADATA_HEAP_BASE 0x0C800000
+ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
+
+ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
+ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
+
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
+ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
+
+ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
+ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
+
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
+ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
+
+ #define SGX_KERNEL_DATA_HEAP_BASE (0x0F000000+SGX_KERNEL_DATA_HEAP_OFFSET)
+ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET))
+
+ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
+ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
+
+ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
+ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
+
+
+ #define SGX_CORE_IDENTIFIED
+
+#endif
+
+#if !defined(SGX_CORE_IDENTIFIED)
+ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
+#endif
+
+#if !defined (SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE)
+ #if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE"
+ #endif
+
+ #if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000)
+ #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE"
+ #endif
+#endif
+
+#if defined(SGX_FEATURE_2D_HARDWARE) && defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE - SGX_2D_HEAP_BASE) >= EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP inaccessible by 2D requestor"
+ #endif
+#endif
+
+#if defined (EURASIA_USE_CODE_PAGE_SIZE)
+ #if ((SGX_KERNEL_CODE_HEAP_BASE & (EURASIA_USE_CODE_PAGE_SIZE - 1)) != 0)
+ #error "sgxconfig.h: ERROR: Kernel code heap base misalignment"
+ #endif
+#endif
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_MAPPING_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_MAPPING_HEAP"
+ #endif
+ #else
+ #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_HEAP_BASE"
+ #endif
+ #endif
+#else
+ #if defined(FIX_HW_BRN_26915)
+ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_CGBUFFER_HEAP_BASE + SGX_CGBUFFER_HEAP_SIZE) >= SGX_GENERAL_MAPPING_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_CGBUFFER_HEAP overlaps SGX_GENERAL_MAPPING_HEAP"
+ #endif
+ #else
+ #if ((SGX_CGBUFFER_HEAP_BASE + SGX_CGBUFFER_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_CGBUFFER_HEAP overlaps SGX_GENERAL_HEAP_BASE"
+ #endif
+ #endif
+ #endif
+#endif
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+ #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP overlaps SGX_GENERAL_HEAP"
+ #endif
+#endif
+
+#if defined(SUPPORT_HYBRID_PB)
+ #if ((HYBRID_SHARED_PB_SIZE + 0x000001000) > SGX_3DPARAMETERS_HEAP_SIZE)
+ #error "sgxconfig.h: ERROR: HYBRID_SHARED_PB_SIZE too large"
+ #endif
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+ #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_VPB_TILED_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_VPB_TILED_HEAP"
+ #endif
+ #if ((SGX_VPB_TILED_HEAP_BASE + SGX_VPB_TILED_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_VPB_TILED_HEAP overlaps SGX_3DPARAMETERS_HEAP"
+ #endif
+#else
+ #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_3DPARAMETERS_HEAP"
+ #endif
+#endif
+
+#if (((SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE + SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE) >= SGX_TADATA_HEAP_BASE) && (SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE > 0))
+ #error "sgxconfig.h: ERROR: SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE overlaps SGX_TADATA_HEAP"
+#endif
+
+#if ((SGX_TADATA_HEAP_BASE + SGX_TADATA_HEAP_SIZE) >= SGX_SYNCINFO_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_TADATA_HEAP overlaps SGX_SYNCINFO_HEAP"
+#endif
+
+#if ((SGX_SYNCINFO_HEAP_BASE + SGX_SYNCINFO_HEAP_SIZE) >= SGX_PDSPIXEL_CODEDATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_SYNCINFO_HEAP overlaps SGX_PDSPIXEL_CODEDATA_HEAP"
+#endif
+
+#if ((SGX_PDSPIXEL_CODEDATA_HEAP_BASE + SGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_CODE_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PDSPIXEL_CODEDATA_HEAP overlaps SGX_KERNEL_CODE_HEAP"
+#endif
+
+#if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE) >= SGX_PDSVERTEX_CODEDATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP overlaps SGX_PDSVERTEX_CODEDATA_HEAP"
+#endif
+
+#if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_DATA_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP overlaps SGX_KERNEL_DATA_HEAP"
+#endif
+
+#if ((SGX_KERNEL_DATA_HEAP_BASE + SGX_KERNEL_DATA_HEAP_SIZE) >= SGX_PIXELSHADER_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_KERNEL_DATA_HEAP overlaps SGX_PIXELSHADER_HEAP"
+#endif
+
+#if ((SGX_PIXELSHADER_HEAP_BASE + SGX_PIXELSHADER_HEAP_SIZE) >= SGX_VERTEXSHADER_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_PIXELSHADER_HEAP overlaps SGX_VERTEXSHADER_HEAP"
+#endif
+
+#if ((SGX_VERTEXSHADER_HEAP_BASE + SGX_VERTEXSHADER_HEAP_SIZE) < SGX_VERTEXSHADER_HEAP_BASE)
+ #error "sgxconfig.h: ERROR: SGX_VERTEXSHADER_HEAP size causes wraparound"
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/sgxinfokm.h b/drivers/gpu/pvr/sgx/sgxinfokm.h
new file mode 100644
index 0000000..44a48be
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxinfokm.h
@@ -0,0 +1,573 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __SGXINFOKM_H__
+#define __SGXINFOKM_H__
+
+#include "sgxdefs.h"
+#include "device.h"
+#include "power.h"
+#include "sysconfig.h"
+#include "sgxscript.h"
+#include "sgxinfo.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define SGX_HOSTPORT_PRESENT 0x00000001UL
+
+
+#define SGX_PDUMPREG_NAME "SGXREG"
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+
+typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
+
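+/*
+ * Per-device SGX state owned by the kernel services: register mapping,
+ * kernel CCB with its control and event-kicker buffers, host control
+ * structure, init scripts, clock-gating register info, the shared PB stub
+ * list and the buffers required by the various FIX_HW_BRN_* workarounds.
+ */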
+typedef struct _PVRSRV_SGXDEV_INFO_
+{
+ PVRSRV_DEVICE_TYPE eDeviceType;
+ PVRSRV_DEVICE_CLASS eDeviceClass;
+
+ IMG_UINT8 ui8VersionMajor;
+ IMG_UINT8 ui8VersionMinor;
+ IMG_UINT32 ui32CoreConfig;
+ IMG_UINT32 ui32CoreFlags;
+
+
+ IMG_PVOID pvRegsBaseKM;
+
+#if defined(SGX_FEATURE_HOST_PORT)
+
+ IMG_PVOID pvHostPortBaseKM;
+
+ IMG_UINT32 ui32HPSize;
+
+ IMG_SYS_PHYADDR sHPSysPAddr;
+#endif
+
+
+ IMG_HANDLE hRegMapping;
+
+
+ IMG_SYS_PHYADDR sRegsPhysBase;
+
+ IMG_UINT32 ui32RegSize;
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+
+ IMG_UINT32 ui32ExtSysCacheRegsSize;
+
+ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
+
+ IMG_UINT32 *pui32ExtSystemCacheRegsPT;
+
+ IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle;
+
+ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
+#endif
+
+
+ IMG_UINT32 ui32CoreClockSpeed;
+ IMG_UINT32 ui32uKernelTimerClock;
+ IMG_BOOL bSGXIdle;
+
+ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
+
+
+
+ IMG_DEV_PHYADDR sKernelPDDevPAddr;
+
+ IMG_UINT32 ui32HeapCount;
+ IMG_VOID *pvDeviceMemoryHeap;
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;
+ PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
+ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo;
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;
+ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
+ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo;
+ IMG_UINT32 *pui32KernelCCBEventKicker;
+#if defined(PDUMP)
+ IMG_UINT32 ui32KernelCCBEventKickerDumpVal;
+#endif
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
+ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
+#if defined(SGX_SUPPORT_HWPROFILING)
+ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
+#endif
+ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
+ PPVRSRV_KERNEL_MEM_INFO psKernelTASigBufferMemInfo;
+ PPVRSRV_KERNEL_MEM_INFO psKernel3DSigBufferMemInfo;
+#if defined(FIX_HW_BRN_29702)
+ PPVRSRV_KERNEL_MEM_INFO psKernelCFIMemInfo;
+#endif
+#if defined(FIX_HW_BRN_29823)
+ PPVRSRV_KERNEL_MEM_INFO psKernelDummyTermStreamMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMSnapShotBufferMemInfo;
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMCtrlStreamBufferMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ PPVRSRV_KERNEL_MEM_INFO psKernelVDMStateUpdateBufferMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
+#endif
+
+ IMG_UINT32 ui32ClientRefCount;
+
+
+ IMG_UINT32 ui32CacheControl;
+
+
+ IMG_UINT32 ui32ClientBuildOptions;
+
+
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+
+
+
+
+ IMG_VOID *pvMMUContextList;
+
+
+ IMG_BOOL bForcePTOff;
+
+ IMG_UINT32 ui32EDMTaskReg0;
+ IMG_UINT32 ui32EDMTaskReg1;
+
+ IMG_UINT32 ui32ClkGateCtl;
+ IMG_UINT32 ui32ClkGateCtl2;
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+ IMG_UINT32 ui32MasterClkGateStatusReg;
+ IMG_UINT32 ui32MasterClkGateStatusMask;
+ IMG_UINT32 ui32MasterClkGateStatus2Reg;
+ IMG_UINT32 ui32MasterClkGateStatus2Mask;
+#endif
+ SGX_INIT_SCRIPTS sScripts;
+
+
+ IMG_HANDLE hBIFResetPDOSMemHandle;
+ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
+ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
+ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
+ IMG_UINT32 *pui32BIFResetPD;
+ IMG_UINT32 *pui32BIFResetPT;
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+
+ IMG_HANDLE hBRN22997PTPageOSMemHandle;
+ IMG_HANDLE hBRN22997PDPageOSMemHandle;
+ IMG_DEV_PHYADDR sBRN22997PTDevPAddr;
+ IMG_DEV_PHYADDR sBRN22997PDDevPAddr;
+ IMG_UINT32 *pui32BRN22997PT;
+ IMG_UINT32 *pui32BRN22997PD;
+ IMG_SYS_PHYADDR sBRN22997SysPAddr;
+#endif
+
+#if defined(SUPPORT_HW_RECOVERY)
+
+ IMG_HANDLE hTimer;
+
+ IMG_UINT32 ui32TimeStamp;
+#endif
+
+
+ IMG_UINT32 ui32NumResets;
+
+
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psSGXHostCtl;
+
+
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ PVRSRV_KERNEL_MEM_INFO *psKernelSGXPTLAWriteBackMemInfo;
+#endif
+
+ IMG_UINT32 ui32Flags;
+
+
+ IMG_UINT32 ui32MemTilingUsage;
+
+ #if defined(PDUMP)
+ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
+ #endif
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+
+ IMG_VOID *pvDummyPTPageCpuVAddr;
+ IMG_DEV_PHYADDR sDummyPTDevPAddr;
+ IMG_HANDLE hDummyPTPageOSMemHandle;
+ IMG_VOID *pvDummyDataPageCpuVAddr;
+ IMG_DEV_PHYADDR sDummyDataDevPAddr;
+ IMG_HANDLE hDummyDataPageOSMemHandle;
+#endif
+#if defined(PDUMP)
+ PDUMP_MMU_ATTRIB sMMUAttrib;
+#endif
+ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA];
+
+#if defined(FIX_HW_BRN_31620)
+
+ IMG_VOID *pvBRN31620DummyPageCpuVAddr;
+ IMG_HANDLE hBRN31620DummyPageOSMemHandle;
+ IMG_DEV_PHYADDR sBRN31620DummyPageDevPAddr;
+
+
+ IMG_VOID *pvBRN31620DummyPTCpuVAddr;
+ IMG_HANDLE hBRN31620DummyPTOSMemHandle;
+ IMG_DEV_PHYADDR sBRN31620DummyPTDevPAddr;
+
+ IMG_HANDLE hKernelMMUContext;
+#endif
+
+} PVRSRV_SGXDEV_INFO;
+
+
+typedef struct _SGX_TIMING_INFORMATION_
+{
+ IMG_UINT32 ui32CoreClockSpeed;
+ IMG_UINT32 ui32HWRecoveryFreq;
+ IMG_BOOL bEnableActivePM;
+ IMG_UINT32 ui32ActivePowManLatencyms;
+ IMG_UINT32 ui32uKernelFreq;
+} SGX_TIMING_INFORMATION;
+
+typedef struct _SGX_DEVICE_MAP_
+{
+ IMG_UINT32 ui32Flags;
+
+
+ IMG_SYS_PHYADDR sRegsSysPBase;
+ IMG_CPU_PHYADDR sRegsCpuPBase;
+ IMG_CPU_VIRTADDR pvRegsCpuVBase;
+ IMG_UINT32 ui32RegsSize;
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ IMG_SYS_PHYADDR sHPSysPBase;
+ IMG_CPU_PHYADDR sHPCpuPBase;
+ IMG_UINT32 ui32HPSize;
+#endif
+
+
+ IMG_SYS_PHYADDR sLocalMemSysPBase;
+ IMG_DEV_PHYADDR sLocalMemDevPBase;
+ IMG_CPU_PHYADDR sLocalMemCpuPBase;
+ IMG_UINT32 ui32LocalMemSize;
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+ IMG_UINT32 ui32ExtSysCacheRegsSize;
+ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
+#endif
+
+
+ IMG_UINT32 ui32IRQ;
+
+#if !defined(SGX_DYNAMIC_TIMING_INFO)
+
+ SGX_TIMING_INFORMATION sTimingInfo;
+#endif
+#if defined(PDUMP)
+
+ IMG_CHAR *pszPDumpDevName;
+#endif
+} SGX_DEVICE_MAP;
+
+
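+/*
+ * Kernel-side record of the shared parameter buffer.  Instances live on the
+ * intrusive psStubPBDescListKM list in PVRSRV_SGXDEV_INFO (psNext/ppsThis)
+ * and are reference counted by SGXFindSharedPBDescKM, SGXAddSharedPBDescKM
+ * and the SGXCleanupSharedPBDesc* RESMAN callbacks.
+ */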
+struct _PVRSRV_STUB_PBDESC_
+{
+ IMG_UINT32 ui32RefCount;
+ IMG_UINT32 ui32TotalPBSize;
+ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
+ IMG_UINT32 ui32SubKernelMemInfosCount;
+ IMG_HANDLE hDevCookie;
+ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr;
+ PVRSRV_STUB_PBDESC *psNext;
+ PVRSRV_STUB_PBDESC **ppsThis;
+};
+
+typedef struct _PVRSRV_SGX_CCB_INFO_
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
+ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
+ SGXMKIF_COMMAND *psCommands;
+ IMG_UINT32 *pui32WriteOffset;
+ volatile IMG_UINT32 *pui32ReadOffset;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+} PVRSRV_SGX_CCB_INFO;
+
+
+typedef struct _SGX_BRIDGE_INIT_INFO_KM_
+{
+ IMG_HANDLE hKernelCCBMemInfo;
+ IMG_HANDLE hKernelCCBCtlMemInfo;
+ IMG_HANDLE hKernelCCBEventKickerMemInfo;
+ IMG_HANDLE hKernelSGXHostCtlMemInfo;
+ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo;
+#endif
+ IMG_HANDLE hKernelSGXMiscMemInfo;
+
+ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
+
+ SGX_INIT_SCRIPTS sScripts;
+
+ IMG_UINT32 ui32ClientBuildOptions;
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ IMG_HANDLE hKernelHWProfilingMemInfo;
+#endif
+#if defined(SUPPORT_SGX_HWPERF)
+ IMG_HANDLE hKernelHWPerfCBMemInfo;
+#endif
+ IMG_HANDLE hKernelTASigBufferMemInfo;
+ IMG_HANDLE hKernel3DSigBufferMemInfo;
+
+#if defined(FIX_HW_BRN_29702)
+ IMG_HANDLE hKernelCFIMemInfo;
+#endif
+#if defined(FIX_HW_BRN_29823)
+ IMG_HANDLE hKernelDummyTermStreamMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
+#endif
+
+ IMG_UINT32 ui32EDMTaskReg0;
+ IMG_UINT32 ui32EDMTaskReg1;
+
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+#endif
+
+ IMG_UINT32 ui32CacheControl;
+
+ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
+ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
+
+} SGX_BRIDGE_INIT_INFO_KM;
+
+
+typedef struct _SGX_INTERNEL_STATUS_UPDATE_KM_
+{
+ CTL_STATUS sCtlStatus;
+ IMG_HANDLE hKernelMemInfo;
+} SGX_INTERNEL_STATUS_UPDATE_KM;
+
+
+typedef struct _SGX_CCB_KICK_KM_
+{
+ SGXMKIF_COMMAND sCommand;
+ IMG_HANDLE hCCBKernelMemInfo;
+
+ IMG_UINT32 ui32NumDstSyncObjects;
+ IMG_HANDLE hKernelHWSyncListMemInfo;
+
+
+ IMG_HANDLE *pahDstSyncHandles;
+
+ IMG_UINT32 ui32NumTAStatusVals;
+ IMG_UINT32 ui32Num3DStatusVals;
+
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ SGX_INTERNEL_STATUS_UPDATE_KM asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
+ SGX_INTERNEL_STATUS_UPDATE_KM as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
+#else
+ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+#endif
+
+ IMG_BOOL bFirstKickOrResume;
+#if (defined(NO_HARDWARE) || defined(PDUMP))
+ IMG_BOOL bTerminateOrAbort;
+#endif
+
+
+ IMG_UINT32 ui32CCBOffset;
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ IMG_UINT32 ui32NumTASrcSyncs;
+ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
+ IMG_UINT32 ui32NumTADstSyncs;
+ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
+ IMG_UINT32 ui32Num3DSrcSyncs;
+ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
+#else
+
+ IMG_UINT32 ui32NumSrcSyncs;
+ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA];
+#endif
+
+
+ IMG_BOOL bTADependency;
+ IMG_HANDLE hTA3DSyncInfo;
+
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+#if defined(NO_HARDWARE)
+ IMG_UINT32 ui32WriteOpsPendingVal;
+#endif
+} SGX_CCB_KICK_KM;
+
+
+#if defined(TRANSFER_QUEUE)
+typedef struct _PVRSRV_TRANSFER_SGX_KICK_KM_
+{
+ IMG_HANDLE hCCBMemInfo;
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+
+ IMG_UINT32 ui32NumSrcSync;
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+
+ IMG_UINT32 ui32NumDstSync;
+ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+
+ IMG_UINT32 ui32Flags;
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+} PVRSRV_TRANSFER_SGX_KICK_KM, *PPVRSRV_TRANSFER_SGX_KICK_KM;
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _PVRSRV_2D_SGX_KICK_KM_
+{
+ IMG_HANDLE hCCBMemInfo;
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+
+ IMG_UINT32 ui32NumSrcSync;
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
+
+
+ IMG_HANDLE hDstSyncInfo;
+
+
+ IMG_HANDLE hTASyncInfo;
+
+
+ IMG_HANDLE h3DSyncInfo;
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+} PVRSRV_2D_SGX_KICK_KM, *PPVRSRV_2D_SGX_KICK_KM;
+#endif
+#endif
+
+PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_VOID SGXOSTimer(IMG_VOID *pvData);
+
+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery,
+ IMG_UINT32 ui32PDUMPFlags);
+
+IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags);
+
+PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery);
+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
+
+PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo);
+
+IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bDumpSGXRegs);
+
+PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
+#endif
+
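+/*
+ * NO_HARDWARE builds: emulate an SGX event by read-modify-writing the given
+ * status register in the dummy register block, applying ui32StatusValue
+ * under ui32StatusMask.
+ */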
+#if defined(NO_HARDWARE)
+static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32StatusRegister,
+ IMG_UINT32 ui32StatusValue,
+ IMG_UINT32 ui32StatusMask)
+{
+ IMG_UINT32 ui32RegVal;
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
+
+ ui32RegVal &= ~ui32StatusMask;
+ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx/sgxinit.c b/drivers/gpu/pvr/sgx/sgxinit.c
new file mode 100644
index 0000000..622b6f5
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxinit.c
@@ -0,0 +1,2877 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgx_mkif_km.h"
+#include "sgxconfig.h"
+#include "sysconfig.h"
+#include "pvr_bridge_km.h"
+
+#include "sgx_bridge_km.h"
+
+#include "pdump_km.h"
+#include "ra.h"
+#include "mmu.h"
+#include "handle.h"
+#include "perproc.h"
+
+#include "sgxutils.h"
+#include "pvrversion.h"
+#include "sgx_options.h"
+
+#include "lists.h"
+#include "srvkm.h"
+#include "ttrace.h"
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+
+static const IMG_CHAR *SGXUKernelStatusString(IMG_UINT32 code)
+{
+ switch(code)
+ {
+#define MKTC_ST(x) \
+ case x: \
+ return #x;
+#include "sgx_ukernel_status_codes.h"
+ default:
+ return "(Unknown)";
+ }
+}
+
+#endif
+
+#define VAR(x) #x
+
+
+#define CHECK_SIZE(NAME) \
+{ \
+ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
+ { \
+ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
+ VAR(NAME), \
+ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
+ psSGXStructSizes->ui32Sizeof_##NAME )); \
+ bStructSizesFailed = IMG_TRUE; \
+ } \
+}
+
+#if defined (SYS_USING_INTERRUPTS)
+IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
+#endif
+
+
+static
+PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext);
+#if defined(PDUMP)
+static
+PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(OS_SUPPORTS_IN_LISR)
+ if (OSInLISR(psDeviceNode->psSysData))
+ {
+
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ }
+ else
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+#else
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+#endif
+}
+
+static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ if (psDevInfo->psKernelCCBInfo != IMG_NULL)
+ {
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo)
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo)
+#endif
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL;
+
+ PVR_UNREFERENCED_PARAMETER(psPerProc);
+ psDevInfo->sScripts = psInitInfo->sScripts;
+
+ psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
+ psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
+ psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
+ psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
+ psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+
+ psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo;
+
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ psDevInfo->psKernelSGXPTLAWriteBackMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXPTLAWriteBackMemInfo;
+#endif
+
+ psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+ psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
+#endif
+#if defined(SUPPORT_SGX_HWPERF)
+ psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
+#endif
+ psDevInfo->psKernelTASigBufferMemInfo = psInitInfo->hKernelTASigBufferMemInfo;
+ psDevInfo->psKernel3DSigBufferMemInfo = psInitInfo->hKernel3DSigBufferMemInfo;
+#if defined(FIX_HW_BRN_29702)
+ psDevInfo->psKernelCFIMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCFIMemInfo;
+#endif
+#if defined(FIX_HW_BRN_29823)
+ psDevInfo->psKernelDummyTermStreamMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelDummyTermStreamMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ psDevInfo->psKernelVDMSnapShotBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMSnapShotBufferMemInfo;
+ psDevInfo->psKernelVDMCtrlStreamBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMCtrlStreamBufferMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ psDevInfo->psKernelVDMStateUpdateBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMStateUpdateBufferMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
+#endif
+
+ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
+
+
+ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
+
+
+
+ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGX_CCB_INFO),
+ (IMG_VOID **)&psKernelCCBInfo, 0,
+ "SGX Circular Command Buffer Info");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
+ goto failed_allockernelccb;
+ }
+
+
+ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
+ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
+ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
+ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
+ psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
+ psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
+ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
+
+
+
+ OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
+ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
+
+ psDevInfo->bForcePTOff = IMG_FALSE;
+
+ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
+
+ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
+ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
+ psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl;
+ psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2;
+ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
+ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg;
+ psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask;
+ psDevInfo->ui32MasterClkGateStatus2Reg = psInitInfo->ui32MasterClkGateStatus2Reg;
+ psDevInfo->ui32MasterClkGateStatus2Mask = psInitInfo->ui32MasterClkGateStatus2Mask;
+#endif
+
+
+
+ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
+
+ return PVRSRV_OK;
+
+failed_allockernelccb:
+ DeinitDevInfo(psDevInfo);
+
+ return eError;
+}
+
+
+
+
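+/*
+ * SGXRunScript: interpret a table of SGX_INIT_COMMANDs supplied at
+ * initialisation time (psInitInfo->sScripts).  Register writes are applied
+ * and PDUMPed; a script is expected to end with SGX_INIT_OP_HALT, and
+ * running off the end of the ui32NumInitCommands entries without a HALT is
+ * reported as PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION.
+ */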
+static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
+{
+ IMG_UINT32 ui32PC;
+ SGX_INIT_COMMAND *psComm;
+
+ for (ui32PC = 0, psComm = psScript;
+ ui32PC < ui32NumInitCommands;
+ ui32PC++, psComm++)
+ {
+ switch (psComm->eOp)
+ {
+ case SGX_INIT_OP_WRITE_HW_REG:
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+ PDUMPCOMMENT("SGXRunScript: Write HW reg operation");
+ PDUMPREG(SGX_PDUMPREG_NAME, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+ break;
+ }
+#if defined(PDUMP)
+ case SGX_INIT_OP_PDUMP_HW_REG:
+ {
+ PDUMPCOMMENT("SGXRunScript: Dump HW reg operation");
+ PDUMPREG(SGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
+ break;
+ }
+#endif
+ case SGX_INIT_OP_HALT:
+ {
+ return PVRSRV_OK;
+ }
+ case SGX_INIT_OP_ILLEGAL:
+
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+ }
+ }
+
+ }
+
+ return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+}
+
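+/*
+ * BIF memory tiling: SGX_AllocMemTilingRangeInt claims a free entry in the
+ * EUR_CR_BIF_TILEn register bank (SGX_BIF_NUM_TILING_RANGES entries, tracked
+ * in ui32MemTilingUsage) and programs it with the min/max device virtual
+ * addresses and tile stride of a tiled heap.
+ */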
+#if defined(SUPPORT_MEMORY_TILING)
+static PVRSRV_ERROR SGX_AllocMemTilingRangeInt(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32TilingStride,
+ IMG_UINT32 *pui32RangeIndex)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Val;
+
+
+ for(i=0; i < SGX_BIF_NUM_TILING_RANGES; i++)
+ {
+ if((psDevInfo->ui32MemTilingUsage & (1U << i)) == 0)
+ {
+
+ psDevInfo->ui32MemTilingUsage |= 1U << i;
+
+ if(pui32RangeIndex != IMG_NULL)
+ {
+ *pui32RangeIndex = i;
+ }
+ goto RangeAllocated;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,"SGX_AllocMemTilingRange: all tiling ranges in use"));
+ return PVRSRV_ERROR_EXCEEDED_HW_LIMITS;
+
+RangeAllocated:
+
+
+ if(ui32Start & ~SGX_BIF_TILING_ADDR_MASK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range start (0x%08X) fails "
+ "alignment test", ui32Start));
+ }
+ if((ui32End + 0x00001000) & ~SGX_BIF_TILING_ADDR_MASK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range end (0x%08X) fails "
+ "alignment test", ui32End));
+ }
+
+ ui32Offset = EUR_CR_BIF_TILE0 + (i<<2);
+
+ ui32Val = ((ui32TilingStride << EUR_CR_BIF_TILE0_CFG_SHIFT) & EUR_CR_BIF_TILE0_CFG_MASK)
+ | (((ui32End>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK)
+ | (((ui32Start>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK)
+ | (EUR_CR_BIF_TILE0_ENABLE << EUR_CR_BIF_TILE0_CFG_SHIFT);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+
+#if defined(SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS)
+ ui32Offset = EUR_CR_BIF_TILE0_ADDR_EXT + (i<<2);
+
+ ui32Val = (((ui32End>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK)
+ | (((ui32Start>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+#endif
+
+ return PVRSRV_OK;
+}
+
+#endif
+
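+/*
+ * SGXInitialise: bring up the SGX microkernel.  Runs init script part 1,
+ * resets the core, clears the kernel CCB event kicker, programs any BIF
+ * tiling ranges, runs init script part 2, then kicks the EDM and polls
+ * ui32InitStatus in the host control structure for
+ * PVRSRV_USSE_EDM_INIT_COMPLETE (with a matching PDUMP poll).
+ */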
+PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
+ static IMG_BOOL bFirstTime = IMG_TRUE;
+#if defined(PDUMP)
+ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
+#endif
+
+#if defined(SGX_FEATURE_MP)
+
+#else
+ SGXInitClocks(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n");
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError));
+ return eError;
+ }
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n");
+
+
+ psDevInfo->ui32NumResets++;
+ SGXReset(psDevInfo, bFirstTime || bHardwareRecovery, PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(EUR_CR_POWER)
+#if defined(SGX531)
+
+
+
+
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 1);
+#else
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 0);
+#endif
+#endif
+
+
+ *psDevInfo->pui32KernelCCBEventKicker = 0;
+#if defined(PDUMP)
+ if (!bPDumpIsSuspended)
+ {
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
+ sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ }
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+ {
+
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDevInfo->pvDeviceMemoryHeap;
+ IMG_UINT32 i;
+
+ psDevInfo->ui32MemTilingUsage = 0;
+
+ for(i=0; i<psDevInfo->ui32HeapCount; i++)
+ {
+ if(psDeviceMemoryHeap[i].ui32XTileStride > 0)
+ {
+
+ eError = SGX_AllocMemTilingRangeInt(
+ psDevInfo,
+ psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr,
+ psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr
+ + psDeviceMemoryHeap[i].ui32HeapSize,
+ psDeviceMemoryHeap[i].ui32XTileStride,
+ NULL);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Unable to allocate SGX BIF tiling range for heap: %s",
+ psDeviceMemoryHeap[i].pszName));
+ break;
+ }
+ }
+ }
+ }
+#endif
+
+
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n");
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError));
+ return eError;
+ }
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
+
+
+ psSGXHostCtl->ui32HostClock = OSClockus();
+
+ psSGXHostCtl->ui32InitStatus = 0;
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Reset the SGX microkernel initialisation status\n");
+ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Initialise the microkernel\n");
+#endif
+
+#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
+ OSWriteMemoryBarrier();
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
+ EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
+ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
+ OSWriteMemoryBarrier();
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
+ EUR_CR_EVENT_KICK_NOW_MASK);
+#endif
+
+ OSMemoryBarrier();
+
+#if defined(PDUMP)
+
+
+ if (!bPDumpIsSuspended)
+ {
+#if defined(SGX_FEATURE_MULTI_EVENT_KICK)
+ PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "First increment of the SGX event kicker value\n");
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo,
+ 0,
+ sizeof(IMG_UINT32),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK);
+#endif
+ }
+#endif
+
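+	/* Poll the host control block until the microkernel reports that initialisation is complete */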
+#if !defined(NO_HARDWARE)
+
+
+ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
+ #if !defined(FIX_HW_BRN_23281)
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ #endif
+ return PVRSRV_ERROR_RETRY;
+ }
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Wait for the SGX microkernel initialisation to complete");
+ PDUMPMEMPOL(psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PVRSRV_USSE_EDM_INIT_COMPLETE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+#endif
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+
+
+
+ WorkaroundBRN22997ReadHostPort(psDevInfo);
+#endif
+
+ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset);
+
+ bFirstTime = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
+
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
+ PVRSRV_ERROR eError;
+
+
+ if (psDevInfo->pvRegsBaseKM == IMG_NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
+{
+ IMG_HANDLE hDevMemHeap = IMG_NULL;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_HANDLE hKernelDevMemContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ IMG_UINT32 i;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
+ PVRSRV_ERROR eError;
+
+
+ PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
+
+ #if defined(SGX_FEATURE_MP)
+ #if !defined(SGX_FEATURE_MP_PLUS)
+ PDUMPCOMMENT("SGX Multi-processor: %d cores", SGX_FEATURE_MP_CORE_COUNT);
+ #else
+ PDUMPCOMMENT("SGX Multi-processor: %d TA cores, %d 3D cores", SGX_FEATURE_MP_CORE_COUNT_TA, SGX_FEATURE_MP_CORE_COUNT_3D);
+ #endif
+ #endif
+
+#if (SGX_CORE_REV == 0)
+ PDUMPCOMMENT("SGX Core Revision Information: head RTL");
+#else
+ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
+#endif
+
+ #if defined(SGX_FEATURE_SYSTEM_CACHE)
+ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
+ #endif
+ #endif
+
+ PDUMPCOMMENT("SGX Initialisation Part 1");
+
+
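+	/* Allocate and zero the per-device info structure */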
+ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGXDEV_INFO),
+ (IMG_VOID **)&psDevInfo, IMG_NULL,
+ "SGX Device Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
+
+
+ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
+ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
+
+
+ psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
+
+
+ psDevInfo->ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
+ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
+
+
+ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
+ &sPDDevPAddr,
+ IMG_NULL,
+ IMG_NULL);
+ if (hKernelDevMemContext == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
+
+
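+	/* Create the kernel, shared and shared-exported heaps in the kernel memory context */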
+ for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
+ {
+ switch(psDeviceMemoryHeap[i].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_KERNEL:
+ case DEVICE_MEMORY_HEAP_SHARED:
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+
+ if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
+ {
+ hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
+ &psDeviceMemoryHeap[i]);
+
+
+
+ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
+ }
+ break;
+ }
+ }
+ }
+#if defined(PDUMP)
+ if(hDevMemHeap)
+ {
+
+ psDevInfo->sMMUAttrib = *((BM_HEAP*)hDevMemHeap)->psMMUAttrib;
+ }
+#endif
+ eError = MMU_BIFResetPDAlloc(psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, PVRSRV_HEAP_INFO_KM *pasHeapInfo, IMG_DEV_PHYADDR *psPDDevPAddr)
+#else
+PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
+#endif
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("SGXGetInfoForSrvinit");
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ *psPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
+
+ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, pasHeapInfo);
+#else
+ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
+
+ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
+#endif
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
+ IMG_HANDLE hDevHandle,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_BRIDGE_INIT_INFO_KM *psInitInfo)
+#else
+ SGX_BRIDGE_INIT_INFO *psInitInfo)
+#endif
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
+
+ PDUMPCOMMENT("SGX Initialisation Part 2");
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
+
+
+ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
+ goto failed_init_dev_info;
+ }
+
+
+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+
+ if (psSGXDeviceMap->pvRegsCpuVBase)
+ {
+ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
+ }
+ else
+ {
+
+ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
+ psSGXDeviceMap->ui32RegsSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ if (!psDevInfo->pvRegsBaseKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ }
+ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
+ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
+
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
+ {
+
+ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
+ psSGXDeviceMap->ui32HPSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ if (!psDevInfo->pvHostPortBaseKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
+ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
+ }
+#endif
+
+#if defined (SYS_USING_INTERRUPTS)
+
+
+ psDeviceNode->pvISRData = psDeviceNode;
+
+ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
+
+#endif
+
+
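+	/* Flag the microkernel power manager as idle and register SGX with the services power manager */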
+ psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+
+ eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
+ &SGXPrePowerState, &SGXPostPowerState,
+ &SGXPreClockSpeedChange, &SGXPostClockSpeedChange,
+ (IMG_HANDLE)psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ eDefaultPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
+ return eError;
+ }
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+ eError = WorkaroundBRN22997Alloc(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to alloc memory for BRN22997 workaround"));
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+
+ psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize;
+ psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase;
+ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers"));
+ return eError;
+ }
+#endif
+
+
+
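+	/* Zero the kernel CCB, its control structure and the event kicker, and record the initial state for PDump */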
+ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
+ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
+ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
+ PDUMPCOMMENT("Initialise Kernel CCB");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
+ PDUMPCOMMENT("Initialise Kernel CCB Control");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
+ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+
+ return PVRSRV_OK;
+
+failed_init_dev_info:
+ return eError;
+}
+
+static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Heap;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+
+ if (!psDevInfo)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_HW_RECOVERY)
+ if (psDevInfo->hTimer)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
+ return eError;
+ }
+ psDevInfo->hTimer = IMG_NULL;
+ }
+#endif
+
+#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
+
+ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
+ return eError;
+ }
+#endif
+
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+ WorkaroundBRN22997Free(psDeviceNode);
+#endif
+
+ MMU_BIFResetPDFree(psDevInfo);
+
+
+
+ DeinitDevInfo(psDevInfo);
+
+
+ psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
+ for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
+ {
+ switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
+ {
+ case DEVICE_MEMORY_HEAP_KERNEL:
+ case DEVICE_MEMORY_HEAP_SHARED:
+ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
+ {
+ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
+ {
+ BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
+ }
+ break;
+ }
+ }
+ }
+
+
+ eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
+ return eError;
+ }
+
+
+ eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
+ return eError;
+ }
+
+
+ if (!psSGXDeviceMap->pvRegsCpuVBase)
+ {
+
+ if (psDevInfo->pvRegsBaseKM != IMG_NULL)
+ {
+ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ }
+ }
+
+#if defined(SGX_FEATURE_HOST_PORT)
+ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
+ {
+
+ if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
+ {
+ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
+ psDevInfo->ui32HPSize,
+ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
+ IMG_NULL);
+ }
+ }
+#endif
+
+
+
+ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+ sizeof(PVRSRV_SGXDEV_INFO),
+ psDevInfo,
+ 0);
+
+ psDeviceNode->pvDevice = IMG_NULL;
+
+ if (psDeviceMemoryHeap != IMG_NULL)
+ {
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
+ psDeviceMemoryHeap,
+ 0);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+
+static IMG_VOID SGXDumpMasterDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_LOG(("(HYD) %s%08X", pszName, ui32RegVal));
+}
+
+#endif
+
+static IMG_VOID SGXDumpDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CoreNum,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32RegAddr)
+{
+ IMG_UINT32 ui32RegVal;
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(ui32RegAddr, ui32CoreNum));
+ PVR_LOG(("(P%u) %s%08X", ui32CoreNum, pszName, ui32RegVal));
+}
+
+void dsscomp_kdump(void);
+IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bDumpSGXRegs)
+{
+ IMG_UINT32 ui32CoreNum;
+
+ dsscomp_kdump();
+
+ PVR_LOG(("SGX debug (%s)", PVRVERSION_STRING));
+
+ if (bDumpSGXRegs)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", (IMG_UINTPTR_T)psDevInfo->pvRegsBaseKM));
+ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase.uiAddr));
+
+ SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_ID: ", EUR_CR_CORE_ID);
+ SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_REVISION: ", EUR_CR_CORE_REVISION);
+#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)
+ SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_INT_STAT: ", EUR_CR_MASTER_BIF_INT_STAT);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_FAULT: ", EUR_CR_MASTER_BIF_FAULT);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_CLKGATESTATUS2: ", EUR_CR_MASTER_CLKGATESTATUS2);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_PIM_STATUS: ", EUR_CR_MASTER_VDM_PIM_STATUS);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_BANK_SET: ", EUR_CR_MASTER_BIF_BANK_SET);
+
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS: ", EUR_CR_MASTER_EVENT_STATUS);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS2: ", EUR_CR_MASTER_EVENT_STATUS2);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_MP_PRIMITIVE: ", EUR_CR_MASTER_MP_PRIMITIVE);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_DPLIST_STATUS: ", EUR_CR_MASTER_DPM_DPLIST_STATUS);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC: ", EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PAGE_MANAGEOP: ", EUR_CR_MASTER_DPM_PAGE_MANAGEOP);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT: ", EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS: ", EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM: ", EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS: ", EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0: ", EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1: ", EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1);
+		SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_WAIT_FOR_KICK: ", EUR_CR_MASTER_VDM_WAIT_FOR_KICK);
+#endif
+ for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++)
+ {
+
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS: ", EUR_CR_EVENT_STATUS);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS2: ", EUR_CR_EVENT_STATUS2);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_CTRL: ", EUR_CR_BIF_CTRL);
+ #if defined(EUR_CR_BIF_BANK0)
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK0: ", EUR_CR_BIF_BANK0);
+ #endif
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_INT_STAT: ", EUR_CR_BIF_INT_STAT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_FAULT: ", EUR_CR_BIF_FAULT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_MEM_REQ_STAT: ", EUR_CR_BIF_MEM_REQ_STAT);
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATECTL: ", EUR_CR_CLKGATECTL);
+ #if defined(EUR_CR_PDS_PC_BASE)
+ SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_PDS_PC_BASE: ", EUR_CR_PDS_PC_BASE);
+ #endif
+ }
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && !defined(FIX_HW_BRN_31620)
+ {
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32PDDevPAddr;
+
+
+
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ if (ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK)
+ {
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+ ui32RegVal &= EUR_CR_BIF_FAULT_ADDR_MASK;
+ ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0);
+ ui32PDDevPAddr &= EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK;
+ MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32RegVal);
+ }
+ }
+#endif
+ }
+
+
+
+ QueueDumpDebugInfo();
+
+ {
+
+
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+ IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psSGXHostCtl;
+ IMG_UINT32 ui32LoopCounter;
+
+ if (psSGXHostCtl->ui32AssertFail != 0)
+ {
+ PVR_LOG(("SGX Microkernel assert fail: 0x%08X", psSGXHostCtl->ui32AssertFail));
+ psSGXHostCtl->ui32AssertFail = 0;
+ }
+
+ PVR_LOG(("SGX Host control:"));
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer);
+ ui32LoopCounter += 4)
+ {
+ PVR_LOG(("\t(HC-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
+ pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1],
+ pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3]));
+ }
+ }
+
+ {
+
+
+ IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32LoopCounter;
+
+ PVR_LOG(("SGX TA/3D control:"));
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->uAllocSize / sizeof(*pui32TA3DCtlBuffer);
+ ui32LoopCounter += 4)
+ {
+ PVR_LOG(("\t(T3C-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
+ pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1],
+ pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
+ }
+ }
+
+ #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ {
+ IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32LastStatusCode, ui32WriteOffset;
+
+ ui32LastStatusCode = *pui32MKTraceBuffer;
+ pui32MKTraceBuffer++;
+ ui32WriteOffset = *pui32MKTraceBuffer;
+ pui32MKTraceBuffer++;
+
+ PVR_LOG(("Last SGX microkernel status code: %08X %s",
+ ui32LastStatusCode, SGXUKernelStatusString(ui32LastStatusCode)));
+
+ #if defined(PVRSRV_DUMP_MK_TRACE)
+
+
+ {
+ IMG_UINT32 ui32LoopCounter;
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
+ ui32LoopCounter++)
+ {
+ IMG_UINT32 *pui32BufPtr;
+ pui32BufPtr = pui32MKTraceBuffer +
+ (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4);
+ PVR_LOG(("\t(MKT-%X) %08X %08X %08X %08X %s", ui32LoopCounter,
+ pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0],
+ SGXUKernelStatusString(pui32BufPtr[0])));
+ }
+ }
+ #endif
+ }
+ #endif
+
+ {
+
+
+ PVR_LOG(("SGX Kernel CCB WO:0x%X RO:0x%X",
+ psDevInfo->psKernelCCBCtl->ui32WriteOffset,
+ psDevInfo->psKernelCCBCtl->ui32ReadOffset));
+
+ #if defined(PVRSRV_DUMP_KERNEL_CCB)
+ {
+ IMG_UINT32 ui32LoopCounter;
+
+ for (ui32LoopCounter = 0;
+ ui32LoopCounter < sizeof(psDevInfo->psKernelCCB->asCommands) /
+ sizeof(psDevInfo->psKernelCCB->asCommands[0]);
+ ui32LoopCounter++)
+ {
+ SGXMKIF_COMMAND *psCommand = &psDevInfo->psKernelCCB->asCommands[ui32LoopCounter];
+
+ PVR_LOG(("\t(KCCB-%X) %08X %08X - %08X %08X %08X %08X", ui32LoopCounter,
+ psCommand->ui32ServiceAddress, psCommand->ui32CacheControl,
+ psCommand->ui32Data[0], psCommand->ui32Data[1],
+ psCommand->ui32Data[2], psCommand->ui32Data[3]));
+ }
+ }
+ #endif
+ }
+ #if defined (TTRACE)
+ PVRSRVDumpTimeTraceBuffers();
+ #endif
+
+}
+
+
+#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
+static
+IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Component,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Component);
+
+
+
+ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+ if(eError != PVRSRV_OK)
+ {
+
+
+
+ PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
+ return;
+ }
+
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
+
+ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+
+ SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE);
+
+
+ PDUMPSUSPEND();
+
+
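+	/* Re-run SGXInitialise to bring the core back up; with FIX_HW_BRN_23281 the initialisation is retried while it returns PVRSRV_ERROR_RETRY */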
+#if defined(FIX_HW_BRN_23281)
+
+ for (eError = PVRSRV_ERROR_RETRY; eError == PVRSRV_ERROR_RETRY;)
+#endif
+ {
+ eError = SGXInitialise(psDevInfo, IMG_TRUE);
+ }
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
+ }
+
+
+ PDUMPRESUME();
+
+ PVRSRVPowerUnlock(ui32CallerID);
+
+
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+
+
+
+ PVRSRVProcessQueues(IMG_TRUE);
+}
+#endif
+
+
+#if defined(SUPPORT_HW_RECOVERY)
+IMG_VOID SGXOSTimer(IMG_VOID *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ static IMG_UINT32 ui32EDMTasks = 0;
+ static IMG_UINT32 ui32LockupCounter = 0;
+ static IMG_UINT32 ui32OpenCLDelayCounter = 0;
+ static IMG_UINT32 ui32NumResets = 0;
+#if defined(FIX_HW_BRN_31093)
+ static IMG_BOOL bBRN31093Inval = IMG_FALSE;
+#endif
+ IMG_UINT32 ui32CurrentEDMTasks;
+ IMG_UINT32 ui32CurrentOpenCLDelayCounter=0;
+ IMG_BOOL bLockup = IMG_FALSE;
+ IMG_BOOL bPoweredDown;
+
+
+ psDevInfo->ui32TimeStamp++;
+
+#if defined(NO_HARDWARE)
+ bPoweredDown = IMG_TRUE;
+#else
+ bPoweredDown = (SGXIsDevicePowered(psDeviceNode)) ? IMG_FALSE : IMG_TRUE;
+#endif
+
+
+
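+	/* Nothing to monitor while SGX is powered down; just reset the lockup tracking state */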
+ if (bPoweredDown)
+ {
+ ui32LockupCounter = 0;
+ #if defined(FIX_HW_BRN_31093)
+ bBRN31093Inval = IMG_FALSE;
+ #endif
+ }
+ else
+ {
+
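+		/* Sample the microkernel task registers; if they are unchanged over several timer ticks, suspect a lockup */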
+ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
+ if (psDevInfo->ui32EDMTaskReg1 != 0)
+ {
+ ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
+ }
+ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
+ (psDevInfo->ui32NumResets == ui32NumResets))
+ {
+ ui32LockupCounter++;
+ if (ui32LockupCounter == 3)
+ {
+ ui32LockupCounter = 0;
+ ui32CurrentOpenCLDelayCounter = (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount;
+ if(0 != ui32CurrentOpenCLDelayCounter)
+ {
+					if(ui32OpenCLDelayCounter != ui32CurrentOpenCLDelayCounter)
+					{
+						ui32OpenCLDelayCounter = ui32CurrentOpenCLDelayCounter;
+					}
+					else
+					{
+						ui32OpenCLDelayCounter -= 1;
+						(psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = ui32OpenCLDelayCounter;
+					}
+ goto SGX_NoUKernel_LockUp;
+ }
+
+
+ #if defined(FIX_HW_BRN_31093)
+ if (bBRN31093Inval == IMG_FALSE)
+ {
+
+ #if defined(FIX_HW_BRN_29997)
+ IMG_UINT32 ui32BIFCtrl;
+
+ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_PAUSE_MASK);
+
+ SGXWaitClocks(psDevInfo, 200);
+ #endif
+
+ bBRN31093Inval = IMG_TRUE;
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, EUR_CR_BIF_CTRL_INVAL_PTE_MASK);
+
+ SGXWaitClocks(psDevInfo, 200);
+
+ #if defined(FIX_HW_BRN_29997)
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
+ #endif
+ }
+ else
+ #endif
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
+
+ bLockup = IMG_TRUE;
+ (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = 0;
+ }
+ }
+ }
+ else
+ {
+ #if defined(FIX_HW_BRN_31093)
+ bBRN31093Inval = IMG_FALSE;
+ #endif
+ ui32LockupCounter = 0;
+ ui32EDMTasks = ui32CurrentEDMTasks;
+ ui32NumResets = psDevInfo->ui32NumResets;
+ }
+ }
+SGX_NoUKernel_LockUp:
+
+ if (bLockup)
+ {
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
+
+ psSGXHostCtl->ui32HostDetectedLockups ++;
+
+
+ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+ }
+}
+#endif
+
+
+
+#if defined(SYS_USING_INTERRUPTS)
+
+IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
+{
+ IMG_BOOL bInterruptProcessed = IMG_FALSE;
+
+
+
+ {
+ IMG_UINT32 ui32EventStatus, ui32EventEnable;
+ IMG_UINT32 ui32EventClear = 0;
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ IMG_UINT32 ui32EventStatus2, ui32EventEnable2;
+#endif
+ IMG_UINT32 ui32EventClear2 = 0;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+
+
+ if(pvData == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));
+ return bInterruptProcessed;
+ }
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+
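+		/* Read the event status registers, mask them with the host enables, and clear any events this driver handles */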
+ ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
+
+
+ ui32EventStatus &= ui32EventEnable;
+
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ ui32EventStatus2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+ ui32EventEnable2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE2);
+
+
+ ui32EventStatus2 &= ui32EventEnable2;
+#endif
+
+
+
+ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
+ {
+ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
+ }
+
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK)
+ {
+ ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK;
+ }
+
+ if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK)
+ {
+ ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK;
+ }
+#endif
+
+ if (ui32EventClear || ui32EventClear2)
+ {
+ bInterruptProcessed = IMG_TRUE;
+
+
+ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
+
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32EventClear2);
+ }
+ }
+
+ return bInterruptProcessed;
+}
+
+
+static IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
+
+ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) &&
+ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL))
+ {
+ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+ }
+
+#if defined(OS_SUPPORTS_IN_LISR)
+ if (psDeviceNode->bReProcessDeviceCommandComplete)
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+#endif
+
+ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
+}
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+
+IMG_INTERNAL
+PVRSRV_ERROR SGX_AllocMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
+ IMG_UINT32 ui32XTileStride,
+ IMG_UINT32 *pui32RangeIndex)
+{
+ return SGX_AllocMemTilingRangeInt(psDeviceNode->pvDevice,
+ psMemInfo->sDevVAddr.uiAddr,
+ psMemInfo->sDevVAddr.uiAddr + ((IMG_UINT32) psMemInfo->uAllocSize) + SGX_MMU_PAGE_SIZE - 1,
+ ui32XTileStride,
+ pui32RangeIndex);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SGX_FreeMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RangeIndex)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Val;
+
+ if(ui32RangeIndex >= 10)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGX_FreeMemTilingRange: invalid Range index "));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
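+	/* Mark the range as free and disable the corresponding BIF tiling register */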
+ psDevInfo->ui32MemTilingUsage &= ~(1<<ui32RangeIndex);
+
+
+ ui32Offset = EUR_CR_BIF_TILE0 + (ui32RangeIndex<<2);
+ ui32Val = 0;
+
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val);
+ PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val);
+
+ return PVRSRV_OK;
+}
+
+#endif
+
+
+static IMG_VOID SGXCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ #if defined(SGX_FEATURE_MP)
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
+ #else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif
+}
+
+PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+
+
+ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
+ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
+#if defined(PDUMP)
+ {
+
+ SGX_DEVICE_MAP *psSGXDeviceMemMap;
+ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMemMap);
+
+ psDeviceNode->sDevId.pszPDumpDevName = psSGXDeviceMemMap->pszPDumpDevName;
+ PVR_ASSERT(psDeviceNode->sDevId.pszPDumpDevName != IMG_NULL);
+ }
+
+ psDeviceNode->sDevId.pszPDumpRegName = SGX_PDUMPREG_NAME;
+#endif
+
+ psDeviceNode->pfnInitDevice = &DevInitSGXPart1;
+ psDeviceNode->pfnDeInitDevice = &DevDeInitSGX;
+
+ psDeviceNode->pfnInitDeviceCompatCheck = &SGXDevInitCompatCheck;
+#if defined(PDUMP)
+ psDeviceNode->pfnPDumpInitDevice = &SGXResetPDump;
+ psDeviceNode->pfnMMUGetContextID = &MMU_GetPDumpContextID;
+#endif
+
+
+ psDeviceNode->pfnMMUInitialise = &MMU_Initialise;
+ psDeviceNode->pfnMMUFinalise = &MMU_Finalise;
+ psDeviceNode->pfnMMUInsertHeap = &MMU_InsertHeap;
+ psDeviceNode->pfnMMUCreate = &MMU_Create;
+ psDeviceNode->pfnMMUDelete = &MMU_Delete;
+ psDeviceNode->pfnMMUAlloc = &MMU_Alloc;
+ psDeviceNode->pfnMMUFree = &MMU_Free;
+ psDeviceNode->pfnMMUMapPages = &MMU_MapPages;
+ psDeviceNode->pfnMMUMapShadow = &MMU_MapShadow;
+ psDeviceNode->pfnMMUUnmapPages = &MMU_UnmapPages;
+ psDeviceNode->pfnMMUMapScatter = &MMU_MapScatter;
+ psDeviceNode->pfnMMUGetPhysPageAddr = &MMU_GetPhysPageAddr;
+ psDeviceNode->pfnMMUGetPDDevPAddr = &MMU_GetPDDevPAddr;
+#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
+ psDeviceNode->pfnMMUIsHeapShared = &MMU_IsHeapShared;
+#endif
+#if defined(FIX_HW_BRN_31620)
+ psDeviceNode->pfnMMUGetCacheFlushRange = &MMU_GetCacheFlushRange;
+ psDeviceNode->pfnMMUGetPDPhysAddr = &MMU_GetPDPhysAddr;
+#else
+ psDeviceNode->pfnMMUGetCacheFlushRange = IMG_NULL;
+ psDeviceNode->pfnMMUGetPDPhysAddr = IMG_NULL;
+#endif
+#if defined (SYS_USING_INTERRUPTS)
+
+
+ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
+ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
+#endif
+
+#if defined(SUPPORT_MEMORY_TILING)
+ psDeviceNode->pfnAllocMemTilingRange = SGX_AllocMemTilingRange;
+ psDeviceNode->pfnFreeMemTilingRange = SGX_FreeMemTilingRange;
+#endif
+
+
+
+ psDeviceNode->pfnDeviceCommandComplete = &SGXCommandComplete;
+
+ psDeviceNode->pfnCacheInvalidate = SGXCacheInvalidate;
+
+
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE;
+
+
+ psDevMemoryInfo->ui32Flags = 0;
+
+
+ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
+ (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
+ "Array of Device Memory Heap Info") != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
+
+ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+
+
+
+
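+	/* Populate the device heap table; pszName/pszBSName identify each heap and DevMemHeapType selects per-context versus shared */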
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "General";
+ psDeviceMemoryHeap->pszBSName = "General BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+
+ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+#endif
+ psDeviceMemoryHeap++;
+
+#if defined(SUPPORT_MEMORY_TILING)
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VPB_TILED_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VPB_TILED_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_VPB_TILED_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "VPB Tiled";
+ psDeviceMemoryHeap->pszBSName = "VPB Tiled BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap->ui32XTileStride = SGX_VPB_TILED_HEAP_STRIDE;
+ PVR_DPF((PVR_DBG_WARNING, "VPB tiling heap tiling stride = 0x%x", psDeviceMemoryHeap->ui32XTileStride));
+ psDeviceMemoryHeap++;
+#endif
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "TA Data";
+ psDeviceMemoryHeap->pszBSName = "TA Data BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "Kernel Code";
+ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "KernelData";
+ psDeviceMemoryHeap->pszBSName = "KernelData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
+
+
+
+
+
+
+ psDeviceMemoryHeap->ui32HeapSize = ((10 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000);
+ PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_PIXELSHADER_HEAP_SIZE);
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
+ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
+
+ psDeviceMemoryHeap->ui32HeapSize = ((4 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000);
+ PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_VERTEXSHADER_HEAP_SIZE);
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
+ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
+ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
+ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "CacheCoherent";
+ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+
+ psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+ psDeviceMemoryHeap++;
+
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SHARED_3DPARAMETERS_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SHARED_3DPARAMETERS_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_SHARED_3DPARAMETERS_HEAP_SIZE;
+ psDeviceMemoryHeap->pszName = "Shared 3DParameters";
+ psDeviceMemoryHeap->pszBSName = "Shared 3DParameters BS";
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE;
+ psDeviceMemoryHeap->pszName = "Percontext 3DParameters";
+ psDeviceMemoryHeap->pszBSName = "Percontext 3DParameters BS";
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+
+
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_MULTI_PROCESS;
+ psDeviceMemoryHeap->pszName = "GeneralMapping";
+ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
+ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
+
+
+
+
+
+
+
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+ #else
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ #endif
+
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+
+ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+ psDeviceMemoryHeap++;
+#endif
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "2D";
+ psDeviceMemoryHeap->pszBSName = "2D BS";
+
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+#endif
+
+
+#if defined(FIX_HW_BRN_26915)
+
+
+ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_CGBUFFER_HEAP_ID);
+ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CGBUFFER_HEAP_BASE;
+ psDeviceMemoryHeap->ui32HeapSize = SGX_CGBUFFER_HEAP_SIZE;
+ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+ | PVRSRV_HAP_SINGLE_PROCESS;
+ psDeviceMemoryHeap->pszName = "CGBuffer";
+ psDeviceMemoryHeap->pszBSName = "CGBuffer BS";
+
+ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+
+ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+ psDeviceMemoryHeap++;
+#endif
+
+
+ psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)(psDeviceNode->pvDevice);
+ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
+ PVR_DPF((PVR_DBG_MESSAGE, "Reset pdump CCB write offset."));
+
+ return PVRSRV_OK;
+}
+#endif
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
+ SGX_CLIENT_INFO* psClientInfo)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+
+
+ psDevInfo->ui32ClientRefCount++;
+
+
+
+ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+
+
+
+ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
+
+
+ return PVRSRV_OK;
+}
+
+
+IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ PVR_LOG(("SGX panic"));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+ OSPanic();
+#else
+ PVR_LOG(("OSPanic disabled"));
+#endif
+}
+
+
+PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
+#if !defined(NO_HARDWARE)
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
+ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
+ IMG_BOOL bStructSizesFailed;
+
+
+ IMG_BOOL bCheckCoreRev;
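+	/* Pairs of (HW core revision, SW core revision) that are known to work together and are exempt from the strict match below */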
+ const IMG_UINT32 aui32CoreRevExceptions[] =
+ {
+ 0x10100, 0x10101
+ };
+	const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / sizeof(IMG_UINT32);
+ IMG_UINT i;
+#endif
+
+
+ if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto chk_exit;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+
+
+ ui32BuildOptions = (SGX_BUILD_OPTIONS);
+ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
+ if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
+ "extra options present in client-side driver: (0x%x). Please check sgx_options.h",
+ psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
+ "extra options present in KM: (0x%x). Please check sgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]"));
+ }
+
+#if !defined (NO_HARDWARE)
+ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+
+
+ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
+ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
+ psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
+ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, IMG_NULL);
+
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
+ goto chk_exit;
+ }
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+ if( (psSGXFeatures->ui32DDKVersion !=
+ ((PVRVERSION_MAJ << 16) |
+ (PVRVERSION_MIN << 8) |
+ PVRVERSION_BRANCH) ) ||
+ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) )
+ {
+ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%d)/device DDK revision (%d).",
+ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%d) and device DDK (%d) match. [ OK ]",
+ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
+ }
+
+
+ if (psSGXFeatures->ui32CoreRevSW == 0)
+ {
+
+
+ PVR_LOG(("SGXInit: HW core rev (%x) check skipped.",
+ psSGXFeatures->ui32CoreRev));
+ }
+ else
+ {
+
+ bCheckCoreRev = IMG_TRUE;
+ for(i=0; i<ui32NumCoreExceptions; i+=2)
+ {
+ if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) &&
+ (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) )
+ {
+ PVR_LOG(("SGXInit: HW core rev (%x), SW core rev (%x) check skipped.",
+ psSGXFeatures->ui32CoreRev,
+ psSGXFeatures->ui32CoreRevSW));
+ bCheckCoreRev = IMG_FALSE;
+ }
+ }
+
+ if (bCheckCoreRev)
+ {
+ if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%x) and SW core rev (%x).",
+ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%x) and SW core rev (%x) match. [ OK ]",
+ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
+ }
+ }
+ }
+
+
+ psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes;
+
+ bStructSizesFailed = IMG_FALSE;
+
+ CHECK_SIZE(HOST_CTL);
+ CHECK_SIZE(COMMAND);
+#if defined(SGX_FEATURE_2D_HARDWARE)
+ CHECK_SIZE(2DCMD);
+ CHECK_SIZE(2DCMD_SHARED);
+#endif
+ CHECK_SIZE(CMDTA);
+ CHECK_SIZE(CMDTA_SHARED);
+ CHECK_SIZE(TRANSFERCMD);
+ CHECK_SIZE(TRANSFERCMD_SHARED);
+
+ CHECK_SIZE(3DREGISTERS);
+ CHECK_SIZE(HWPBDESC);
+ CHECK_SIZE(HWRENDERCONTEXT);
+ CHECK_SIZE(HWRENDERDETAILS);
+ CHECK_SIZE(HWRTDATA);
+ CHECK_SIZE(HWRTDATASET);
+ CHECK_SIZE(HWTRANSFERCONTEXT);
+
+ if (bStructSizesFailed == IMG_TRUE)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
+ }
+
+
+ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
+ if (ui32BuildOptions != (SGX_BUILD_OPTIONS))
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
+ if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
+ "extra options present in driver: (0x%x). Please check sgx_options.h",
+ (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
+ "extra options present in microkernel: (0x%x). Please check sgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]"));
+ }
+#endif
+
+ eError = PVRSRV_OK;
+chk_exit:
+#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
+ return PVRSRV_OK;
+#else
+ return eError;
+#endif
+}
+
+static
+PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext)
+{
+ PVRSRV_ERROR eError;
+ SGXMKIF_COMMAND sCommandData;
+ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
+
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+
+ if (! psMemInfo->pvLinAddrKM)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
+ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
+ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
+
+ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
+
+
+ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
+ OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
+
+
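+	/* Pass the device virtual address of the misc info buffer to the microkernel and kick it */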
+ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
+
+ PDUMPCOMMENT("Microkernel kick for SGXGetMiscInfo");
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_GETMISCINFO,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
+ return eError;
+ }
+
+
+#if !defined(NO_HARDWARE)
+ {
+ IMG_BOOL bExit;
+
+ bExit = IMG_FALSE;
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0)
+ {
+ bExit = IMG_TRUE;
+ break;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+
+ if (!bExit)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ SGX_MISC_INFO *psMiscInfo,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDevMemContext)
+{
+ PVRSRV_ERROR eError;
+ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
+ IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags;
+
+
+ *pui32MiscInfoFlags = 0;
+
+#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
+#endif
+
+ switch(psMiscInfo->eRequest)
+ {
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
+ {
+ IMG_UINT32 ui32MaskDM;
+ IMG_UINT32 ui32CtrlWEnable;
+ IMG_UINT32 ui32CtrlREnable;
+ IMG_UINT32 ui32CtrlTrapEnable;
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32StartRegVal;
+ IMG_UINT32 ui32EndRegVal;
+ SGXMKIF_COMMAND sCommandData;
+
+
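+			/* Build the breakpoint start/end addresses and control word (data-master mask, read/write/trap enables) */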
+ if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable)
+ {
+
+ IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr;
+ IMG_DEV_VIRTADDR sBPDevVAddrEnd = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddrEnd;
+
+
+ ui32StartRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_START_ADDRESS_MASK;
+ ui32EndRegVal = sBPDevVAddrEnd.uiAddr & EUR_CR_BREAKPOINT0_END_ADDRESS_MASK;
+
+ ui32MaskDM = psMiscInfo->uData.sSGXBreakpointInfo.ui32DataMasterMask;
+ ui32CtrlWEnable = psMiscInfo->uData.sSGXBreakpointInfo.bWrite;
+ ui32CtrlREnable = psMiscInfo->uData.sSGXBreakpointInfo.bRead;
+ ui32CtrlTrapEnable = psMiscInfo->uData.sSGXBreakpointInfo.bTrapped;
+
+
+ ui32RegVal = ((ui32MaskDM<<EUR_CR_BREAKPOINT0_MASK_DM_SHIFT) & EUR_CR_BREAKPOINT0_MASK_DM_MASK) |
+ ((ui32CtrlWEnable<<EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK) |
+ ((ui32CtrlREnable<<EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK) |
+ ((ui32CtrlTrapEnable<<EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK);
+ }
+ else
+ {
+
+ ui32RegVal = ui32StartRegVal = ui32EndRegVal = 0;
+ }
+
+
+ sCommandData.ui32Data[0] = psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex;
+ sCommandData.ui32Data[1] = ui32StartRegVal;
+ sCommandData.ui32Data[2] = ui32EndRegVal;
+ sCommandData.ui32Data[3] = ui32RegVal;
+
+
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+
+ PDUMPCOMMENT("Microkernel kick for setting a data breakpoint");
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_DATABREAKPOINT,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: SGXScheduleCCBCommandKM failed."));
+ return eError;
+ }
+
+#if defined(NO_HARDWARE)
+
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+#else
+ {
+ IMG_BOOL bExit;
+
+ bExit = IMG_FALSE;
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (psDevInfo->psSGXHostCtl->ui32BPSetClearSignal != 0)
+ {
+ bExit = IMG_TRUE;
+
+ psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0;
+ break;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+
+			if (!bExit)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: Timeout occurred waiting for BP set/clear"));
+				return PVRSRV_ERROR_TIMEOUT;
+			}
+ }
+#endif
+
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT:
+ {
+
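+			/* Read back which core (and pipe, where per-pipe registers exist) trapped the breakpoint */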
+#if !defined(NO_HARDWARE)
+#if defined(SGX_FEATURE_MP)
+ IMG_BOOL bTrappedBPMaster;
+ IMG_UINT32 ui32CoreNum, ui32TrappedBPCoreNum;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ IMG_UINT32 ui32PipeNum, ui32TrappedBPPipeNum;
+#define NUM_PIPES_PLUS_ONE (SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES+1)
+#endif
+ IMG_BOOL bTrappedBPAny;
+#endif
+ IMG_BOOL bFoundOne;
+
+#if defined(SGX_FEATURE_MP)
+ ui32TrappedBPCoreNum = 0;
+ bTrappedBPMaster = !!(EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT));
+ bTrappedBPAny = bTrappedBPMaster;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32TrappedBPPipeNum = 0;
+#endif
+ for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++)
+ {
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+
+
+
+#define SGX_MP_CORE_PIPE_SELECT(r,c,p) \
+ ((SGX_MP_CORE_SELECT(EUR_CR_PARTITION_##r,c) + p*(EUR_CR_PIPE0_##r-EUR_CR_PARTITION_##r)))
+ for (ui32PipeNum = 0; ui32PipeNum < NUM_PIPES_PLUS_ONE; ui32PipeNum++)
+ {
+ bFoundOne =
+ 0 != (EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK &
+ OSReadHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_PIPE_SELECT(BREAKPOINT,
+ ui32CoreNum,
+ ui32PipeNum)));
+ if (bFoundOne)
+ {
+ bTrappedBPAny = IMG_TRUE;
+ ui32TrappedBPCoreNum = ui32CoreNum;
+ ui32TrappedBPPipeNum = ui32PipeNum;
+ }
+ }
+#else
+ bFoundOne = !!(EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)));
+ if (bFoundOne)
+ {
+ bTrappedBPAny = IMG_TRUE;
+ ui32TrappedBPCoreNum = ui32CoreNum;
+ }
+#endif
+ }
+
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = bTrappedBPAny;
+#else
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ #error Not yet considered the case for per-pipe regs in non-mp case
+#endif
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = 0 != (EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT));
+#endif
+
+ if (psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP)
+ {
+ IMG_UINT32 ui32Info0, ui32Info1;
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum));
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum));
+#else
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum));
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum));
+#endif
+#else
+ ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO0);
+ ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO1);
+#endif
+
+#ifdef SGX_FEATURE_PERPIPE_BKPT_REGS
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK);
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT;
+#else
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK);
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT;
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT;
+#endif
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:(ui32TrappedBPCoreNum + (ui32TrappedBPPipeNum<<10));
+#else
+
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:ui32TrappedBPCoreNum;
+#endif
+#else
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+
+#error non-mp perpipe regs not yet supported
+#else
+
+ psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = 65534;
+#endif
+#endif
+ }
+#endif
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT:
+ {
+
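+			/* Untrap the breakpoint and wait for the trap sequence field to change */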
+#if !defined(NO_HARDWARE)
+#if defined(SGX_FEATURE_MP)
+ IMG_UINT32 ui32CoreNum;
+ IMG_BOOL bMaster;
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ IMG_UINT32 ui32PipeNum;
+#endif
+#endif
+ IMG_UINT32 ui32OldSeqNum, ui32NewSeqNum;
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32PipeNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum >> 10;
+ ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum & 1023;
+ bMaster = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum > 32767;
+#else
+ ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum;
+ bMaster = ui32CoreNum > SGX_FEATURE_MP_CORE_COUNT_3D;
+#endif
+ if (bMaster)
+ {
+
+
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT_TRAP, EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT);
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+ }
+ else
+#endif
+ {
+
+#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS)
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum));
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP, ui32CoreNum, ui32PipeNum), EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum));
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+#else
+ ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum));
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP, ui32CoreNum), EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK);
+ do
+ {
+ ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum));
+ }
+ while (ui32OldSeqNum == ui32NewSeqNum);
+#endif
+ }
+#endif
+ return PVRSRV_OK;
+ }
+#endif
+
+ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
+ {
+ psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_ACTIVEPOWER:
+ {
+ psMiscInfo->uData.sActivePower.ui32NumActivePowerEvents = psDevInfo->psSGXHostCtl->ui32NumActivePowerEvents;
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_LOCKUPS:
+ {
+#if defined(SUPPORT_HW_RECOVERY)
+ psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = psDevInfo->psSGXHostCtl->ui32uKernelDetectedLockups;
+ psMiscInfo->uData.sLockups.ui32HostDetectedLockups = psDevInfo->psSGXHostCtl->ui32HostDetectedLockups;
+#else
+ psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = 0;
+ psMiscInfo->uData.sLockups.ui32HostDetectedLockups = 0;
+#endif
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_SPM:
+ {
+
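+			/* No SPM data is returned; the request simply succeeds */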
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_SGXREV:
+ {
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
+ eError));
+ return eError;
+ }
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+
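+			/* Copy the microkernel-populated feature block back to the caller */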
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+
+
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%x, sw ID 0x%x, sw Rev 0x%x\n",
+ psSGXFeatures->ui32CoreRev,
+ psSGXFeatures->ui32CoreIdSW,
+ psSGXFeatures->ui32CoreRevSW));
+ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%x, DDK build 0x%x\n",
+ psSGXFeatures->ui32DDKVersion,
+ psSGXFeatures->ui32DDKBuild));
+
+
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
+ {
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+
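+			/* Report the driver (DDK) version and build options without involving the microkernel */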
+ OSMemSet(psMemInfo->pvLinAddrKM, 0,
+ sizeof(PVRSRV_SGX_MISCINFO_INFO));
+
+ psSGXFeatures->ui32DDKVersion =
+ (PVRVERSION_MAJ << 16) |
+ (PVRVERSION_MIN << 8) |
+ PVRVERSION_BRANCH;
+ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
+
+
+ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+
+ psSGXFeatures->sDevVAEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->sDevVAddr;
+ psSGXFeatures->pvEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
+#endif
+
+
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+ return PVRSRV_OK;
+ }
+
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ case SGX_MISC_INFO_REQUEST_MEMREAD:
+ case SGX_MISC_INFO_REQUEST_MEMCOPY:
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
+ PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemSrc;
+ PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemDest;
+
+ {
+
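+				/* Record the source device virtual address for the microkernel to read */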
+ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
+ psSGXMemSrc = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessSrc;
+
+ if(psMiscInfo->sDevVAddrSrc.uiAddr != 0)
+ {
+ psSGXMemSrc->sDevVAddr = psMiscInfo->sDevVAddrSrc;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ if( psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMCOPY)
+ {
+
+ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMWRITE;
+ psSGXMemDest = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessDest;
+
+ if(psMiscInfo->sDevVAddrDest.uiAddr != 0)
+ {
+ psSGXMemDest->sDevVAddr = psMiscInfo->sDevVAddrDest;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+
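+			/* Look up the page directory for the supplied memory context */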
+ if(psMiscInfo->hDevMemContext != IMG_NULL)
+ {
+				SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemSrc->sPDDevPAddr);
+
+				if (psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMCOPY)
+				{
+					psSGXMemDest->sPDDevPAddr = psSGXMemSrc->sPDDevPAddr;
+				}
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+			eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
+ eError));
+ return eError;
+ }
+ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL)
+ {
+ return PVRSRV_ERROR_INVALID_MISCINFO;
+ }
+#endif
+
+ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
+ return PVRSRV_OK;
+ }
+#endif
+
+#if defined(SUPPORT_SGX_HWPERF)
+ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
+ {
+ PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS *psSetHWPerfStatus = &psMiscInfo->uData.sSetHWPerfStatus;
+ const IMG_UINT32 ui32ValidFlags = PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS |
+ PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON |
+ PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON |
+ PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON;
+ SGXMKIF_COMMAND sCommandData = {0};
+
+
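+			/* Reject any HWPerf status flags that are not recognised */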
+ if ((psSetHWPerfStatus->ui32NewHWPerfStatus & ~ui32ValidFlags) != 0)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ #if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "SGX ukernel HWPerf status %u\n",
+ psSetHWPerfStatus->ui32NewHWPerfStatus);
+ #endif
+
+
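+			/* Publish the performance counter group (and bit) selections to the host control block */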
+ #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS)
+ OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfGroup[0],
+ &psSetHWPerfStatus->aui32PerfGroup[0],
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup));
+ OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfBit[0],
+ &psSetHWPerfStatus->aui32PerfBit[0],
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit));
+ #if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, aui32PerfGroup),
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, aui32PerfBit),
+ sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif
+ #else
+ psDevInfo->psSGXHostCtl->ui32PerfGroup = psSetHWPerfStatus->ui32PerfGroup;
+ #if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PerfGroup),
+ sizeof(psDevInfo->psSGXHostCtl->ui32PerfGroup),
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif
+ #endif
+
+
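+			/* Tell the microkernel about the new HWPerf status */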
+ sCommandData.ui32Data[0] = psSetHWPerfStatus->ui32NewHWPerfStatus;
+ eError = SGXScheduleCCBCommandKM(psDeviceNode,
+ SGXMKIF_CMD_SETHWPERFSTATUS,
+ &sCommandData,
+ KERNEL_ID,
+ 0,
+ hDevMemContext,
+ IMG_FALSE);
+ return eError;
+ }
+#endif
+
+ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
+ {
+ PVR_LOG(("User requested SGX debug info"));
+
+
+ SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_FALSE);
+
+ return PVRSRV_OK;
+ }
+
+ case SGX_MISC_INFO_PANIC:
+ {
+ PVR_LOG(("User requested SGX panic"));
+
+ SGXPanic(psDeviceNode->pvDevice);
+
+ return PVRSRV_OK;
+ }
+
+ default:
+ {
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ArraySize,
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
+ IMG_UINT32 *pui32DataCount,
+ IMG_UINT32 *pui32ClockSpeed,
+ IMG_UINT32 *pui32HostTimeStamp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
+ IMG_UINT i;
+
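+	/* Drain the microkernel HWPerf circular buffer into the client array */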
+ for (i = 0;
+ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
+ i++)
+ {
+ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
+
+ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
+ psClientHWPerfEntry[i].ui32PID = psMKPerfEntry->ui32PID;
+ psClientHWPerfEntry[i].ui32RTData = psMKPerfEntry->ui32RTData;
+ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
+ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
+ psClientHWPerfEntry[i].ui32Info = psMKPerfEntry->ui32Info;
+ psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo,
+ psMKPerfEntry->ui32TimeWraps,
+ psMKPerfEntry->ui32Time);
+ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0][0],
+ &psMKPerfEntry->ui32Counters[0][0],
+ sizeof(psMKPerfEntry->ui32Counters));
+
+ OSMemCopy(&psClientHWPerfEntry[i].ui32MiscCounters[0][0],
+ &psMKPerfEntry->ui32MiscCounters[0][0],
+ sizeof(psMKPerfEntry->ui32MiscCounters));
+
+ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
+ }
+
+ *pui32DataCount = i;
+ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
+ *pui32HostTimeStamp = OSClockus();
+
+ return eError;
+}
+
+
diff --git a/drivers/gpu/pvr/sgx/sgxkick.c b/drivers/gpu/pvr/sgx/sgxkick.c
new file mode 100644
index 0000000..d5441b2
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxkick.c
@@ -0,0 +1,808 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+#include "services_headers.h"
+#include "sgxinfo.h"
+#include "sgxinfokm.h"
+#if defined (PDUMP)
+#include "sgxapi_km.h"
+#include "pdump_km.h"
+#endif
+#include "sgx_bridge_km.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+IMG_EXPORT
+#if defined (SUPPORT_SID_INTERFACE)
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK_KM *psCCBKick)
+#else
+PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
+#endif
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
+ SGXMKIF_CMDTA_SHARED *psTACmd;
+ IMG_UINT32 i;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psCCBKick->hDevMemContext;
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, KICK_TOKEN_DOKICK);
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, KICK_TOKEN_DOKICK);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
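+	/* Locate the shared portion of the TA command within the client CCB */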
+ psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_START, KICK_TOKEN_DOKICK);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CCB,
+ KICK_TOKEN_CCB_OFFSET, psCCBKick->ui32CCBOffset);
+
+
+ if (psCCBKick->hTA3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+
+ if (psCCBKick->bTADependency)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+
+ psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
+ if (psCCBKick->ui32NumTAStatusVals != 0)
+ {
+
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+#endif
+ }
+ }
+
+ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
+ if (psCCBKick->ui32Num3DStatusVals != 0)
+ {
+
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+#endif
+ }
+ }
+
+
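+	/* Fill in the remaining source/destination sync dependencies for this kick */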
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+
+ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+
+ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+
+ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+
+ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+
+ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+
+ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+
+ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+
+ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#else
+
+ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_SRC_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+
+
+ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
+
+ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#endif
+
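+	/* On the first kick (or a resume) build the hardware destination sync list */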
+ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
+ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
+ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
+
+ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->uAllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
+ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs)));
+
+ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
+ PDUMPMEM(IMG_NULL,
+ psHWDstSyncListMemInfo,
+ 0,
+ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+ }
+#endif
+
+ for (i=0; i<ui32NumDstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+
+ if (psSyncInfo)
+ {
+ psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_DST_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ #if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData)
+ + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
+ IMG_UINT32 ui32WOpsOffset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal);
+ IMG_UINT32 ui32ROpsOffset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal);
+ IMG_UINT32 ui32ROps2Offset = ui32SyncOffset
+ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal);
+
+ PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i);
+
+ PDUMPMEM(IMG_NULL,
+ psHWDstSyncListMemInfo,
+ ui32SyncOffset,
+ sizeof(PVRSRV_DEVICE_SYNC_OBJECT),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+
+ PDUMPCOMMENT("Init RT ROpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psHWDstSyncListMemInfo,
+ ui32WOpsOffset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+ ui32ModifiedValue = 0;
+ PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psHWDstSyncListMemInfo,
+ ui32ROpsOffset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+
+
+ PDUMPCOMMENT("Modify RT %d ROps2PendingVal in HWDevSyncList\r\n", i);
+ PDUMPMEM(&ui32ModifiedValue,
+ psHWDstSyncListMemInfo,
+ ui32ROps2Offset,
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
+ }
+ #endif
+ }
+ else
+ {
+ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0;
+ psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr.uiAddr = 0;
+
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0;
+ psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = 0;
+ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0;
+ }
+ }
+ }
+
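+	/* All dependencies are in place; mark the TA command as ready for the microkernel */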
+ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
+
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ PDUMPCOMMENT("Shared part of TA command\r\n");
+
+ PDUMPMEM(psTACmd,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_CMDTA_SHARED),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+
+				PDUMPCOMMENT("Init RT TA-SRC ROpsComplete %d\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+
+				PDUMPCOMMENT("Init RT TA-DST ROpsComplete %d\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+
+				PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete %d\r\n", i);
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+#else
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ IMG_UINT32 ui32ModifiedValue;
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+
+ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
+ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
+ {
+
+ PDUMPCOMMENT("Init RT ROpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+				PDUMPCOMMENT("Init RT ROps2Complete\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psSyncInfo->psSyncDataMemInfoKM,
+ offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete),
+ sizeof(psSyncInfo->psSyncData->ui32ReadOps2Complete),
+ 0,
+ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+ }
+
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+
+ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
+
+ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
+
+ PDUMPMEM(&ui32ModifiedValue,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
+ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ PDUMPCOMMENT("Modify TA/TQ ROpPendingVal\r\n");
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui32TATQSyncReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ PDUMPCOMMENT("Modify 3D/TQ ROpPendingVal\r\n");
+
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui323DTQSyncReadOpsPendingVal),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+#endif
+
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if !defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+#endif
+ }
+
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if !defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
+ sizeof(IMG_UINT32),
+ 0,
+ MAKEUNIQUETAG(psCCBMemInfo));
+#endif
+ }
+ }
+#endif
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_END,
+ KICK_TOKEN_DOKICK);
+
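+	/* Submit the TA command; if the microkernel requests a retry, roll back the pending counts taken above */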
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0, hDevMemContext, psCCBKick->bLastInScene);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++)
+ {
+
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+
+ if (psSyncInfo)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+#endif
+ }
+ }
+ }
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+#else
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+#endif
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+ }
+ else if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+ }
+
+
+#if defined(NO_HARDWARE)
+
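+	/* No hardware present: complete the sync and status values on the CPU */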
+ if (psCCBKick->hTA3DSyncInfo)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
+
+ if (psCCBKick->bTADependency)
+ {
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+
+ if (psCCBKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+ if (psCCBKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+
+ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo;
+
+ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
+ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
+ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
+#endif
+ }
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+#else
+
+ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+#endif
+
+ if (psCCBKick->bTerminateOrAbort)
+ {
+ if (psCCBKick->ui32NumDstSyncObjects > 0)
+ {
+ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
+ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
+ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
+
+ for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
+ if (psSyncInfo)
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1;
+ }
+ }
+
+
+ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+ {
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo;
+
+ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
+ + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr
+ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
+#else
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
+#endif
+ }
+ }
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ KICK_TOKEN_DOKICK);
+ return eError;
+}
+
diff --git a/drivers/gpu/pvr/sgx/sgxpower.c b/drivers/gpu/pvr/sgx/sgxpower.c
new file mode 100644
index 0000000..b647b68
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxpower.c
@@ -0,0 +1,483 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "sgxapi_km.h"
+#include "sgx_mkif_km.h"
+#include "sgxutils.h"
+#include "pdump_km.h"
+
+
+#if defined(SUPPORT_HW_RECOVERY)
+static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGX_TIMING_INFORMATION *psSGXTimingInfo,
+ IMG_HANDLE *phTimer)
+{
+
+
+
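+	/* Register the host timer used to check for microkernel lockups */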
+ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
+ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
+ if(*phTimer == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
+#endif
+
+
+static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+ SGX_TIMING_INFORMATION sSGXTimingInfo = {0};
+#else
+ SGX_DEVICE_MAP *psSGXDeviceMap;
+#endif
+ IMG_UINT32 ui32ActivePowManSampleRate;
+ SGX_TIMING_INFORMATION *psSGXTimingInfo;
+
+
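+	/* Fetch the SGX timing information, either dynamically from the system layer or from the static device map */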
+#if defined(SGX_DYNAMIC_TIMING_INFO)
+ psSGXTimingInfo = &sSGXTimingInfo;
+ SysGetSGXTimingInformation(psSGXTimingInfo);
+#else
+ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
+ (IMG_VOID**)&psSGXDeviceMap);
+ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
+#endif
+
+#if defined(SUPPORT_HW_RECOVERY)
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32OlduKernelFreq;
+
+ if (psDevInfo->hTimer != IMG_NULL)
+ {
+ ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock;
+ if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq)
+ {
+
+
+ IMG_HANDLE hNewTimer;
+
+ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer);
+ if (eError == PVRSRV_OK)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer"));
+ }
+ psDevInfo->hTimer = hNewTimer;
+ }
+ else
+ {
+
+ }
+ }
+ }
+ else
+ {
+ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
+ psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
+ }
+#endif
+
+
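+	/* Cache the core clock speed and microkernel timer divider, and publish the divider to the host control block */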
+ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
+ psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
+
+
+ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
+#if defined(PDUMP)
+ PDUMPCOMMENT("Host Control - Microkernel clock");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+#endif
+
+ if (psSGXTimingInfo->bEnableActivePM)
+ {
+ ui32ActivePowManSampleRate =
+ psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
+
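+		/* Add one sample period to allow for the integer division above rounding down */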
+ ui32ActivePowManSampleRate += 1;
+ }
+ else
+ {
+ ui32ActivePowManSampleRate = 0;
+ }
+
+ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate;
+#if defined(PDUMP)
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo)
+{
+ #if defined(SUPPORT_HW_RECOVERY)
+ PVRSRV_ERROR eError;
+
+ eError = OSEnableTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer"));
+ }
+ #else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ #endif
+}
+
+
+static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Register,
+ IMG_UINT32 ui32RegisterValue,
+ IMG_CHAR *pszComment)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32Register);
+ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
+ PVR_UNREFERENCED_PARAMETER(pszComment);
+
+ #if !defined(NO_HARDWARE)
+ PVR_ASSERT(psDevInfo != IMG_NULL);
+
+
+ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
+ 0,
+ ui32RegisterValue,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPollForClockGating: %s failed.", pszComment));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ PDUMPCOMMENT("%s", pszComment);
+ PDUMPREGPOL(SGX_PDUMPREG_NAME, ui32Register, 0, ui32RegisterValue, PDUMP_POLL_OPERATOR_EQUAL);
+}
+
+
+PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32PowerCmd, ui32CompleteStatus;
+ SGXMKIF_COMMAND sCommand = {0};
+ IMG_UINT32 ui32Core;
+ IMG_UINT32 ui32CoresEnabled;
+
+ #if defined(SUPPORT_HW_RECOVERY)
+
+ eError = OSDisableTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
+ return eError;
+ }
+ #endif
+
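+		/* Choose between a full power-off and an idle request for the microkernel */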
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+
+ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
+ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
+ PDUMPCOMMENT("SGX power off request");
+ }
+ else
+ {
+
+ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
+ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
+ PDUMPCOMMENT("SGX idle request");
+ }
+
+ sCommand.ui32Data[1] = ui32PowerCmd;
+
+ eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
+ return eError;
+ }
+
+
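+		/* Wait for the microkernel to acknowledge the power transition */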
+ #if !defined(NO_HARDWARE)
+ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
+ ui32CompleteStatus,
+ ui32CompleteStatus,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ #if defined(PDUMP)
+ PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel.");
+ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
+ ui32CompleteStatus,
+ ui32CompleteStatus,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif
+
+#if defined(SGX_FEATURE_MP)
+ ui32CoresEnabled = ((OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE) & EUR_CR_MASTER_CORE_ENABLE_MASK) >> EUR_CR_MASTER_CORE_ENABLE_SHIFT) + 1;
+#else
+ ui32CoresEnabled = 1;
+#endif
+
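+		/* Wait for the clocks on each enabled core to gate before removing power */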
+ for (ui32Core = 0; ui32Core < ui32CoresEnabled; ui32Core++)
+ {
+
+ SGXPollForClockGating(psDevInfo,
+ SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core),
+ psDevInfo->ui32ClkGateStatusMask,
+ "Wait for SGX clock gating");
+ }
+
+ #if defined(SGX_FEATURE_MP)
+
+ SGXPollForClockGating(psDevInfo,
+ psDevInfo->ui32MasterClkGateStatusReg,
+ psDevInfo->ui32MasterClkGateStatusMask,
+ "Wait for SGX master clock gating");
+
+ SGXPollForClockGating(psDevInfo,
+ psDevInfo->ui32MasterClkGateStatus2Reg,
+ psDevInfo->ui32MasterClkGateStatus2Mask,
+ "Wait for SGX master clock gating (2)");
+ #endif
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+
+ eError = SGXDeinitialise(psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %u", eError));
+ return eError;
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+
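+		/* Reset the power status handshake before bringing SGX back up */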
+ psSGXHostCtl->ui32PowerStatus = 0;
+ #if defined(PDUMP)
+ PDUMPCOMMENT("Host Control - Reset power status");
+ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
+ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
+ #endif
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+
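+			/* Coming back from power-off: refresh the timing information and reinitialise SGX */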
+ eError = SGXUpdateTimingInfo(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
+ return eError;
+ }
+
+
+
+ eError = SGXInitialise(psDevInfo, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
+ return eError;
+ }
+ }
+ else
+ {
+
+
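+			/* Coming back from idle: tell the microkernel to resume */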
+ SGXMKIF_COMMAND sCommand = {0};
+
+ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
+ eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %u", eError));
+ return eError;
+ }
+ }
+
+ SGXStartTimer(psDevInfo);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ if (bIdleDevice)
+ {
+
+ PDUMPSUSPEND();
+
+ eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE,
+ PVRSRV_DEV_POWER_STATE_ON);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPRESUME();
+ return eError;
+ }
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %uHz",
+ psDevInfo->ui32CoreClockSpeed));
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
+ IMG_BOOL bIdleDevice,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
+
+ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ PVRSRV_ERROR eError;
+
+
+
+ eError = SGXUpdateTimingInfo(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"SGXPostClockSpeedChange: SGXUpdateTimingInfo failed"));
+ return eError;
+ }
+
+ if (bIdleDevice)
+ {
+
+ eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_IDLE);
+
+ PDUMPRESUME();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+ else
+ {
+ SGXStartTimer(psDevInfo);
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %uHz to %uHz",
+ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed));
+
+ return PVRSRV_OK;
+}
+
+
diff --git a/drivers/gpu/pvr/sgx/sgxreset.c b/drivers/gpu/pvr/sgx/sgxreset.c
new file mode 100644
index 0000000..45e6d79
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxreset.c
@@ -0,0 +1,671 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "sgxdefs.h"
+#include "sgxmmu.h"
+#include "services_headers.h"
+#include "sgxinfokm.h"
+#include "sgxconfig.h"
+#include "sgxutils.h"
+
+#include "pdump_km.h"
+
+
+IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ ui32RegVal = psDevInfo->ui32ClkGateCtl;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(EUR_CR_CLKGATECTL2)
+ ui32RegVal = psDevInfo->ui32ClkGateCtl2;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL2, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL2, ui32RegVal, ui32PDUMPFlags);
+#endif
+}
+
+
+static IMG_VOID SGXResetInitBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF bank settings\r\n");
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+#endif
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF directory list\r\n");
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ {
+ IMG_UINT32 ui32DirList, ui32DirListReg;
+
+ for (ui32DirList = 1;
+ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
+ ui32DirList++)
+ {
+ ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
+ }
+ }
+#endif
+}
+
+
+static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags)
+{
+ IMG_UINT32 ui32RegVal;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+
+ ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
+
+ #if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
+
+ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
+ #endif
+
+ #if defined(FIX_HW_BRN_23410)
+
+ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
+ #endif
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+ #endif
+
+ {
+ IMG_UINT32 ui32EDMDirListReg;
+
+
+ #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
+ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
+ #else
+
+ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
+ #endif
+
+ ui32RegVal = psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT;
+
+#if defined(FIX_HW_BRN_28011)
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+ PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+#endif
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n");
+ PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+ }
+}
+
+
+static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+#if defined(PDUMP) || defined(EMULATOR)
+ IMG_UINT32 ui32ReadRegister;
+
+ #if defined(SGX_FEATURE_MP)
+ ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET;
+ #else
+ ui32ReadRegister = EUR_CR_SOFT_RESET;
+ #endif
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+
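+	/* Wait a number of SGX clock cycles so the preceding register writes can take effect. */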
+ SGXWaitClocks(psDevInfo, 100);
+ if (bPDump)
+ {
+ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n");
+ PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags);
+#endif
+ }
+
+#if defined(EMULATOR)
+
+
+ OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister);
+#endif
+}
+
+
+#if !defined(SGX_FEATURE_MP)
+static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bResetBIF,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+ IMG_UINT32 ui32SoftResetRegVal;
+
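+	/* Build a soft reset mask covering every module present on this core; optional blocks are guarded by their register mask #defines. */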
+ ui32SoftResetRegVal =
+
+ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
+ EUR_CR_SOFT_RESET_TA_RESET_MASK |
+ EUR_CR_SOFT_RESET_USE_RESET_MASK |
+ EUR_CR_SOFT_RESET_ISP_RESET_MASK |
+ EUR_CR_SOFT_RESET_TSP_RESET_MASK;
+
+#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
+#endif
+#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ if (bResetBIF)
+ {
+ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+ }
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
+ }
+}
+
+
+static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32PDUMPFlags,
+ IMG_BOOL bPDump)
+{
+ IMG_UINT32 ui32RegVal;
+
+
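+	/* Invalidate the BIF directory cache: some cores provide a dedicated invalidate register, others toggle INVALDC in BIF_CTRL. */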
+#if defined(EUR_CR_BIF_CTRL_INVAL)
+ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags);
+ }
+#else
+ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+ }
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ if (bPDump)
+ {
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+ }
+#endif
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
+
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+ {
+
+
+
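+		/* Wait until the BIF has no outstanding memory reads, indicating the invalidate has completed. */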
+ if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
+ 0,
+ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
+ PVR_DBG_BREAK;
+ }
+
+ if (bPDump)
+ {
+ PDUMPREGPOLWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags, PDUMP_POLL_OPERATOR_EQUAL);
+ }
+ }
+#endif
+}
+#endif
+
+
+IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_BOOL bHardwareRecovery,
+ IMG_UINT32 ui32PDUMPFlags)
+#if !defined(SGX_FEATURE_MP)
+{
+ IMG_UINT32 ui32RegVal;
+#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
+ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
+#else
+ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
+#endif
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
+
+#if defined(FIX_HW_BRN_23944)
+
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ if (ui32RegVal & ui32BifFaultMask)
+ {
+
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+ }
+#endif
+
+
+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+
+
+#if defined(SGX_FEATURE_36BIT_MMU)
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
+#endif
+
+ SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+
+
+ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
+ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
+ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
+#endif
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+
+ ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK;
+ #else
+ #if defined(FIX_HW_BRN_26620)
+ ui32RegVal = 0;
+ #else
+
+ ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK;
+ #endif
+ #if defined(FIX_HW_BRN_34028)
+
+ ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT);
+ #endif
+ #endif
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal);
+ PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal);
+#endif
+
+ if (bHardwareRecovery)
+ {
+
+
+
+
+
+
+
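+		/* Hardware recovery: point the BIF at the driver's reset page directory so outstanding faulting accesses resolve against a dummy page. */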
+ ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+
+
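+		/* Keep servicing BIF page faults until none remain outstanding. */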
+ for (;;)
+ {
+ IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+ IMG_DEV_VIRTADDR sBifFault;
+ IMG_UINT32 ui32PDIndex, ui32PTIndex;
+
+ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
+ {
+ break;
+ }
+
+
+
+
+ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
+ ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+ ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+
+
+ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
+
+
+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
+ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PDE_PAGE_SIZE_4K
+ | SGX_MMU_PDE_VALID;
+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
+ >>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
+ | SGX_MMU_PTE_VALID;
+
+
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
+ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+
+
+ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
+ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
+ }
+ }
+ else
+ {
+
+ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+ }
+
+
+
+ SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
+
+ #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
+ #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
+ #endif
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
+#endif
+
+
+ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
+
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
+}
+
+#else
+
+{
+ IMG_UINT32 ui32RegVal;
+
+ PVR_UNREFERENCED_PARAMETER(bHardwareRecovery);
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+#endif
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX MP reset sequence\r\n");
+
+
+ ui32RegVal = EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK |
+ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
+
+#if defined(SGX_FEATURE_PTLA)
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_MASK;
+#endif
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
+#endif
+
+
+ ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(0) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(1) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(2) |
+ EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(3);
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Soft reset hydra partition, hard reset the cores\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra BIF control\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(SGX_BYPASS_SYSTEM_CACHE)
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK;
+ #else
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
+ #if defined(FIX_HW_BRN_30954)
+ EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK |
+ #endif
+ #if defined(PVR_SLC_8KB_ADDRESS_MODE)
+ (4 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) |
+ #endif
+ #if defined(FIX_HW_BRN_33809)
+ (2 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) |
+ #endif
+ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC control\r\n");
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
+
+ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
+ #if defined(FIX_HW_BRN_31620)
+ ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_MASK;
+ #endif
+ #if defined(FIX_HW_BRN_31195)
+ ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK |
+ EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK;
+ #endif
+ #endif
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC bypass control\r\n");
+ PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
+#endif
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+
+ ui32RegVal = 0;
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Remove the resets from all of SGX\r\n");
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Turn on the slave cores' clock gating\r\n");
+ SGXInitClocks(psDevInfo, ui32PDUMPFlags);
+
+ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the slave BIFs\r\n");
+
+#if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_31620) || defined(FIX_HW_BRN_31671) || defined(FIX_HW_BRN_32085)
+ #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085)
+
+ ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT);
+ #else
+ ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_MASK;
+ #endif
+ #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671)
+
+ ui32RegVal |= EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK;
+ #endif
+
+
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal, ui32PDUMPFlags);
+
+ #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085)
+
+ ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT);
+ #else
+ ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK;
+ #endif
+ #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671)
+
+ ui32RegVal |= EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK;
+ #endif
+
+
+ {
+ IMG_UINT32 ui32Core;
+
+ for (ui32Core=0;ui32Core<SGX_FEATURE_MP_CORE_COUNT;ui32Core++)
+ {
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal);
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal, ui32PDUMPFlags);
+ }
+ }
+#endif
+
+ SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags);
+ SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags);
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX MP reset sequence\r\n");
+}
+#endif
+
+
diff --git a/drivers/gpu/pvr/sgx/sgxtransfer.c b/drivers/gpu/pvr/sgx/sgxtransfer.c
new file mode 100644
index 0000000..f0ce1b5
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxtransfer.c
@@ -0,0 +1,773 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(TRANSFER_QUEUE)
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgxinfo.h"
+#include "sysconfig.h"
+#include "pdump_km.h"
+#include "mmu.h"
+#include "pvr_bridge.h"
+#include "sgx_bridge_km.h"
+#include "sgxinfokm.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick)
+#else
+IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
+#endif
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+ SGXMKIF_COMMAND sCommand = {0};
+ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 loop;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+ IMG_BOOL abSrcSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS];
+ IMG_UINT32 ui32RealSrcSyncNum = 0;
+ IMG_BOOL abDstSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS];
+ IMG_UINT32 ui32RealDstSyncNum = 0;
+
+
+#if defined(PDUMP)
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psKick->hDevMemContext;
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, TRANSFER_TOKEN_SUBMIT);
+
+ for (loop = 0; loop < SGX_MAX_TRANSFER_SYNC_OPS; loop++)
+ {
+ abSrcSyncEnable[loop] = IMG_TRUE;
+ abDstSyncEnable[loop] = IMG_TRUE;
+ }
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset"));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_START, TRANSFER_TOKEN_SUBMIT);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CCB,
+ TRANSFER_TOKEN_CCB_OFFSET, psKick->ui32SharedCmdCCBOffset);
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_TA_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+ else
+ {
+ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_3D_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+ else
+ {
+ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
+ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
+ }
+
+
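+	/* Scan the source and destination sync lists and disable duplicate entries so each sync object is only counted once. */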
+ for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumSrcSync); loop++)
+ {
+ IMG_UINT32 i;
+
+ PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+
+ for (i = 0; i < loop; i++)
+ {
+ if (abSrcSyncEnable[i])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
+
+ if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same src synchronized multiple times!"));
+ abSrcSyncEnable[loop] = IMG_FALSE;
+ break;
+ }
+ }
+ }
+ if (abSrcSyncEnable[loop])
+ {
+ ui32RealSrcSyncNum++;
+ }
+ }
+ for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumDstSync); loop++)
+ {
+ IMG_UINT32 i;
+
+ PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+
+ for (i = 0; i < loop; i++)
+ {
+ if (abDstSyncEnable[i])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
+
+ if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same dst synchronized multiple times!"));
+ abDstSyncEnable[loop] = IMG_FALSE;
+ break;
+ }
+ }
+ }
+ if (abDstSyncEnable[loop])
+ {
+ ui32RealDstSyncNum++;
+ }
+ }
+
+ psSharedTransferCmd->ui32NumSrcSyncs = ui32RealSrcSyncNum;
+ psSharedTransferCmd->ui32NumDstSyncs = ui32RealDstSyncNum;
+
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ IMG_UINT32 i = 0;
+
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_SRC_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSharedTransferCmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ psSharedTransferCmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ i++;
+ }
+ }
+ PVR_ASSERT(i == ui32RealSrcSyncNum);
+
+ i = 0;
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+
+ psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount;
+
+ PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_DST_SYNC,
+ psSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+
+ psSharedTransferCmd->asDstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ psSharedTransferCmd->asDstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ psSharedTransferCmd->asDstSyncs[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ psSharedTransferCmd->asDstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ psSharedTransferCmd->asDstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ psSharedTransferCmd->asDstSyncs[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+ i++;
+ }
+ }
+ PVR_ASSERT(i == ui32RealDstSyncNum);
+
+
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+ }
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+ }
+ }
+
+#if defined(PDUMP)
+ if ((PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ && (bPersistentProcess == IMG_FALSE) )
+ {
+ PDUMPCOMMENT("Shared part of transfer command\r\n");
+ PDUMPMEM(psSharedTransferCmd,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ IMG_UINT32 i = 0;
+
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[loop];
+
+ PDUMPCOMMENT("Hack src surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Hack src surface read op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ i++;
+ }
+ }
+
+ i = 0;
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+				if (abDstSyncEnable[loop])
+ {
+ IMG_UINT32 ui32PDumpReadOp2 = 0;
+ psSyncInfo = psKick->ahDstSyncInfo[loop];
+
+ PDUMPCOMMENT("Hack dest surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Hack dest surface read op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Hack dest surface read op2 in transfer cmd\r\n");
+ PDUMPMEM(&ui32PDumpReadOp2,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal)),
+ sizeof(ui32PDumpReadOp2),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ i++;
+ }
+ }
+
+
+ for (loop = 0; loop < (psKick->ui32NumSrcSync); loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+ }
+
+ for (loop = 0; loop < (psKick->ui32NumDstSync); loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+					psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hTASyncInfo;
+
+ PDUMPCOMMENT("Tweak TA/TQ surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32TASyncWriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->h3DSyncInfo;
+
+ PDUMPCOMMENT("Tweak 3D/TQ surface write op in transfer cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui323DSyncWriteOpsPendingVal)),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+#endif
+
+ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
+
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_END,
+ TRANSFER_TOKEN_SUBMIT);
+
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+
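+		/* The kick could not be queued: roll back the pending-operation counts taken above so the caller can retry. */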
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
+ {
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
+ }
+#endif
+ }
+ }
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+#endif
+ }
+ }
+ }
+
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ }
+
+ else if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return eError;
+ }
+
+
+#if defined(NO_HARDWARE)
+ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0)
+ {
+
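+		/* No hardware present: mark all sync operations as complete immediately. */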
+ for (loop = 0; loop < psKick->ui32NumSrcSync; loop++)
+ {
+ if (abSrcSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+ }
+
+ for (loop = 0; loop < psKick->ui32NumDstSync; loop++)
+ {
+ if (abDstSyncEnable[loop])
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop];
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+ }
+#endif
+ PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT,
+ TRANSFER_TOKEN_SUBMIT);
+ return eError;
+}
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+#if defined (SUPPORT_SID_INTERFACE)
+IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick)
+#else
+IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
+#endif
+
+{
+ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+ SGXMKIF_COMMAND sCommand = {0};
+ SGXMKIF_2DCMD_SHARED *ps2DCmd;
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ IMG_HANDLE hDevMemContext = IMG_NULL;
+#if defined(PDUMP)
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif
+#if defined(FIX_HW_BRN_31620)
+ hDevMemContext = psKick->hDevMemContext;
+#endif
+
+ if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+ ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
+
+ OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
+
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+
+ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+
+ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+
+ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+
+ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ ps2DCmd->sDstSyncData.ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
+ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
+ ps2DCmd->sDstSyncData.sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr;
+ }
+
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending++;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending++;
+ }
+
+#if defined(PDUMP)
+ if ((PDumpIsCaptureFrameKM()
+ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ && (bPersistentProcess == IMG_FALSE) )
+ {
+
+ PDUMPCOMMENT("Shared part of 2D command\r\n");
+ PDUMPMEM(ps2DCmd,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff,
+ sizeof(SGXMKIF_2DCMD_SHARED),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+
+ PDUMPCOMMENT("Hack src surface write op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Hack src surface read op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ IMG_UINT32 ui32PDumpReadOp2 = 0;
+ psSyncInfo = psKick->hDstSyncInfo;
+
+ PDUMPCOMMENT("Hack dest surface write op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+
+ PDUMPCOMMENT("Hack dest surface read op in 2D cmd\r\n");
+ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal),
+ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ PDUMPCOMMENT("Hack dest surface read op2 in 2D cmd\r\n");
+ PDUMPMEM(&ui32PDumpReadOp2,
+ psCCBMemInfo,
+ psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOps2PendingVal),
+ sizeof(ui32PDumpReadOp2),
+ psKick->ui32PDumpFlags,
+ MAKEUNIQUETAG(psCCBMemInfo));
+ }
+
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+ }
+ }
+#endif
+
+ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
+
+ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+
+
+#if defined(PDUMP)
+ if (PDumpIsCaptureFrameKM())
+ {
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
+ }
+ }
+#endif
+
+ for (i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsPending--;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = psKick->hDstSyncInfo;
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsPending--;
+ }
+ }
+
+
+
+
+#if defined(NO_HARDWARE)
+
+ for(i = 0; i < psKick->ui32NumSrcSync; i++)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
+ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ }
+
+ if (psKick->hDstSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->hTASyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+
+ if (psKick->h3DSyncInfo != IMG_NULL)
+ {
+ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
+
+ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+ }
+#endif
+
+ return eError;
+}
+#endif
+#endif
diff --git a/drivers/gpu/pvr/sgx/sgxutils.c b/drivers/gpu/pvr/sgx/sgxutils.c
new file mode 100644
index 0000000..528490b
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxutils.c
@@ -0,0 +1,1725 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include <stddef.h>
+
+#include "sgxdefs.h"
+#include "services_headers.h"
+#include "buffer_manager.h"
+#include "sgx_bridge_km.h"
+#include "sgxapi_km.h"
+#include "sgxinfo.h"
+#include "sgx_mkif_km.h"
+#include "sysconfig.h"
+#include "pdump_km.h"
+#include "mmu.h"
+#include "pvr_bridge_km.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "sgxutils.h"
+#include "ttrace.h"
+
+#ifdef __linux__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#else
+#include <stdio.h>
+#endif
+
+IMG_UINT64 ui64KickCount;
+
+
+#if defined(SYS_CUSTOM_POWERDOWN)
+PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID);
+#endif
+
+
+
+static IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+
+ psSGXHostCtl->ui32NumActivePowerEvents++;
+
+ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
+ {
+
+
+
+ if (ui32CallerID == ISR_ID)
+ {
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ }
+ else
+ {
+ SGXScheduleProcessQueuesKM(psDeviceNode);
+ }
+ }
+}
+
+
+IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32CallerID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+
+#if defined(SYS_SUPPORTS_SGX_IDLE_CALLBACK)
+ if (!psDevInfo->bSGXIdle &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0))
+ {
+ psDevInfo->bSGXIdle = IMG_TRUE;
+ SysSGXIdleTransition(psDevInfo->bSGXIdle);
+ }
+ else if (psDevInfo->bSGXIdle &&
+ ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0))
+ {
+ psDevInfo->bSGXIdle = IMG_FALSE;
+ SysSGXIdleTransition(psDevInfo->bSGXIdle);
+ }
+#endif
+
+ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0) &&
+ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0))
+ {
+
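+		/* The microkernel has requested an active power-off: acknowledge the request and power the device down. */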
+ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
+
+
+ PDUMPSUSPEND();
+
+#if defined(SYS_CUSTOM_POWERDOWN)
+
+
+
+ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
+#else
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ ui32CallerID, IMG_FALSE);
+ if (eError == PVRSRV_OK)
+ {
+ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
+ }
+#endif
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+
+
+ psSGXHostCtl->ui32InterruptClearFlags &= ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
+ eError = PVRSRV_OK;
+ }
+
+
+ PDUMPRESUME();
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%u", eError));
+ }
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SGXAcquireKernelCCBSlot)
+#endif
+static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
+{
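+	/* The kernel CCB is a 256-entry ring; a slot is free as long as advancing the write offset would not catch up with the read offset. */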
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
+ {
+ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
+ }
+
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+
+ return IMG_NULL;
+}
+
+PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCmdType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene)
+{
+ PVRSRV_SGX_CCB_INFO *psKernelCCB;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SGXMKIF_COMMAND *psSGXCommand;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#if defined(FIX_HW_BRN_31620)
+ IMG_UINT32 ui32CacheMasks[4];
+ IMG_UINT32 i;
+ MMU_CONTEXT *psMMUContext;
+#endif
+#if defined(PDUMP)
+ IMG_VOID *pvDumpCommand;
+ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
+ IMG_BOOL bPersistentProcess = IMG_FALSE;
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ for(i=0;i<4;i++)
+ {
+ ui32CacheMasks[i] = 0;
+ }
+
+ psMMUContext = psDevInfo->hKernelMMUContext;
+ psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[0]);
+
+
+ if (hDevMemContext)
+ {
+ BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext;
+
+ psMMUContext = psBMContext->psMMUContext;
+ psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[2]);
+ }
+
+
+ if (ui32CacheMasks[0] || ui32CacheMasks[1] || ui32CacheMasks[2] || ui32CacheMasks[3])
+ {
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
+ }
+#endif
+
+#if defined(FIX_HW_BRN_28889)
+
+
+
+
+ if ( (eCmdType != SGXMKIF_CMD_PROCESS_QUEUES) &&
+ ((psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_DATA) != 0) &&
+ ((psDevInfo->ui32CacheControl & (SGXMKIF_CC_INVAL_BIF_PT | SGXMKIF_CC_INVAL_BIF_PD)) != 0))
+ {
+ #if defined(PDUMP)
+ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ #endif
+ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+ SGXMKIF_COMMAND sCacheCommand = {0};
+
+ eError = SGXScheduleCCBCommand(psDeviceNode,
+ SGXMKIF_CMD_PROCESS_QUEUES,
+ &sCacheCommand,
+ ui32CallerID,
+ ui32PDumpFlags,
+ hDevMemContext,
+ bLastInScene);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+
+ #if !defined(NO_HARDWARE)
+ if(PollForValueKM(&psSGXHostCtl->ui32InvalStatus,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ 2 * MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommand: Wait for uKernel to Invalidate BIF cache failed"));
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ #if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for BIF cache invalidate request to complete");
+ PDUMPMEMPOL(psSGXHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32InvalStatus),
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ #endif
+
+ psSGXHostCtl->ui32InvalStatus &= ~(PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE);
+ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+ if ((eCmdType != SGXMKIF_CMD_FLUSHPDCACHE) && (psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_BIF_PD))
+ {
+ SGXMKIF_COMMAND sPDECacheCommand = {0};
+ IMG_DEV_PHYADDR sDevPAddr;
+
+
+ psMMUContext = psDevInfo->hKernelMMUContext;
+
+ psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr);
+ sPDECacheCommand.ui32Data[0] = sDevPAddr.uiAddr | 1;
+ sPDECacheCommand.ui32Data[1] = ui32CacheMasks[0];
+ sPDECacheCommand.ui32Data[2] = ui32CacheMasks[1];
+
+
+ if (hDevMemContext)
+ {
+ BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext;
+
+ psMMUContext = psBMContext->psMMUContext;
+
+ psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr);
+
+ sPDECacheCommand.ui32Data[3] = sDevPAddr.uiAddr | 1;
+ sPDECacheCommand.ui32Data[4] = ui32CacheMasks[2];
+ sPDECacheCommand.ui32Data[5] = ui32CacheMasks[3];
+ }
+
+
+ if (sPDECacheCommand.ui32Data[1] | sPDECacheCommand.ui32Data[2] | sPDECacheCommand.ui32Data[4] |
+ sPDECacheCommand.ui32Data[5])
+ {
+ eError = SGXScheduleCCBCommand(psDeviceNode,
+ SGXMKIF_CMD_FLUSHPDCACHE,
+ &sPDECacheCommand,
+ ui32CallerID,
+ ui32PDumpFlags,
+ hDevMemContext,
+ bLastInScene);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+ }
+ }
+#endif
+#if defined(PDUMP)
+
+ {
+ PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
+ if(psPerProc != IMG_NULL)
+ {
+ bPersistentProcess = psPerProc->bPDumpPersistent;
+ }
+ }
+#endif
+ psKernelCCB = psDevInfo->psKernelCCBInfo;
+
+ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
+
+
+ if(!psSGXCommand)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Wait for CCB space timed out"));
+ eError = PVRSRV_ERROR_TIMEOUT;
+ goto Exit;
+ }
+
+
+ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
+
+#if defined(PDUMP)
+
+ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
+#endif
+
+
+ psDevInfo->ui32CacheControl = 0;
+
+
+ *psSGXCommand = *psCommandData;
+
+ if (eCmdType >= SGXMKIF_CMD_MAX)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Unknown command type: %d", eCmdType)) ;
+ eError = PVRSRV_ERROR_INVALID_CCB_COMMAND;
+ goto Exit;
+ }
+
+ if (eCmdType == SGXMKIF_CMD_2D ||
+ eCmdType == SGXMKIF_CMD_TRANSFER ||
+ ((eCmdType == SGXMKIF_CMD_TA) && bLastInScene))
+ {
+ SYS_DATA *psSysData;
+
+
+ SysAcquireData(&psSysData);
+
+ if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
+ {
+ OSFlushCPUCacheKM();
+ }
+ else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
+ {
+ OSCleanCPUCacheKM();
+ }
+
+
+ psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE;
+ }
+
+ PVR_ASSERT(eCmdType < SGXMKIF_CMD_MAX);
+ psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType];
+
+#if defined(PDUMP)
+ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) &&
+ (bPersistentProcess == IMG_FALSE) )
+ {
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n");
+ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
+ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff,
+ 0xff,
+ PDUMP_POLL_OPERATOR_NOTEQUAL,
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command (type == %d)\r\n", eCmdType);
+ pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND)));
+
+ PDUMPMEM(pvDumpCommand,
+ psKernelCCB->psCCBMemInfo,
+ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
+ sizeof(SGXMKIF_COMMAND),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
+
+
+ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
+ psKernelCCB->psCCBMemInfo,
+ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) +
+ offsetof(SGXMKIF_COMMAND, ui32CacheControl),
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
+
+ if (PDumpIsCaptureFrameKM()
+ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+
+ psDevInfo->sPDContext.ui32CacheControl = 0;
+ }
+ }
+#endif
+
+#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+
+ eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
+ *psKernelCCB->pui32WriteOffset,
+ 0xFF,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Timeout waiting for previous command to be read")) ;
+ eError = PVRSRV_ERROR_TIMEOUT;
+ goto Exit;
+ }
+#endif
+
+
+
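+	/* Advance the kernel CCB write offset; the CCB is a 256-entry ring. */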
+ *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
+
+#if defined(PDUMP)
+ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) &&
+ (bPersistentProcess == IMG_FALSE) )
+ {
+ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n");
+ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
+ (psKernelCCB->ui32CCBDumpWOff),
+ 0xFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+ #endif
+
+ if (PDumpIsCaptureFrameKM()
+ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
+ {
+ psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
+ psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF;
+ }
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n");
+ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
+ psKernelCCB->psCCBCtlMemInfo,
+ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n");
+ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
+ psDevInfo->psKernelCCBEventKickerMemInfo,
+ 0,
+ sizeof(IMG_UINT32),
+ ui32PDumpFlags,
+ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n");
+ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
+ #else
+ PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
+ #endif
+ }
+#endif
+
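+	/* Bump the event kicker (modulo 256) and then kick the SGX microkernel
+	 * via the event kick register below.
+	 */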
+ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
+
+ OSWriteMemoryBarrier();
+
+
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_KERNEL_CCB_OFFSET, *psKernelCCB->pui32WriteOffset);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_CORE_CLK, psDevInfo->ui32CoreClockSpeed);
+ PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE,
+ MKSYNC_TOKEN_UKERNEL_CLK, psDevInfo->ui32uKernelTimerClock);
+
+
+#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
+ EUR_CR_EVENT_KICK2_NOW_MASK);
+#else
+ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
+ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
+ EUR_CR_EVENT_KICK_NOW_MASK);
+#endif
+
+ OSMemoryBarrier();
+
+#if defined(NO_HARDWARE)
+
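+	/* With no hardware present, emulate the microkernel consuming the command
+	 * by advancing the read offset ourselves.
+	 */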
+ *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
+#endif
+
+ ui64KickCount++;
+Exit:
+ return eError;
+}
+
+
+PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCmdType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene)
+{
+ PVRSRV_ERROR eError;
+
+
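+	/* Ensure the device is powered on before queuing the command; PDump is
+	 * suspended across the power transition. An ISR caller that sees
+	 * PVRSRV_ERROR_RETRY defers the command by rescheduling the MISR.
+	 */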
+ PDUMPSUSPEND();
+
+
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE_ON,
+ ui32CallerID,
+ IMG_TRUE);
+
+ PDUMPRESUME();
+
+ if (eError == PVRSRV_OK)
+ {
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
+ }
+ else
+ {
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (ui32CallerID == ISR_ID)
+ {
+ SYS_DATA *psSysData;
+
+
+
+
+ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+ eError = PVRSRV_OK;
+
+ SysAcquireData(&psSysData);
+ OSScheduleMISR(psSysData);
+ }
+ else
+ {
+
+
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
+ "ui32CallerID:%d eError:%u", ui32CallerID, eError));
+ }
+
+ return eError;
+ }
+
+ eError = SGXScheduleCCBCommand(psDeviceNode, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags, hDevMemContext, bLastInScene);
+
+ PVRSRVPowerUnlock(ui32CallerID);
+ return eError;
+}
+
+
+PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+ IMG_UINT32 ui32PowerStatus;
+ SGXMKIF_COMMAND sCommand = {0};
+
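+	/* Skip the kick if the microkernel has already reported it has no work. */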
+ ui32PowerStatus = psHostCtl->ui32PowerStatus;
+ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
+ {
+
+ return PVRSRV_OK;
+ }
+
+ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %u", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+#if defined (SUPPORT_SID_INTERFACE)
+ SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo)
+#else
+ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
+#endif
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
+ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
+
+
+ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
+ (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
+ IMG_UINT32 ui32CleanupType,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_KERNEL_MEM_INFO *psHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
+ SGXMKIF_HOST_CTL *psHostCtl = psHostCtlMemInfo->pvLinAddrKM;
+
+ SGXMKIF_COMMAND sCommand = {0};
+
+
+ if (bForceCleanup != FORCE_CLEANUP)
+ {
+ sCommand.ui32Data[0] = ui32CleanupType;
+ sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
+ PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resource clean-up, Type %u, Data 0x%X", sCommand.ui32Data[0], sCommand.ui32Data[1]);
+
+ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ return eError;
+ }
+
+
+ #if !defined(NO_HARDWARE)
+ if(PollForValueKM(&psHostCtl->ui32CleanupStatus,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
+ 10 * MAX_HW_TIME_US,
+ 1000,
+ IMG_TRUE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up (%u) failed", ui32CleanupType));
+ eError = PVRSRV_ERROR_TIMEOUT;
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif
+
+ #if defined(PDUMP)
+
+
+
+
+
+
+
+
+ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete");
+ PDUMPMEMPOL(psHostCtlMemInfo,
+ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE,
+ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0,
+ MAKEUNIQUETAG(psHostCtlMemInfo));
+ #endif
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
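+	/* If the microkernel reports the cleanup as still busy, ask the caller to
+	 * retry; otherwise clear the completion flags and report success.
+	 */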
+ if (psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_BUSY)
+ {
+
+ PVR_ASSERT((psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_DONE) == 0);
+ eError = PVRSRV_ERROR_RETRY;
+ psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_BUSY);
+ }
+ else
+ {
+ eError = PVRSRV_OK;
+ psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE);
+ }
+
+ PDUMPMEM(IMG_NULL, psHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psHostCtlMemInfo));
+
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ psDevInfo->ui32CacheControl |= (SGXMKIF_CC_INVAL_BIF_SL | SGXMKIF_CC_INVAL_DATA);
+#else
+ psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_DATA;
+#endif
+ return eError;
+}
+
+
+typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHWRenderContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_RENDER_CONTEXT_CLEANUP;
+
+
+static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHWRenderContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_RC,
+ bForceCleanup);
+
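+	/* On retry, start (or check) a cleanup timeout; give up with a polling
+	 * timeout error once MAX_CLEANUP_TIME_US has elapsed.
+	 */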
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+ }
+
+ return eError;
+}
+
+typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHWTransferContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
+
+
+static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHWTransferContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_TC,
+ bForceCleanup);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+ }
+
+ return eError;
+}
+
+IMG_EXPORT
+IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware Render Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWRenderContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ &psCleanup->psHWRenderContextMemInfo,
+ "HW Render Context");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate device memory for HW Render Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWRenderContextMemInfo->pvLinAddrKM,
+ psHWRenderContextCpuVAddr,
+ ui32HWRenderContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+
+ psHWRenderContextDevVAddr->uiAddr = psCleanup->psHWRenderContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
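+	/* Patch the page directory physical address into the context structure a
+	 * byte at a time: the offset is caller-supplied, so no alignment is assumed.
+	 */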
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Render context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWRenderContextMemInfo,
+ 0,
+ ui32HWRenderContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo));
+
+
+ PDUMPCOMMENT("Page directory address in HW render context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWRenderContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_RENDER_CONTEXT,
+ (IMG_VOID *)psCleanup,
+ 0,
+ &SGXCleanupHWRenderContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWRenderContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHWRenderContext != IMG_NULL);
+
+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
+
+ if (psCleanup == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+
+
+IMG_EXPORT
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware Transfer Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HWTransferContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ &psCleanup->psHWTransferContextMemInfo,
+                                       "HW Transfer Context");
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate device memory for HW Transfer Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHWTransferContextMemInfo->pvLinAddrKM,
+ psHWTransferContextCpuVAddr,
+ ui32HWTransferContextSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+
+ psHWTransferContextDevVAddr->uiAddr = psCleanup->psHWTransferContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Transfer context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHWTransferContextMemInfo,
+ 0,
+ ui32HWTransferContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo));
+
+
+ PDUMPCOMMENT("Page directory address in HW transfer context");
+
+ PDUMPPDDEVPADDR(
+ psCleanup->psHWTransferContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
+ psCleanup,
+ 0,
+ &SGXCleanupHWTransferContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHWTransferContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHWTransferContext != IMG_NULL);
+
+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
+
+ if (psCleanup == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWTransferContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
+
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWTransferContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+			"SGXSetTransferContextPriorityKM: invalid context priority offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+
+
+ pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(
+ IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField)
+{
+ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ int iPtrByte;
+ PVR_UNREFERENCED_PARAMETER(hDeviceNode);
+
+ if (hHWRenderContext != IMG_NULL)
+ {
+ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
+ if ((ui32OffsetOfPriorityField + sizeof(ui32Priority))
+ >= psCleanup->psHWRenderContextMemInfo->uAllocSize)
+ {
+ PVR_DPF((
+ PVR_DBG_ERROR,
+			"SGXSetRenderContextPriorityKM: invalid HWRenderContext priority offset"));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+
+
+
+ pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetOfPriorityField;
+
+ pSrc = (IMG_UINT8 *)&ui32Priority;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_KERNEL_MEM_INFO *psHW2DContextMemInfo;
+ IMG_HANDLE hBlockAlloc;
+ PRESMAN_ITEM psResItem;
+ IMG_BOOL bCleanupTimerRunning;
+ IMG_PVOID pvTimeData;
+} SGX_HW_2D_CONTEXT_CLEANUP;
+
+static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam,
+ IMG_UINT32 ui32Param,
+ IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
+
+ PVR_UNREFERENCED_PARAMETER(ui32Param);
+
+
+ eError = SGXCleanupRequest(psCleanup->psDeviceNode,
+ &psCleanup->psHW2DContextMemInfo->sDevVAddr,
+ PVRSRV_CLEANUPCMD_2DC,
+ bForceCleanup);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ if (!psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US);
+ psCleanup->bCleanupTimerRunning = IMG_TRUE;
+ }
+ else
+ {
+ if (OSTimeHasTimePassed(psCleanup->pvTimeData))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+ }
+ else
+ {
+ if (psCleanup->bCleanupTimerRunning)
+ {
+ OSTimeDestroy(psCleanup->pvTimeData);
+ }
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+
+ PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+
+
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+ }
+ return eError;
+}
+
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE hDeviceNode,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hBlockAlloc;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+ IMG_HANDLE hDevMemContextInt;
+ MMU_CONTEXT *psMMUContext;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ int iPtrByte;
+ IMG_UINT8 *pSrc;
+ IMG_UINT8 *pDst;
+ PRESMAN_ITEM psResItem;
+
+ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ (IMG_VOID **)&psCleanup,
+ &hBlockAlloc,
+ "SGX Hardware 2D Context Cleanup");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
+ goto exit0;
+ }
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+ eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+ psPerProc,
+ psHeapInfo->hDevMemHeap,
+ PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+ | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+ | PVRSRV_MEM_CACHE_CONSISTENT,
+ ui32HW2DContextSize,
+ 32,
+ IMG_NULL,
+ 0,
+ &psCleanup->psHW2DContextMemInfo,
+ "HW 2D Context");
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate device memory for HW 2D Context"));
+ goto exit1;
+ }
+
+ eError = OSCopyFromUser(psPerProc,
+ psCleanup->psHW2DContextMemInfo->pvLinAddrKM,
+ psHW2DContextCpuVAddr,
+ ui32HW2DContextSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+ goto exit2;
+ }
+
+
+ psHW2DContextDevVAddr->uiAddr = psCleanup->psHW2DContextMemInfo->sDevVAddr.uiAddr;
+
+
+ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+ &hDevMemContextInt,
+ hDevMemContext,
+ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Can't lookup DevMem Context"));
+ goto exit2;
+ }
+
+ psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+ sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+
+
+
+
+
+ pSrc = (IMG_UINT8 *)&sPDDevPAddr;
+ pDst = (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM;
+ pDst += ui32OffsetToPDDevPAddr;
+
+ for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++)
+ {
+ pDst[iPtrByte] = pSrc[iPtrByte];
+ }
+
+#if defined(PDUMP)
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW 2D context struct");
+
+ PDUMPMEM(
+ IMG_NULL,
+ psCleanup->psHW2DContextMemInfo,
+ 0,
+ ui32HW2DContextSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo));
+
+
+ PDUMPCOMMENT("Page directory address in HW 2D transfer context");
+ PDUMPPDDEVPADDR(
+ psCleanup->psHW2DContextMemInfo,
+ ui32OffsetToPDDevPAddr,
+ sPDDevPAddr,
+ MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo),
+ PDUMP_PD_UNIQUETAG);
+#endif
+
+ psCleanup->hBlockAlloc = hBlockAlloc;
+ psCleanup->psDeviceNode = psDeviceNode;
+ psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+ psResItem = ResManRegisterRes(psPerProc->hResManContext,
+ RESMAN_TYPE_HW_2D_CONTEXT,
+ psCleanup,
+ 0,
+ &SGXCleanupHW2DContextCallback);
+
+ if (psResItem == IMG_NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
+ goto exit2;
+ }
+
+ psCleanup->psResItem = psResItem;
+
+ return (IMG_HANDLE)psCleanup;
+
+exit2:
+ PVRSRVFreeDeviceMemKM(hDeviceNode,
+ psCleanup->psHW2DContextMemInfo);
+exit1:
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+ psCleanup,
+ psCleanup->hBlockAlloc);
+
+exit0:
+ return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup)
+{
+ PVRSRV_ERROR eError;
+ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+
+ PVR_ASSERT(hHW2DContext != IMG_NULL);
+
+ if (hHW2DContext == IMG_NULL)
+ {
+ return (PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
+
+ eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup);
+
+ return eError;
+}
+#endif
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SGX2DQuerySyncOpsComplete)
+#endif
+static INLINE
+IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_UINT32 ui32ReadOpsPending,
+ IMG_UINT32 ui32WriteOpsPending)
+{
+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
+
+ return (IMG_BOOL)(
+ (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
+ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
+ );
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
+ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
+ IMG_BOOL bWaitForComplete)
+{
+ IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending;
+
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
+
+ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
+ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
+
+ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
+ {
+
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
+ return PVRSRV_OK;
+ }
+
+
+ if (!bWaitForComplete)
+ {
+
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
+ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ }
+
+
+ PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ OSSleepms(1);
+
+ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
+ {
+
+ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
+ return PVRSRV_OK;
+ }
+
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+
+ PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
+
+#if defined(DEBUG)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
+
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: 0x%x, Syncdata: 0x%x",
+ (IMG_UINTPTR_T)psSyncInfo, (IMG_UINTPTR_T)psSyncData));
+
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
+ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
+
+ }
+#endif
+
+ return PVRSRV_ERROR_TIMEOUT;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode,
+ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr,
+ IMG_BOOL bForceCleanup)
+{
+ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
+
+ return SGXCleanupRequest(psDeviceNode,
+ &sHWRTDataSetDevVAddr,
+ PVRSRV_CLEANUPCMD_RT,
+ bForceCleanup);
+}
+
+
+IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TimeWraps,
+ IMG_UINT32 ui32Time)
+{
+#if defined(EUR_CR_TIMER)
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32TimeWraps);
+ return ui32Time;
+#else
+ IMG_UINT64 ui64Clocks;
+ IMG_UINT32 ui32Clocksx16;
+
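+	/* The microkernel timer counts down from ui32uKernelTimerClock; combine the
+	 * wrap count with the remaining value and report elapsed time in units of
+	 * 16 clocks.
+	 */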
+ ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
+ (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
+ ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16);
+
+ return ui32Clocksx16;
+#endif
+}
+
+
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks)
+{
+
+
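+	/* Convert the requested number of SGX clocks into microseconds (adding one
+	 * microsecond of margin) and busy-wait for that long.
+	 */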
+ OSWaitus(1 + (ui32SGXClocks * 1000000 / psDevInfo->ui32CoreClockSpeed));
+}
+
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev,
+ IMG_UINT32 *pui32SGXCoreID)
+{
+ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+ SGX_MISC_INFO sMiscInfo;
+ PVRSRV_ERROR eError;
+
+ sMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_SGXREV;
+ eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, NULL);
+
+ *pui32SGXCoreRev = sMiscInfo.uData.sSGXFeatures.ui32CoreRev;
+ *pui32SGXCoreID = sMiscInfo.uData.sSGXFeatures.ui32CoreID;
+ return eError;
+}
+
+
+PVRSRV_ERROR SGXContextSuspend(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWContextDevVAddr,
+ IMG_BOOL bResume)
+{
+ PVRSRV_ERROR eError;
+ SGXMKIF_COMMAND sCommand = {0};
+
+ sCommand.ui32Data[0] = psHWContextDevVAddr->uiAddr;
+ sCommand.ui32Data[1] = bResume ? PVRSRV_CTXSUSPCMD_RESUME : PVRSRV_CTXSUSPCMD_SUSPEND;
+
+ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CONTEXTSUSPEND, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXContextSuspend: Failed to submit context suspend command"));
+ return eError;
+ }
+
+ return eError;
+}
+
diff --git a/drivers/gpu/pvr/sgx/sgxutils.h b/drivers/gpu/pvr/sgx/sgxutils.h
new file mode 100644
index 0000000..9017acf
--- /dev/null
+++ b/drivers/gpu/pvr/sgx/sgxutils.h
@@ -0,0 +1,143 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "perproc.h"
+#include "sgxinfokm.h"
+
+
+#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
+ ((sizeof(type) <= (psCCBMemInfo)->uAllocSize) && \
+ ((psCCBKick)->offset <= (psCCBMemInfo)->uAllocSize - sizeof(type)))
+
+#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
+ ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \
+ (psCCBKick)->offset))
+
+extern IMG_UINT64 ui64KickCount;
+
+
+IMG_IMPORT
+IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32CallerID);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCommandType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene);
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ SGXMKIF_CMD_TYPE eCommandType,
+ SGXMKIF_COMMAND *psCommandData,
+ IMG_UINT32 ui32CallerID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_HANDLE hDevMemContext,
+ IMG_BOOL bLastInScene);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr,
+ IMG_UINT32 ui32HWRenderContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr,
+ IMG_UINT32 ui32HWTransferContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo,
+ IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr,
+ IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXSetRenderContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWRenderContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXSetTransferContextPriorityKM(IMG_HANDLE hDeviceNode,
+ IMG_HANDLE hHWTransferContext,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32OffsetOfPriorityField);
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+IMG_IMPORT
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
+ IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+ IMG_UINT32 ui32HW2DContextSize,
+ IMG_UINT32 ui32OffsetToPDDevPAddr,
+ IMG_HANDLE hDevMemContext,
+ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+ PVRSRV_PER_PROCESS_DATA *psPerProc);
+
+IMG_IMPORT
+PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup);
+#endif
+
+IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TimeWraps,
+ IMG_UINT32 ui32Time);
+
+IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32SGXClocks);
+
+PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
+ IMG_UINT32 ui32CleanupType,
+ IMG_BOOL bForceCleanup);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev,
+ IMG_UINT32 *pui32SGXCoreID);
+
+PVRSRV_ERROR SGXContextSuspend(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR *psHWContextDevVAddr,
+ IMG_BOOL bResume);
+
diff --git a/drivers/gpu/pvr/sgx520defs.h b/drivers/gpu/pvr/sgx520defs.h
new file mode 100644
index 0000000..a21295d
--- /dev/null
+++ b/drivers/gpu/pvr/sgx520defs.h
@@ -0,0 +1,488 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX520DEFS_KM_H_
+#define _SGX520DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATESTATUS 0x0004
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR 0x0008
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U
+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
+#define EUR_CR_CORE_ID 0x0010
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_REVISION 0x0014
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS 0x012C
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_0 0x0A0C
+#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_1 0x0A10
+#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_2 0x0A14
+#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_3 0x0A18
+#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_4 0x0A1C
+#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_5 0x0A20
+#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_6 0x0A24
+#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_7 0x0A28
+#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_8 0x0A2C
+#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_9 0x0A30
+#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_10 0x0A34
+#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_11 0x0A38
+#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_12 0x0A3C
+#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_13 0x0A40
+#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_14 0x0A44
+#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_15 0x0A48
+#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 20
+#define EUR_CR_PDS_EXEC_BASE 0x0AB8
+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV2 0x0AD8
+#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV2_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_PC_BASE 0x0B2C
+#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU
+#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x000FFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x00300000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 20
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx530defs.h b/drivers/gpu/pvr/sgx530defs.h
new file mode 100644
index 0000000..810cb81
--- /dev/null
+++ b/drivers/gpu/pvr/sgx530defs.h
@@ -0,0 +1,488 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX530DEFS_KM_H_
+#define _SGX530DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATESTATUS 0x0004
+#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR 0x0008
+#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U
+#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
+#define EUR_CR_CORE_ID 0x0010
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_REVISION 0x0014
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS 0x012CU
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_PDS_EXEC_BASE 0x0AB8
+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV2 0x0AD8
+#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV2_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_PC_BASE 0x0B2C
+#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU
+#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
+#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+#define EUR_CR_MNE_CR_CTRL 0x0D00
+#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U
+#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_SHIFT 16
+#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000U
+#define EUR_CR_MNE_CR_CTRL_BYP_CC_SHIFT 15
+#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_MASK 0x00007800U
+#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_SHIFT 11
+#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U
+#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_SHIFT 10
+#define EUR_CR_MNE_CR_CTRL_BYPASS_MASK 0x000003E0U
+#define EUR_CR_MNE_CR_CTRL_BYPASS_SHIFT 5
+#define EUR_CR_MNE_CR_CTRL_PAUSE_MASK 0x00000010U
+#define EUR_CR_MNE_CR_CTRL_PAUSE_SHIFT 4
+#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_MASK 0x0000000EU
+#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT 1
+#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_PDS_MASK (1UL<<(EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+2))
+#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_USEC_MASK (1UL<<(EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+1))
+#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK (1UL<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT)
+#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_MASK 0x00000001U
+#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_SHIFT 0
+#define EUR_CR_MNE_CR_USE_INVAL 0x0D04
+#define EUR_CR_MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFFU
+#define EUR_CR_MNE_CR_USE_INVAL_ADDR_SHIFT 0
+#define EUR_CR_MNE_CR_STAT 0x0D08
+#define EUR_CR_MNE_CR_STAT_PAUSED_MASK 0x00000400U
+#define EUR_CR_MNE_CR_STAT_PAUSED_SHIFT 10
+#define EUR_CR_MNE_CR_STAT_READS_MASK 0x000003FFU
+#define EUR_CR_MNE_CR_STAT_READS_SHIFT 0
+#define EUR_CR_MNE_CR_STAT_STATS 0x0D0C
+#define EUR_CR_MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0U
+#define EUR_CR_MNE_CR_STAT_STATS_RST_SHIFT 4
+#define EUR_CR_MNE_CR_STAT_STATS_SEL_MASK 0x0000000FU
+#define EUR_CR_MNE_CR_STAT_STATS_SEL_SHIFT 0
+#define EUR_CR_MNE_CR_STAT_STATS_OUT 0x0D10
+#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0
+#define EUR_CR_MNE_CR_EVENT_STATUS 0x0D14
+#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001U
+#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_SHIFT 0
+#define EUR_CR_MNE_CR_EVENT_CLEAR 0x0D18
+#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001U
+#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0
+#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx531defs.h b/drivers/gpu/pvr/sgx531defs.h
new file mode 100644
index 0000000..c9f2899
--- /dev/null
+++ b/drivers/gpu/pvr/sgx531defs.h
@@ -0,0 +1,544 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX531DEFS_KM_H_
+#define _SGX531DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
+#define EUR_CR_CORE_ID 0x0020
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_REVISION 0x0024
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
+#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U
+#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
+#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U
+#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
+#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400U
+#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
+#define EUR_CR_SOFT_RESET_MADD_RESET_MASK 0x00001000U
+#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT 12
+#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U
+#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
+#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U
+#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS 0x012CU
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_KICK1 0x0AB0
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_PDS_EXEC_BASE 0x0AB8
+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
+#define EUR_CR_EVENT_KICK2 0x0AC0
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_EVENT_KICK3 0x0AD8
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_PC_BASE 0x0B2C
+#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x00FFFFFFU
+#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx540defs.h b/drivers/gpu/pvr/sgx540defs.h
new file mode 100644
index 0000000..c09aa26
--- /dev/null
+++ b/drivers/gpu/pvr/sgx540defs.h
@@ -0,0 +1,547 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX540DEFS_KM_H_
+#define _SGX540DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
+#define EUR_CR_POWER 0x001C
+#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U
+#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0
+#define EUR_CR_CORE_ID 0x0020
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_REVISION 0x0024
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
+#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U
+#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
+#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U
+#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
+#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400U
+#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
+#define EUR_CR_SOFT_RESET_MADD_RESET_MASK 0x00001000U
+#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT 12
+#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U
+#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
+#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U
+#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS 0x012CU
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_KICK1 0x0AB0
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_PDS_EXEC_BASE 0x0AB8
+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
+#define EUR_CR_EVENT_KICK2 0x0AC0
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_EVENT_KICK3 0x0AD8
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_PC_BASE 0x0B2C
+#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x00FFFFFFU
+#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U
+#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
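The headers above are raw register maps: each register gets a byte offset plus MASK/SHIFT pairs for its fields. As a quick illustration (not part of the patch itself), the sketch below shows the conventional read-modify-write pattern these macros support. The mmio[] array and the sgx_read()/sgx_write()/sgx_set_field() helpers are hypothetical stand-ins for the driver's real MMIO accessors; the macro values are copied from sgx540defs.h above.

/* Self-contained sketch: extracting and updating SGX register fields
 * via the MASK/SHIFT macros.  Build with any C compiler; nothing here
 * touches real hardware. */
#include <stdint.h>
#include <stdio.h>

/* Values mirrored from drivers/gpu/pvr/sgx540defs.h above. */
#define EUR_CR_CORE_REVISION                 0x0024
#define EUR_CR_CORE_REVISION_MAJOR_MASK      0x00FF0000U
#define EUR_CR_CORE_REVISION_MAJOR_SHIFT     16
#define EUR_CR_CLKGATECTL                    0x0000
#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK      0x00000003U
#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT     0

static uint32_t mmio[0x1000 / 4];	/* stand-in for the SGX register bank */

static uint32_t sgx_read(uint32_t offset)
{
	return mmio[offset / 4];
}

static void sgx_write(uint32_t offset, uint32_t value)
{
	mmio[offset / 4] = value;
}

/* Read-modify-write of one field: clear it with the mask, then OR in
 * the new value shifted into position. */
static void sgx_set_field(uint32_t offset, uint32_t mask,
			  uint32_t shift, uint32_t field)
{
	uint32_t reg = sgx_read(offset);

	reg = (reg & ~mask) | ((field << shift) & mask);
	sgx_write(offset, reg);
}

int main(void)
{
	uint32_t rev, major;

	/* Pretend core ID register: designer 1, major 1, minor 2, maint 5. */
	mmio[EUR_CR_CORE_REVISION / 4] = 0x01010205;

	rev = sgx_read(EUR_CR_CORE_REVISION);
	major = (rev & EUR_CR_CORE_REVISION_MAJOR_MASK) >>
		EUR_CR_CORE_REVISION_MAJOR_SHIFT;
	printf("SGX core major revision: %u\n", (unsigned)major);

	/* Force the ISP clock-gate field to 0 ("always on"). */
	sgx_set_field(EUR_CR_CLKGATECTL, EUR_CR_CLKGATECTL_ISP_CLKG_MASK,
		      EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT, 0);

	return 0;
}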
diff --git a/drivers/gpu/pvr/sgx543_v1.164defs.h b/drivers/gpu/pvr/sgx543_v1.164defs.h
new file mode 100644
index 0000000..23bd4d4
--- /dev/null
+++ b/drivers/gpu/pvr/sgx543_v1.164defs.h
@@ -0,0 +1,1284 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX543DEFS_KM_H_
+#define _SGX543DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0
+#define EUR_CR_POWER 0x001C
+#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U
+#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0
+#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0
+#define EUR_CR_CORE_ID 0x0020
+#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U
+#define EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1
+#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 2
+#define EUR_CR_CORE_ID_CONFIG_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U
+#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8
+#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U
+#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12
+#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_ID_ID_SIGNED 0
+#define EUR_CR_CORE_REVISION 0x0024
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
+#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U
+#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
+#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U
+#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
+#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U
+#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
+#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U
+#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
+#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
+#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
+#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS 0x012C
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_TIMER_VALUE_SIGNED 0
+#define EUR_CR_EVENT_KICK1 0x0AB0
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICK2 0x0AC0
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK_NOW_SIGNED 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV0_DSC_SIGNED 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1_DSC_SIGNED 0
+#define EUR_CR_EVENT_KICK3 0x0AD8
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3_DSC_SIGNED 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU
+#define EUR_CR_BIF_FAULT_CID_SHIFT 0
+#define EUR_CR_BIF_FAULT_CID_SIGNED 0
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_SB_SIGNED 0
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0
+#define EUR_CR_BIF_TILE0 0x0C0C
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE0_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE1 0x0C10
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE1_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE2 0x0C14
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE2_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE3 0x0C18
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE3_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE4 0x0C1C
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE4_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE5 0x0C20
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE5_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE6 0x0C24
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE6_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE7 0x0C28
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE7_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE8 0x0C2C
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE8_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE9 0x0C30
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE9_CFG_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL 0x0C34
+#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_SET 0x0C74
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0
+#define EUR_CR_BIF_BANK0 0x0C78
+#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0
+#define EUR_CR_BIF_BANK1 0x0C7C
+#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS 0x0CB4
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL 0x0CD0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0
+#define EUR_CR_BREAKPOINT0_START 0x0F44
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0_END 0x0F48
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0 0x0F4C
+#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_START 0x0F50
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1_END 0x0F54
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1 0x0F58
+#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_START 0x0F5C
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2_END 0x0F60
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2 0x0F64
+#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_START 0x0F68
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3_END 0x0F6C
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3 0x0F70
+#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT_READ 0x0F74
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP 0x0F78
+#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_BREAKPOINT 0x0F7C
+#define EUR_CR_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO0 0x0F80
+#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1 0x0F84
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_0 0x0A0C
+#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_1 0x0A10
+#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_2 0x0A14
+#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_3 0x0A18
+#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_4 0x0A1C
+#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_5 0x0A20
+#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_6 0x0A24
+#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_7 0x0A28
+#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_8 0x0A2C
+#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_9 0x0A30
+#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_10 0x0A34
+#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_11 0x0A38
+#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_12 0x0A3C
+#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_13 0x0A40
+#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_14 0x0A44
+#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_15 0x0A48
+#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx543defs.h b/drivers/gpu/pvr/sgx543defs.h
new file mode 100644
index 0000000..1f54226
--- /dev/null
+++ b/drivers/gpu/pvr/sgx543defs.h
@@ -0,0 +1,1364 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX543DEFS_KM_H_
+#define _SGX543DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0
+#define EUR_CR_POWER 0x001C
+#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U
+#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0
+#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0
+#define EUR_CR_CORE_ID 0x0020
+#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U
+#define EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1
+#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 2
+#define EUR_CR_CORE_ID_CONFIG_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U
+#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8
+#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U
+#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12
+#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_ID_ID_SIGNED 0
+#define EUR_CR_CORE_REVISION 0x0024
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
+#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U
+#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
+#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U
+#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
+#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U
+#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
+#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U
+#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
+#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
+#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
+#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS 0x012C
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_TIMER_VALUE_SIGNED 0
+#define EUR_CR_EVENT_KICK1 0x0AB0
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICK2 0x0AC0
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK_NOW_SIGNED 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV0_DSC_SIGNED 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1_DSC_SIGNED 0
+#define EUR_CR_EVENT_KICK3 0x0AD8
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3_DSC_SIGNED 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU
+#define EUR_CR_BIF_FAULT_CID_SHIFT 0
+#define EUR_CR_BIF_FAULT_CID_SIGNED 0
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_SB_SIGNED 0
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0
+#define EUR_CR_BIF_TILE0 0x0C0C
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE0_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE1 0x0C10
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE1_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE2 0x0C14
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE2_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE3 0x0C18
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE3_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE4 0x0C1C
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE4_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE5 0x0C20
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE5_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE6 0x0C24
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE6_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE7 0x0C28
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE7_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE8 0x0C2C
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE8_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE9 0x0C30
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE9_CFG_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL 0x0C34
+#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_SET 0x0C74
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0
+#define EUR_CR_BIF_BANK0 0x0C78
+#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0
+#define EUR_CR_BIF_BANK1 0x0C7C
+#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS 0x0CB4
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL 0x0CD0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0
+#define EUR_CR_BREAKPOINT0_START 0x0F44
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0_END 0x0F48
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0 0x0F4C
+#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_START 0x0F50
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1_END 0x0F54
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1 0x0F58
+#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_START 0x0F5C
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2_END 0x0F60
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2 0x0F64
+#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_START 0x0F68
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3_END 0x0F6C
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3 0x0F70
+#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT_READ 0x0F74
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP 0x0F78
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT 0x0F7C
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PARTITION_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PARTITION_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 0x0F80
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 0x0F84
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_0 0x0A0C
+#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_1 0x0A10
+#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_2 0x0A14
+#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_3 0x0A18
+#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_4 0x0A1C
+#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_5 0x0A20
+#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_6 0x0A24
+#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_7 0x0A28
+#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_8 0x0A2C
+#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_9 0x0A30
+#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_10 0x0A34
+#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_11 0x0A38
+#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_12 0x0A3C
+#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_13 0x0A40
+#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_14 0x0A44
+#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_15 0x0A48
+#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP 0x0F88
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT 0x0F8C
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PIPE0_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PIPE0_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 0x0F90
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 0x0F94
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP 0x0F98
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT 0x0F9C
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PIPE1_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PIPE1_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 0x0FA0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 0x0FA4
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx544defs.h b/drivers/gpu/pvr/sgx544defs.h
new file mode 100644
index 0000000..c18b8ad
--- /dev/null
+++ b/drivers/gpu/pvr/sgx544defs.h
@@ -0,0 +1,1367 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX544DEFS_KM_H_
+#define _SGX544DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28
+#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22
+#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24
+#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26
+#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20
+#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21
+#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22
+#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23
+#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24
+#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16
+#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18
+#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20
+#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0
+#define EUR_CR_POWER 0x001C
+#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U
+#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0
+#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0
+#define EUR_CR_CORE_ID 0x0020
+#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0
+#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U
+#define EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1
+#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 2
+#define EUR_CR_CORE_ID_CONFIG_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U
+#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8
+#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0
+#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U
+#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12
+#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_ID_ID_SIGNED 0
+#define EUR_CR_CORE_REVISION 0x0024
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0028
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6
+#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7
+#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U
+#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8
+#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U
+#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9
+#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10
+#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11
+#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U
+#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13
+#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U
+#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14
+#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15
+#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16
+#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17
+#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18
+#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19
+#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10
+#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9
+#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8
+#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5
+#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS 0x012C
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_TIMER_VALUE_SIGNED 0
+#define EUR_CR_EVENT_KICK1 0x0AB0
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICK2 0x0AC0
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK_NOW_SIGNED 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV0_DSC_SIGNED 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1_DSC_SIGNED 0
+#define EUR_CR_EVENT_KICK3 0x0AD8
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0
+#define EUR_CR_PDS_INV3 0x0ADC
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3_DSC_SIGNED 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU
+#define EUR_CR_BIF_FAULT_CID_SHIFT 0
+#define EUR_CR_BIF_FAULT_CID_SIGNED 0
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_SB_SIGNED 0
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0
+#define EUR_CR_BIF_TILE0 0x0C0C
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE0_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE1 0x0C10
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE1_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE2 0x0C14
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE2_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE3 0x0C18
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE3_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE4 0x0C1C
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE4_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE5 0x0C20
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE5_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE6 0x0C24
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE6_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE7 0x0C28
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE7_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE8 0x0C2C
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE8_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE9 0x0C30
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE9_CFG_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL 0x0C34
+#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2
+#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_SET 0x0C74
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0
+#define EUR_CR_BIF_BANK0 0x0C78
+#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16
+#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0
+#define EUR_CR_BIF_BANK1 0x0C7C
+#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS 0x0CB4
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL 0x0CD0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0
+#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1
+#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4
+#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0
+#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_MASK 0x00000020U
+#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SHIFT 5
+#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS 0x0E04
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0
+#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
+#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
+#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0
+#define EUR_CR_BREAKPOINT0_START 0x0F44
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0_END 0x0F48
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT0 0x0F4C
+#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_START 0x0F50
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1_END 0x0F54
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT1 0x0F58
+#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_START 0x0F5C
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2_END 0x0F60
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT2 0x0F64
+#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_START 0x0F68
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3_END 0x0F6C
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0
+#define EUR_CR_BREAKPOINT3 0x0F70
+#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U
+#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3
+#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2
+#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1
+#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0
+#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0
+#define EUR_CR_BREAKPOINT_READ 0x0F74
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4
+#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP 0x0F78
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT 0x0F7C
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PARTITION_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PARTITION_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 0x0F80
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 0x0F84
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_0 0x0A0C
+#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_1 0x0A10
+#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_2 0x0A14
+#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_3 0x0A18
+#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_4 0x0A1C
+#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_5 0x0A20
+#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_6 0x0A24
+#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_7 0x0A28
+#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_8 0x0A2C
+#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_9 0x0A30
+#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_10 0x0A34
+#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_11 0x0A38
+#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_12 0x0A3C
+#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_13 0x0A40
+#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_14 0x0A44
+#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_15 0x0A48
+#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP 0x0F88
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT 0x0F8C
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PIPE0_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PIPE0_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 0x0F90
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 0x0F94
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP 0x0F98
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SHIFT 1
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT 0x0F9C
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_MASK 0x000003C0U
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SHIFT 6
+#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_ID_MASK 0x00000030U
+#define EUR_CR_PIPE1_BREAKPOINT_ID_SHIFT 4
+#define EUR_CR_PIPE1_BREAKPOINT_ID_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_MASK 0x00000008U
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SHIFT 3
+#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_MASK 0x00000004U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SHIFT 2
+#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 0x0FA0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 0x0FA4
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0
+#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26
+#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx545defs.h b/drivers/gpu/pvr/sgx545defs.h
new file mode 100644
index 0000000..4dc7f3c
--- /dev/null
+++ b/drivers/gpu/pvr/sgx545defs.h
@@ -0,0 +1,1180 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGX545DEFS_KM_H_
+#define _SGX545DEFS_KM_H_
+
+#define EUR_CR_CLKGATECTL 0x0000
+#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_PDS0_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL_PDS0_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL_PDS0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
+#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0
+#define EUR_CR_CLKGATECTL2 0x0004
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0
+#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2
+#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4
+#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6
+#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8
+#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10
+#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12
+#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14
+#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16
+#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18
+#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20
+#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATECTL2_PDS1_CLKG_MASK 0x00C00000U
+#define EUR_CR_CLKGATECTL2_PDS1_CLKG_SHIFT 22
+#define EUR_CR_CLKGATECTL2_PDS1_CLKG_SIGNED 0
+#define EUR_CR_CLKGATESTATUS 0x0008
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0
+#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1
+#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2
+#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3
+#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4
+#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5
+#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6
+#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_MASK 0x00000080U
+#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_SHIFT 7
+#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8
+#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9
+#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10
+#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11
+#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13
+#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14
+#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15
+#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16
+#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17
+#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18
+#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_MASK 0x00080000U
+#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_SHIFT 19
+#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR 0x000C
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2
+#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4
+#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6
+#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8
+#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10
+#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12
+#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0
+#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_MASK 0x0000C000U
+#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_SHIFT 14
+#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_SIGNED 0
+#define EUR_CR_CORE_ID 0x001C
+#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU
+#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
+#define EUR_CR_CORE_ID_CONFIG_SIGNED 0
+#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U
+#define EUR_CR_CORE_ID_ID_SHIFT 16
+#define EUR_CR_CORE_ID_ID_SIGNED 0
+#define EUR_CR_CORE_REVISION 0x0020
+#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
+#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0
+#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U
+#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
+#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U
+#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
+#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0
+#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U
+#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
+#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD1 0x0024
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0
+#define EUR_CR_DESIGNER_REV_FIELD2 0x002C
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
+#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0
+#define EUR_CR_SOFT_RESET 0x0080
+#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
+#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000002U
+#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 1
+#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000004U
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 2
+#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000008U
+#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 3
+#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000010U
+#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 4
+#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0
+#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000020U
+#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 5
+#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2 0x0118
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 15
+#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_SHIFT 14
+#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_SHIFT 13
+#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_SHIFT 12
+#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_SHIFT 11
+#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 10
+#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_SHIFT 9
+#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_GSG_LOADED_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS2_GSG_LOADED_SHIFT 8
+#define EUR_CR_EVENT_STATUS2_GSG_LOADED_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 7
+#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 6
+#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 5
+#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3
+#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2
+#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
+#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
+#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS 0x012C
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
+#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U
+#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_SHIFT 21
+#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SIGNED 0
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE 0x0130
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_SHIFT 21
+#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR 0x0134
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
+#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
+#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
+#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
+#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_SHIFT 21
+#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
+#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
+#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
+#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
+#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
+#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
+#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
+#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
+#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
+#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0
+#define EUR_CR_TIMER 0x0144
+#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU
+#define EUR_CR_TIMER_VALUE_SHIFT 0
+#define EUR_CR_TIMER_VALUE_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_0 0x0A0C
+#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_1 0x0A10
+#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_2 0x0A14
+#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_3 0x0A18
+#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_4 0x0A1C
+#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_5 0x0A20
+#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_6 0x0A24
+#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_7 0x0A28
+#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_8 0x0A2C
+#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_9 0x0A30
+#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_10 0x0A34
+#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_11 0x0A38
+#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_12 0x0A3C
+#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_13 0x0A40
+#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_14 0x0A44
+#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_15 0x0A48
+#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0
+#define EUR_CR_PDS_EXEC_BASE 0x0AB8
+#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
+#define EUR_CR_PDS_EXEC_BASE_ADDR_SIGNED 0
+#define EUR_CR_EVENT_KICKER 0x0AC4
+#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U
+#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
+#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0
+#define EUR_CR_EVENT_KICK 0x0AC8
+#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK_NOW_SIGNED 0
+#define EUR_CR_EVENT_TIMER 0x0ACC
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
+#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0
+#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU
+#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
+#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0
+#define EUR_CR_PDS_INV0 0x0AD0
+#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV0_DSC_SHIFT 0
+#define EUR_CR_PDS_INV0_DSC_SIGNED 0
+#define EUR_CR_PDS_INV1 0x0AD4
+#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV1_DSC_SHIFT 0
+#define EUR_CR_PDS_INV1_DSC_SIGNED 0
+#define EUR_CR_PDS_INV3 0x0AD8
+#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U
+#define EUR_CR_PDS_INV3_DSC_SHIFT 0
+#define EUR_CR_PDS_INV3_DSC_SIGNED 0
+#define EUR_CR_PDS_INV_CSC 0x0AE0
+#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U
+#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
+#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0
+#define EUR_CR_EVENT_KICK1 0x0AE4
+#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU
+#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICK2 0x0AE8
+#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0
+#define EUR_CR_EVENT_KICK3 0x0AEC
+#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U
+#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0
+#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0
+#define EUR_CR_BIF_CTRL 0x0C00
+#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U
+#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
+#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0
+#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
+#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0
+#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U
+#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
+#define EUR_CR_BIF_CTRL_FLUSH_SIGNED 0
+#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U
+#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
+#define EUR_CR_BIF_CTRL_INVALDC_SIGNED 0
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
+#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00000800U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 11
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00001000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 12
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00002000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 13
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00004000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 14
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00008000U
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 15
+#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SIGNED 0
+#define EUR_CR_BIF_INT_STAT 0x0C04
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x0000FFFFU
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0
+#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16
+#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19
+#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0
+#define EUR_CR_BIF_FAULT 0x0C08
+#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU
+#define EUR_CR_BIF_FAULT_CID_SHIFT 0
+#define EUR_CR_BIF_FAULT_CID_SIGNED 0
+#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U
+#define EUR_CR_BIF_FAULT_SB_SHIFT 4
+#define EUR_CR_BIF_FAULT_SB_SIGNED 0
+#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U
+#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
+#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0
+#define EUR_CR_BIF_TILE0 0x0C0C
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE0_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE1 0x0C10
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE1_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE2 0x0C14
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE2_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE3 0x0C18
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE3_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE4 0x0C1C
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE4_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE5 0x0C20
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE5_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE6 0x0C24
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE6_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE7 0x0C28
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE7_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE8 0x0C2C
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE8_CFG_SIGNED 0
+#define EUR_CR_BIF_TILE9 0x0C30
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
+#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
+#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0
+#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U
+#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
+#define EUR_CR_BIF_TILE9_CFG_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54
+#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58
+#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C
+#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60
+#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64
+#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68
+#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C
+#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70
+#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_SET 0x0C74
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0
+#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2
+#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4
+#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6
+#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8
+#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9
+#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0
+#define EUR_CR_BIF_BANK0 0x0C78
+#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00U
+#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8
+#define EUR_CR_BIF_BANK0_INDEX_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000U
+#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16
+#define EUR_CR_BIF_BANK0_INDEX_2D_SIGNED 0
+#define EUR_CR_BIF_BANK1 0x0C7C
+#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
+#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U
+#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
+#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00U
+#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8
+#define EUR_CR_BIF_BANK1_INDEX_HOST_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U
+#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
+#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0
+#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000U
+#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16
+#define EUR_CR_BIF_BANK1_INDEX_2D_SIGNED 0
+#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFFF00U
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 8
+#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0
+#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000007FFU
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
+#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0
+#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
+#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS 0x0CB4
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
+#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
+#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0
+#define EUR_CR_BIF_36BIT_ADDRESSING 0x0CCC
+#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK 0x00000001U
+#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_SHIFT 0
+#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_SIGNED 0
+#define EUR_CR_BIF_TILE0_ADDR_EXT 0x0CD0
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE1_ADDR_EXT 0x0CD4
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE2_ADDR_EXT 0x0CD8
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE2_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE3_ADDR_EXT 0x0CDC
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE4_ADDR_EXT 0x0CE0
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE5_ADDR_EXT 0x0CE4
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE6_ADDR_EXT 0x0CE8
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE7_ADDR_EXT 0x0CEC
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE7_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE8_ADDR_EXT 0x0CF0
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_TILE9_ADDR_EXT 0x0CF4
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_MASK 0x000000FFU
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_SHIFT 0
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_SIGNED 0
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_MASK 0x0000FF00U
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_SHIFT 8
+#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_SIGNED 0
+#define EUR_CR_BIF_CTRL_RDATA 0x0CF8
+#define EUR_CR_BIF_CTRL_RDATA_LIMIT_MASK 0x000003FFU
+#define EUR_CR_BIF_CTRL_RDATA_LIMIT_SHIFT 0
+#define EUR_CR_BIF_CTRL_RDATA_LIMIT_SIGNED 0
+#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
+#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFU
+#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
+#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000U
+#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25
+#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
+#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx_bridge.h b/drivers/gpu/pvr/sgx_bridge.h
new file mode 100644
index 0000000..ec630a5
--- /dev/null
+++ b/drivers/gpu/pvr/sgx_bridge.h
@@ -0,0 +1,666 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SGX_BRIDGE_H__)
+#define __SGX_BRIDGE_H__
+
+#if defined (SUPPORT_SID_INTERFACE)
+#include "sgxapi.h"
+#else
+#include "sgxapi_km.h"
+#endif
+#include "sgxinfo.h"
+#include "pvr_bridge.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
+#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
+#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
+#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
+#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
+#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
+#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
+
+#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
+
+#if defined(TRANSFER_QUEUE)
+#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
+#endif
+#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
+#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
+#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
+
+#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
+#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
+#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
+#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
+#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
+#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
+#if defined(SGX_FEATURE_2D_HARDWARE)
+#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
+#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
+#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
+#endif
+#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
+#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
+
+#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+
+#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
+#define PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
+#define PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
+
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
+#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
+#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
+#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
+#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+36)
+#define PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+37)
+#endif
+
+
+
+#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+37)
+
+
+typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_HANDLE hDevMemHeap;
+ IMG_DEV_VIRTADDR sDevVAddr;
+}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_PHYADDR DevPAddr;
+ IMG_CPU_PHYADDR CpuPAddr;
+}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHWTransferContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHWTransferContext;
+#endif
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32OffsetOfPriorityField;
+}PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHWRenderContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHWRenderContext;
+#endif
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32OffsetOfPriorityField;
+}PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
+{
+ SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
+{
+ SGX_CLIENT_INFO sClientInfo;
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ SGX_CLIENT_INFO sClientInfo;
+}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
+
+
+typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
+
+
+typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ SGX_CCB_KICK sCCBKick;
+}PVRSRV_BRIDGE_IN_DOKICK;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES;
+
+
+#if defined(TRANSFER_QUEUE)
+
+typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ PVRSRV_TRANSFER_SGX_KICK sKick;
+}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+
+typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ PVRSRV_2D_SGX_KICK sKick;
+} PVRSRV_BRIDGE_IN_SUBMIT2D;
+#endif
+#endif
+
+
+typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_PCHAR pszKey;
+ IMG_PCHAR pszValue;
+}PVRSRV_BRIDGE_IN_READREGDWORD;
+
+
+typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Data;
+}PVRSRV_BRIDGE_OUT_READREGDWORD;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ SGX_MISC_INFO *psMiscInfo;
+}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
+
+typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
+{
+ PVRSRV_ERROR eError;
+ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
+}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ SGX_BRIDGE_INIT_INFO sInitInfo;
+}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGXDEVINITPART2_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32KMBuildOptions;
+
+}PVRSRV_BRIDGE_OUT_SGXDEVINITPART2;
+
+
+typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hKernSyncInfo;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hKernSyncInfo;
+#endif
+ IMG_BOOL bWaitForComplete;
+}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
+
+
+#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
+
+typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_BOOL bLockOnFailure;
+ IMG_UINT32 ui32TotalPBSize;
+}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+ IMG_SID hSharedPBDesc;
+ IMG_SID hSharedPBDescKernelMemInfoHandle;
+ IMG_SID hHWPBDescKernelMemInfoHandle;
+ IMG_SID hBlockKernelMemInfoHandle;
+ IMG_SID hHWBlockKernelMemInfoHandle;
+ IMG_SID ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
+#else
+ IMG_HANDLE hKernelMemInfo;
+ IMG_HANDLE hSharedPBDesc;
+ IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
+ IMG_HANDLE hHWPBDescKernelMemInfoHandle;
+ IMG_HANDLE hBlockKernelMemInfoHandle;
+ IMG_HANDLE hHWBlockKernelMemInfoHandle;
+ IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
+#endif
+ IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
+
+typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSharedPBDesc;
+#else
+ IMG_HANDLE hSharedPBDesc;
+#endif
+}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
+{
+ PVRSRV_ERROR eError;
+}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_UINT32 ui32TotalPBSize;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hSharedPBDescKernelMemInfo;
+ IMG_SID hHWPBDescKernelMemInfo;
+ IMG_SID hBlockKernelMemInfo;
+ IMG_SID hHWBlockKernelMemInfo;
+ IMG_SID *phKernelMemInfoHandles;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hSharedPBDescKernelMemInfo;
+ IMG_HANDLE hHWPBDescKernelMemInfo;
+ IMG_HANDLE hBlockKernelMemInfo;
+ IMG_HANDLE hHWBlockKernelMemInfo;
+ IMG_HANDLE *phKernelMemInfoHandles;
+#endif
+ IMG_UINT32 ui32KernelMemInfoHandlesCount;
+ IMG_DEV_VIRTADDR sHWPBDescDevVAddr;
+}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hSharedPBDesc;
+#else
+ IMG_HANDLE hSharedPBDesc;
+#endif
+}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
+
+
+#ifdef PDUMP
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ SGX_KICKTA_DUMP_BUFFER *psBufferArray;
+ IMG_UINT32 ui32BufferArrayLength;
+ IMG_BOOL bDumpPolls;
+} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_UINT32 ui32DumpFrameNum;
+ IMG_BOOL bLastFrame;
+ IMG_UINT32 *pui32Registers;
+ IMG_UINT32 ui32NumRegisters;
+}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_UINT32 ui32DumpFrameNum;
+ IMG_BOOL bLastFrame;
+ IMG_UINT32 *pui32Registers;
+ IMG_UINT32 ui32NumRegisters;
+}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_UINT32 ui32DumpFrameNum;
+ IMG_UINT32 ui32TAKickCount;
+ IMG_BOOL bLastFrame;
+ IMG_UINT32 *pui32Registers;
+ IMG_UINT32 ui32NumRegisters;
+}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32PDumpFlags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB;
+
+typedef struct PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hDevMemContext;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
+ IMG_UINT32 ui32FileOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT32 ui32Size;
+#if !defined (SUPPORT_SID_INTERFACE)
+ IMG_HANDLE hDevMemContext;
+#endif
+ IMG_UINT32 ui32PDumpFlags;
+
+}PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM;
+
+#endif
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CPU_VIRTADDR pHWRenderContextCpuVAddr;
+ IMG_UINT32 ui32HWRenderContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
+}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHWRenderContext;
+#else
+ IMG_HANDLE hHWRenderContext;
+#endif
+ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
+}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_BOOL bForceCleanup;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHWRenderContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHWRenderContext;
+#endif
+}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CPU_VIRTADDR pHWTransferContextCpuVAddr;
+ IMG_UINT32 ui32HWTransferContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
+}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHWTransferContext;
+#else
+ IMG_HANDLE hHWTransferContext;
+#endif
+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_BOOL bForceCleanup;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHWTransferContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHWTransferContext;
+#endif
+}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
+}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_CPU_VIRTADDR pHW2DContextCpuVAddr;
+ IMG_UINT32 ui32HW2DContextSize;
+ IMG_UINT32 ui32OffsetToPDDevPAddr;
+ IMG_HANDLE hDevMemContext;
+}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHW2DContext;
+#else
+ IMG_HANDLE hHW2DContext;
+#endif
+ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+ IMG_BOOL bForceCleanup;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+ IMG_SID hHW2DContext;
+#else
+ IMG_HANDLE hDevCookie;
+ IMG_HANDLE hHW2DContext;
+#endif
+}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
+
+#define SGX2D_MAX_BLT_CMD_SIZ 256
+#endif
+
+
+typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG
+{
+ IMG_UINT32 ui32BridgeFlags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hDevCookie;
+#else
+ IMG_HANDLE hDevCookie;
+#endif
+ IMG_UINT32 ui32ArraySize;
+ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData;
+} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB;
+
+typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32DataCount;
+ IMG_UINT32 ui32ClockSpeed;
+ IMG_UINT32 ui32HostTimeStamp;
+} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx_mkif_km.h b/drivers/gpu/pvr/sgx_mkif_km.h
new file mode 100644
index 0000000..9f4f41f
--- /dev/null
+++ b/drivers/gpu/pvr/sgx_mkif_km.h
@@ -0,0 +1,349 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__SGX_MKIF_KM_H__)
+#define __SGX_MKIF_KM_H__
+
+#include "img_types.h"
+#include "servicesint.h"
+#include "sgxapi_km.h"
+
+
+#if !defined (SGX_MP_CORE_SELECT)
+#if defined(SGX_FEATURE_MP)
+ #define SGX_REG_BANK_SHIFT (14)
+ #define SGX_REG_BANK_SIZE (1 << SGX_REG_BANK_SHIFT)
+ #define SGX_REG_BANK_BASE_INDEX (2)
+ #define SGX_REG_BANK_MASTER_INDEX (1)
+ #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE))
+ #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE))
+#else
+ #define SGX_MP_CORE_SELECT(x,i) (x)
+#endif
+#endif
+
+
+typedef struct _SGXMKIF_COMMAND_
+{
+ IMG_UINT32 ui32ServiceAddress;
+ IMG_UINT32 ui32CacheControl;
+ IMG_UINT32 ui32Data[6];
+} SGXMKIF_COMMAND;
+
+
+typedef struct _PVRSRV_SGX_KERNEL_CCB_
+{
+ SGXMKIF_COMMAND asCommands[256];
+} PVRSRV_SGX_KERNEL_CCB;
+
+
+typedef struct _PVRSRV_SGX_CCB_CTL_
+{
+ IMG_UINT32 ui32WriteOffset;
+ IMG_UINT32 ui32ReadOffset;
+} PVRSRV_SGX_CCB_CTL;
+
+
+typedef struct _SGXMKIF_HOST_CTL_
+{
+#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
+ IMG_UINT32 ui32BreakpointDisable;
+ IMG_UINT32 ui32Continue;
+#endif
+
+ volatile IMG_UINT32 ui32InitStatus;
+ volatile IMG_UINT32 ui32PowerStatus;
+ volatile IMG_UINT32 ui32CleanupStatus;
+#if defined(FIX_HW_BRN_28889)
+ volatile IMG_UINT32 ui32InvalStatus;
+#endif
+#if defined(SUPPORT_HW_RECOVERY)
+ IMG_UINT32 ui32uKernelDetectedLockups;
+ IMG_UINT32 ui32HostDetectedLockups;
+ IMG_UINT32 ui32HWRecoverySampleRate;
+#endif
+ IMG_UINT32 ui32uKernelTimerClock;
+ IMG_UINT32 ui32ActivePowManSampleRate;
+ IMG_UINT32 ui32InterruptFlags;
+ IMG_UINT32 ui32InterruptClearFlags;
+ IMG_UINT32 ui32BPSetClearSignal;
+
+ IMG_UINT32 ui32NumActivePowerEvents;
+
+ IMG_UINT32 ui32TimeWraps;
+ IMG_UINT32 ui32HostClock;
+ IMG_UINT32 ui32AssertFail;
+
+#if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS)
+ IMG_UINT32 aui32PerfGroup[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+ IMG_UINT32 aui32PerfBit[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+#else
+ IMG_UINT32 ui32PerfGroup;
+#endif
+
+#if defined(FIX_HW_BRN_31939)
+ IMG_UINT32 ui32BRN31939Mem;
+#endif
+
+ IMG_UINT32 ui32OpenCLDelayCount;
+} SGXMKIF_HOST_CTL;
+
+#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001
+typedef struct _SGXMKIF_CMDTA_SHARED_
+{
+ IMG_UINT32 ui32CtrlFlags;
+
+ IMG_UINT32 ui32NumTAStatusVals;
+ IMG_UINT32 ui32Num3DStatusVals;
+
+
+ IMG_UINT32 ui32TATQSyncWriteOpsPendingVal;
+ IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
+ IMG_UINT32 ui32TATQSyncReadOpsPendingVal;
+ IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
+
+
+ IMG_UINT32 ui323DTQSyncWriteOpsPendingVal;
+ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
+ IMG_UINT32 ui323DTQSyncReadOpsPendingVal;
+ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
+
+
+ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ IMG_UINT32 ui32NumTASrcSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT asTASrcSyncs[SGX_MAX_TA_SRC_SYNCS];
+ IMG_UINT32 ui32NumTADstSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT asTADstSyncs[SGX_MAX_TA_DST_SYNCS];
+ IMG_UINT32 ui32Num3DSrcSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT as3DSrcSyncs[SGX_MAX_3D_SRC_SYNCS];
+#else
+
+ IMG_UINT32 ui32NumSrcSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS_TA];
+#endif
+
+ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
+ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
+
+} SGXMKIF_CMDTA_SHARED;
+
+#define SGXTQ_MAX_STATUS (SGX_MAX_TRANSFER_STATUS_VALS + 2)
+
+#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
+#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
+#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
+#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
+#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH)
+#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010
+#endif
+#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020
+
+typedef struct _SGXMKIF_TRANSFERCMD_SHARED_
+{
+
+
+ IMG_UINT32 ui32NumSrcSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS_TQ];
+
+
+ IMG_UINT32 ui32NumDstSyncs;
+ PVRSRV_DEVICE_SYNC_OBJECT asDstSyncs[SGX_MAX_DST_SYNCS_TQ];
+
+ IMG_UINT32 ui32TASyncWriteOpsPendingVal;
+ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
+ IMG_UINT32 ui32TASyncReadOpsPendingVal;
+ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
+
+
+ IMG_UINT32 ui323DSyncWriteOpsPendingVal;
+ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
+ IMG_UINT32 ui323DSyncReadOpsPendingVal;
+ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
+
+ IMG_UINT32 ui32NumStatusVals;
+ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
+} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED;
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _SGXMKIF_2DCMD_SHARED_ {
+
+ IMG_UINT32 ui32NumSrcSync;
+ PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
+
+
+ PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData;
+
+
+ PVRSRV_DEVICE_SYNC_OBJECT sTASyncData;
+
+
+ PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData;
+} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED;
+#endif
+
+
+typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_
+{
+ IMG_DEV_VIRTADDR sAccessDevAddr;
+ IMG_UINT32 ui32NumSyncObjects;
+
+ PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
+} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST;
+
+
+#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0)
+
+#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
+#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
+#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
+#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
+
+#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
+#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
+#define PVRSRV_USSE_EDM_INTERRUPT_IDLE (1UL << 2)
+
+#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0)
+#define PVRSRV_USSE_EDM_CLEANUPCMD_BUSY (1UL << 1)
+#define PVRSRV_USSE_EDM_CLEANUPCMD_DONE (1UL << 2)
+
+#if defined(FIX_HW_BRN_28889)
+#define PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE (1UL << 0)
+#endif
+
+#define PVRSRV_USSE_MISCINFO_READY 0x1UL
+#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL
+#define PVRSRV_USSE_MISCINFO_MEMWRITE 0x8UL
+#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL (0x1UL << 31)
+#endif
+#endif
+
+
+#define PVRSRV_CLEANUPCMD_RT 0x1U
+#define PVRSRV_CLEANUPCMD_RC 0x2U
+#define PVRSRV_CLEANUPCMD_TC 0x3U
+#define PVRSRV_CLEANUPCMD_2DC 0x4U
+#define PVRSRV_CLEANUPCMD_PB 0x5U
+
+#define PVRSRV_POWERCMD_POWEROFF 0x1U
+#define PVRSRV_POWERCMD_IDLE 0x2U
+#define PVRSRV_POWERCMD_RESUME 0x3U
+
+#define PVRSRV_CTXSUSPCMD_SUSPEND 0x1U
+#define PVRSRV_CTXSUSPCMD_RESUME 0x2U
+
+
+#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1)
+#else
+#define SGX_BIF_DIR_LIST_INDEX_EDM (0)
+#endif
+
+#define SGXMKIF_CC_INVAL_BIF_PT 0x1
+#define SGXMKIF_CC_INVAL_BIF_PD 0x2
+#define SGXMKIF_CC_INVAL_BIF_SL 0x4
+#define SGXMKIF_CC_INVAL_DATA 0x8
+
+
+typedef struct _SGX_MISCINFO_STRUCT_SIZES_
+{
+#if defined (SGX_FEATURE_2D_HARDWARE)
+ IMG_UINT32 ui32Sizeof_2DCMD;
+ IMG_UINT32 ui32Sizeof_2DCMD_SHARED;
+#endif
+ IMG_UINT32 ui32Sizeof_CMDTA;
+ IMG_UINT32 ui32Sizeof_CMDTA_SHARED;
+ IMG_UINT32 ui32Sizeof_TRANSFERCMD;
+ IMG_UINT32 ui32Sizeof_TRANSFERCMD_SHARED;
+ IMG_UINT32 ui32Sizeof_3DREGISTERS;
+ IMG_UINT32 ui32Sizeof_HWPBDESC;
+ IMG_UINT32 ui32Sizeof_HWRENDERCONTEXT;
+ IMG_UINT32 ui32Sizeof_HWRENDERDETAILS;
+ IMG_UINT32 ui32Sizeof_HWRTDATA;
+ IMG_UINT32 ui32Sizeof_HWRTDATASET;
+ IMG_UINT32 ui32Sizeof_HWTRANSFERCONTEXT;
+ IMG_UINT32 ui32Sizeof_HOST_CTL;
+ IMG_UINT32 ui32Sizeof_COMMAND;
+} SGX_MISCINFO_STRUCT_SIZES;
+
+
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+typedef struct _PVRSRV_SGX_MISCINFO_MEMACCESS
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_PHYADDR sPDDevPAddr;
+} PVRSRV_SGX_MISCINFO_MEMACCESS;
+#endif
+
+typedef struct _PVRSRV_SGX_MISCINFO_INFO
+{
+ IMG_UINT32 ui32MiscInfoFlags;
+ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ PVRSRV_SGX_MISCINFO_MEMACCESS sSGXMemAccessSrc;
+ PVRSRV_SGX_MISCINFO_MEMACCESS sSGXMemAccessDest;
+#endif
+} PVRSRV_SGX_MISCINFO_INFO;
+
+#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
+#define SGXMK_TRACE_BUFFER_SIZE 512
+#endif
+
+#define SGXMKIF_HWPERF_CB_SIZE 0x100
+
+typedef struct _SGXMKIF_HWPERF_CB_ENTRY_
+{
+ IMG_UINT32 ui32FrameNo;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32RTData;
+ IMG_UINT32 ui32Type;
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT32 ui32Info;
+ IMG_UINT32 ui32TimeWraps;
+ IMG_UINT32 ui32Time;
+
+ IMG_UINT32 ui32Counters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+ IMG_UINT32 ui32MiscCounters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS];
+} SGXMKIF_HWPERF_CB_ENTRY;
+
+typedef struct _SGXMKIF_HWPERF_CB_
+{
+ IMG_UINT32 ui32Woff;
+ IMG_UINT32 ui32Roff;
+ IMG_UINT32 ui32Ordinal;
+ SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
+} SGXMKIF_HWPERF_CB;
+
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgx_options.h b/drivers/gpu/pvr/sgx_options.h
new file mode 100644
index 0000000..c70d1eb
--- /dev/null
+++ b/drivers/gpu/pvr/sgx_options.h
@@ -0,0 +1,254 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+******************************************************************************/
+
+/* Each build option listed here is packed into a dword which
+ * provides up to 32 flags (or up to 28 flags plus a numeric
+ * value in the range 0-15 which corresponds to the number of
+ * cores minus one if SGX_FEATURE_MP is defined). The corresponding
+ * bit is set if the build option was enabled at compile time.
+ *
+ * In order to extract the enabled build flags the INTERNAL_TEST
+ * switch should be enabled in a client program which includes this
+ * header. Then the client can test specific build flags by reading
+ * the bit value at ##OPTIONNAME##_SET_OFFSET in SGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. SGX_BUILD_OPTIONS2) so that the bitfield remains backwards
+ * compatible.
+ */
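+/*
+ * Editorial sketch (not part of the original header): with INTERNAL_TEST
+ * defined, every *_SET_OFFSET macro below is visible, so a client can test
+ * an individual flag in a build-options word such as the ui32KMBuildOptions
+ * value returned by the SGX_DEVINITPART2 bridge call (see
+ * PVRSRV_BRIDGE_OUT_SGXDEVINITPART2 in sgx_bridge.h), for example:
+ *
+ *   IMG_UINT32 ui32KMOptions = ...;   // obtained from the kernel module
+ *
+ *   if (ui32KMOptions & PDUMP_SET_OFFSET)
+ *   {
+ *       // the kernel-side driver was built with PDUMP enabled
+ *   }
+ */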
+
+
+#if defined(DEBUG) || defined (INTERNAL_TEST)
+#define DEBUG_SET_OFFSET OPTIONS_BIT0
+#define OPTIONS_BIT0 0x1U
+#else
+#define OPTIONS_BIT0 0x0
+#endif /* DEBUG */
+
+#if defined(PDUMP) || defined (INTERNAL_TEST)
+#define PDUMP_SET_OFFSET OPTIONS_BIT1
+#define OPTIONS_BIT1 (0x1U << 1)
+#else
+#define OPTIONS_BIT1 0x0
+#endif /* PDUMP */
+
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST)
+#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
+#define OPTIONS_BIT2 (0x1U << 2)
+#else
+#define OPTIONS_BIT2 0x0
+#endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
+
+#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST)
+#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
+#define OPTIONS_BIT3 (0x1U << 3)
+#else
+#define OPTIONS_BIT3 0x0
+#endif /* SUPPORT_HW_RECOVERY */
+
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
+#define OPTIONS_BIT4 (0x1U << 4)
+#else
+#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST)
+#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
+#define OPTIONS_BIT4 (0x1U << 4)
+#else
+#define OPTIONS_BIT4 0x0
+#endif /* PVR_SECURE_HANDLES */
+#endif
+
+#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST)
+#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
+#define OPTIONS_BIT5 (0x1U << 5)
+#else
+#define OPTIONS_BIT5 0x0
+#endif /* SGX_BYPASS_SYSTEM_CACHE */
+
+#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST)
+#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
+#define OPTIONS_BIT6 (0x1U << 6)
+#else
+#define OPTIONS_BIT6 0x0
+#endif /* SGX_DMS_AGE_ENABLE */
+
+#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST)
+#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
+#define OPTIONS_BIT8 (0x1U << 8)
+#else
+#define OPTIONS_BIT8 0x0
+#endif /* SGX_FAST_DPM_INIT */
+
+#if defined(SGX_FEATURE_WRITEBACK_DCU) || defined (INTERNAL_TEST)
+#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
+#define OPTIONS_BIT9 (0x1U << 9)
+#else
+#define OPTIONS_BIT9 0x0
+#endif /* SGX_FEATURE_WRITEBACK_DCU */
+
+#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
+#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
+#define OPTIONS_BIT10 (0x1U << 10)
+#else
+#define OPTIONS_BIT10 0x0
+#endif /* SGX_FEATURE_MP */
+
+#if defined(SGX_FEATURE_MULTITHREADED_UKERNEL) || defined (INTERNAL_TEST)
+#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
+#define OPTIONS_BIT11 (0x1U << 11)
+#else
+#define OPTIONS_BIT11 0x0
+#endif /* SGX_FEATURE_MULTITHREADED_UKERNEL */
+
+
+
+#if defined(SGX_FEATURE_OVERLAPPED_SPM) || defined (INTERNAL_TEST)
+#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
+#define OPTIONS_BIT12 (0x1U << 12)
+#else
+#define OPTIONS_BIT12 0x0
+#endif /* SGX_FEATURE_OVERLAPPED_SPM */
+
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST)
+#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13
+#define OPTIONS_BIT13 (0x1U << 13)
+#else
+#define OPTIONS_BIT13 0x0
+#endif /* SGX_FEATURE_SYSTEM_CACHE */
+
+#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST)
+#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14
+#define OPTIONS_BIT14 (0x1U << 14)
+#else
+#define OPTIONS_BIT14 0x0
+#endif /* SGX_SUPPORT_HWPROFILING */
+
+
+
+#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST)
+#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15
+#define OPTIONS_BIT15 (0x1U << 15)
+#else
+#define OPTIONS_BIT15 0x0
+#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */
+
+#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST)
+#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16
+#define OPTIONS_BIT16 (0x1U << 16)
+#else
+#define OPTIONS_BIT16 0x0
+#endif /* SUPPORT_DISPLAYCONTROLLER_TILING */
+
+#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST)
+#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17
+#define OPTIONS_BIT17 (0x1U << 17)
+#else
+#define OPTIONS_BIT17 0x0
+#endif /* SUPPORT_PERCONTEXT_PB */
+
+#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST)
+#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18
+#define OPTIONS_BIT18 (0x1U << 18)
+#else
+#define OPTIONS_BIT18 0x0
+#endif /* SUPPORT_SGX_HWPERF */
+
+
+
+#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST)
+#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19
+#define OPTIONS_BIT19 (0x1U << 19)
+#else
+#define OPTIONS_BIT19 0x0
+#endif /* SUPPORT_SGX_MMU_DUMMY_PAGE */
+
+#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST)
+#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20
+#define OPTIONS_BIT20 (0x1U << 20)
+#else
+#define OPTIONS_BIT20 0x0
+#endif /* SUPPORT_SGX_PRIORITY_SCHEDULING */
+
+#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST)
+#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
+#define OPTIONS_BIT21 (0x1U << 21)
+#else
+#define OPTIONS_BIT21 0x0
+#endif /* SUPPORT_SGX_LOW_LATENCY_SCHEDULING */
+
+#if defined(USE_SUPPORT_NO_TA3D_OVERLAP) || defined (INTERNAL_TEST)
+#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
+#define OPTIONS_BIT22 (0x1U << 22)
+#else
+#define OPTIONS_BIT22 0x0
+#endif /* USE_SUPPORT_NO_TA3D_OVERLAP */
+
+#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
+#if defined(SGX_FEATURE_MP_CORE_COUNT)
+#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
+#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
+#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
+#else
+#define OPTIONS_HIGHBYTE (((SGX_FEATURE_MP_CORE_COUNT_TA-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET) |\
+ ((SGX_FEATURE_MP_CORE_COUNT_3D-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET_3D))
+#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 24UL
+#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET_3D 28UL
+#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
+#endif
+#else /* SGX_FEATURE_MP */
+#define OPTIONS_HIGHBYTE 0x0
+#endif /* SGX_FEATURE_MP */
+
+
+
+#define SGX_BUILD_OPTIONS \
+ OPTIONS_BIT0 |\
+ OPTIONS_BIT1 |\
+ OPTIONS_BIT2 |\
+ OPTIONS_BIT3 |\
+ OPTIONS_BIT4 |\
+ OPTIONS_BIT5 |\
+ OPTIONS_BIT6 |\
+ OPTIONS_BIT8 |\
+ OPTIONS_BIT9 |\
+ OPTIONS_BIT10 |\
+ OPTIONS_BIT11 |\
+ OPTIONS_BIT12 |\
+ OPTIONS_BIT13 |\
+ OPTIONS_BIT14 |\
+ OPTIONS_BIT15 |\
+ OPTIONS_BIT16 |\
+ OPTIONS_BIT17 |\
+ OPTIONS_BIT18 |\
+ OPTIONS_BIT19 |\
+ OPTIONS_BIT20 |\
+ OPTIONS_BIT21 |\
+ OPTIONS_BIT22 |\
+ OPTIONS_HIGHBYTE
+
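
The SGX_BUILD_OPTIONS word packs the feature bits above (plus the MP core count
in the high byte) into a single value, which lets independently built components
verify that they were configured the same way. A minimal sketch of such a
cross-check follows; the function and parameter names are illustrative and are
not taken from the driver sources.

/*
 * Hypothetical consistency check: both values are assumed to have been
 * produced by evaluating SGX_BUILD_OPTIONS at build time in two different
 * components (e.g. kernel services and the microkernel image).
 */
static int SGXBuildOptionsMatch(unsigned int ui32DriverOptions,
                                unsigned int ui32UKernelOptions)
{
	/* Any differing bit indicates mismatched feature defines or a
	   different MP core count between the two builds. */
	return (ui32DriverOptions ^ ui32UKernelOptions) == 0;
}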
diff --git a/drivers/gpu/pvr/sgx_ukernel_status_codes.h b/drivers/gpu/pvr/sgx_ukernel_status_codes.h
new file mode 100644
index 0000000..4a9eaf8
--- /dev/null
+++ b/drivers/gpu/pvr/sgx_ukernel_status_codes.h
@@ -0,0 +1,940 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+
+******************************************************************************/
+
+#ifndef __SGX_UKERNEL_STATUS_CODES_H__
+#define __SGX_UKERNEL_STATUS_CODES_H__
+
+/*
+ NOTE: Do not add any conditional macros to this file! There must be
+ no use of #if defined(). This file is included in srvkm to print
+ stringified ukernel status codes; it must build identically to
+ srvinit.
+*/
+
+/*
+ Users of this header might define this macro to do something
+ clever; the primary use right now is to generate a switch/case
+ LUT for debugging in srvkm. If you add a new code, make sure it
+ has a corresponding MKTC_ST.
+*/
+#ifndef MKTC_ST
+#define MKTC_ST(x)
+#endif
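
The MKTC_ST() invocation that follows each definition below exists so that a
consumer can redefine the macro before the header is expanded and have the code
list turn into something useful, such as the switch/case lookup mentioned above.
A minimal sketch of that pattern, assuming this is the first inclusion of the
header in the translation unit (the include guard would otherwise suppress the
expansion); the function name is illustrative, not from the driver.

/* Hypothetical consumer-side stringifier built from the MKTC_ST() hook. */
static const char *MKTraceCodeToString(unsigned int ui32Code)
{
	switch (ui32Code)
	{
		/* Each in-header MKTC_ST(x) now expands to "case x: return #x;". */
		#define MKTC_ST(x) case x: return #x;
		#include "sgx_ukernel_status_codes.h"
		#undef MKTC_ST

		default:
			return "(unknown status code)";
	}
}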
+
+/*
+ It would be nice to put these definitions into an enumeration, but USEASM
+ only has access to the C preprocessor so macros are required.
+*/
+
+/*
+ Bits 24-31 of these codes (0xAD) are a magic number used to help
+ distinguish between them and other debug information which can be
+ optionally dumped into the status buffer, e.g. sync object values.
+*/
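
Because only the trace codes carry the 0xAD tag in their top byte, a reader of
the status buffer can separate them from interleaved raw debug values with a
simple mask. A minimal sketch, with hypothetical names:

/* Hypothetical filter: 0xAD in bits 24-31 marks a microkernel trace code. */
static int IsUKernelTraceCode(unsigned int ui32Word)
{
	return (ui32Word & 0xFF000000U) == 0xAD000000U;
}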
+
+/*
+ Microkernel trace codes
+*/
+#define MKTC_EHEVENT_3DMEMFREE 0xAD000001
+MKTC_ST(MKTC_EHEVENT_3DMEMFREE)
+#define MKTC_EHEVENT_PIXELENDRENDER 0xAD000002
+MKTC_ST(MKTC_EHEVENT_PIXELENDRENDER)
+#define MKTC_EHEVENT_ISPBREAKPOINT 0xAD000004
+MKTC_ST(MKTC_EHEVENT_ISPBREAKPOINT)
+#define MKTC_EHEVENT_TAFINISHED 0xAD000005
+MKTC_ST(MKTC_EHEVENT_TAFINISHED)
+#define MKTC_EHEVENT_OUTOFMEM 0xAD000007
+MKTC_ST(MKTC_EHEVENT_OUTOFMEM)
+#define MKTC_EHEVENT_TATERMINATE 0xAD000008
+MKTC_ST(MKTC_EHEVENT_TATERMINATE)
+#define MKTC_EHEVENT_TIMER 0xAD000009
+MKTC_ST(MKTC_EHEVENT_TIMER)
+#define MKTC_EHEVENT_SWEVENT 0xAD00000A
+MKTC_ST(MKTC_EHEVENT_SWEVENT)
+#define MKTC_EHEVENT_2DCOMPLETE 0xAD00000B
+MKTC_ST(MKTC_EHEVENT_2DCOMPLETE)
+
+#define MKTC_3DEVENT_3DMEMFREE 0xAD000100
+MKTC_ST(MKTC_3DEVENT_3DMEMFREE)
+#define MKTC_3DEVENT_PIXELENDRENDER 0xAD000101
+MKTC_ST(MKTC_3DEVENT_PIXELENDRENDER)
+#define MKTC_3DEVENT_ISPBREAKPOINT 0xAD000102
+MKTC_ST(MKTC_3DEVENT_ISPBREAKPOINT)
+#define MKTC_3DEVENT_END 0xAD000104
+MKTC_ST(MKTC_3DEVENT_END)
+#define MKTC_3DLB_3DMEMFREE 0xAD000180
+MKTC_ST(MKTC_3DLB_3DMEMFREE)
+#define MKTC_3DLB_PIXELENDRENDER 0xAD000181
+MKTC_ST(MKTC_3DLB_PIXELENDRENDER)
+#define MKTC_3DLB_ISPBREAKPOINT 0xAD000182
+MKTC_ST(MKTC_3DLB_ISPBREAKPOINT)
+#define MKTC_3DLB_FIND3D 0xAD000183
+MKTC_ST(MKTC_3DLB_FIND3D)
+#define MKTC_3DLB_END 0xAD000184
+MKTC_ST(MKTC_3DLB_END)
+
+#define MKTC_TAEVENT_TAFINISHED 0xAD000200
+MKTC_ST(MKTC_TAEVENT_TAFINISHED)
+#define MKTC_TAEVENT_END 0xAD000202
+MKTC_ST(MKTC_TAEVENT_END)
+#define MKTC_TALB_TAFINISHED 0xAD000280
+MKTC_ST(MKTC_TALB_TAFINISHED)
+#define MKTC_TALB_FINDTA 0xAD000281
+MKTC_ST(MKTC_TALB_FINDTA)
+#define MKTC_TALB_END 0xAD000282
+MKTC_ST(MKTC_TALB_END)
+
+#define MKTC_CRRL_WRITEOPSBLOCKED 0xAD000300
+MKTC_ST(MKTC_CRRL_WRITEOPSBLOCKED)
+#define MKTC_CRRL_READOPSBLOCKED 0xAD000301
+MKTC_ST(MKTC_CRRL_READOPSBLOCKED)
+#define MKTC_CRRL_FOUNDRENDER 0xAD000302
+MKTC_ST(MKTC_CRRL_FOUNDRENDER)
+#define MKTC_CRRL_NORENDER 0xAD000303
+MKTC_ST(MKTC_CRRL_NORENDER)
+#define MKTC_CRRL_TARC_DIFFERENT 0xAD000304
+MKTC_ST(MKTC_CRRL_TARC_DIFFERENT)
+#define MKTC_CRRL_BLOCKEDRC 0xAD000309
+MKTC_ST(MKTC_CRRL_BLOCKEDRC)
+#define MKTC_CRRL_BLOCKEDRTDATA 0xAD00030A
+MKTC_ST(MKTC_CRRL_BLOCKEDRTDATA)
+#define MKTC_CRRL_CONTEXT_SUSPENDED 0xAD00030B
+MKTC_ST(MKTC_CRRL_CONTEXT_SUSPENDED)
+#define MKTC_CRRL_TAWAITINGFORMEM 0xAD00030C
+MKTC_ST(MKTC_CRRL_TAWAITINGFORMEM)
+#define MKTC_CRRL_TAOOMBUTPRIOINV 0xAD00030D
+MKTC_ST(MKTC_CRRL_TAOOMBUTPRIOINV)
+#define MKTC_CRRL_READOPS2BLOCKED 0xAD00030E
+MKTC_ST(MKTC_CRRL_READOPS2BLOCKED)
+#define MKTC_CRRL_SRC_WRITEOPSBLOCKED 0xAD00030F
+MKTC_ST(MKTC_CRRL_SRC_WRITEOPSBLOCKED)
+#define MKTC_CRRL_SRC_READOPSBLOCKED 0xAD000310
+MKTC_ST(MKTC_CRRL_SRC_READOPSBLOCKED)
+
+#define MKTC_KICKRENDER_START 0xAD000400
+MKTC_ST(MKTC_KICKRENDER_START)
+#define MKTC_KICKRENDER_OVERLAP 0xAD000401
+MKTC_ST(MKTC_KICKRENDER_OVERLAP)
+#define MKTC_KICKRENDER_ISP_START 0xAD000402
+MKTC_ST(MKTC_KICKRENDER_ISP_START)
+#define MKTC_KICKRENDER_RESUME 0xAD000403
+MKTC_ST(MKTC_KICKRENDER_RESUME)
+#define MKTC_KICKRENDER_CONFIG_REGION_HDRS 0xAD000404
+MKTC_ST(MKTC_KICKRENDER_CONFIG_REGION_HDRS)
+#define MKTC_KICKRENDER_END 0xAD000408
+MKTC_ST(MKTC_KICKRENDER_END)
+#define MKTC_KICKRENDER_RENDERCONTEXT 0xAD000409
+MKTC_ST(MKTC_KICKRENDER_RENDERCONTEXT)
+#define MKTC_KICKRENDER_RTDATA 0xAD00040A
+MKTC_ST(MKTC_KICKRENDER_RTDATA)
+#define MKTC_KICKRENDER_PID 0xAD00040B
+MKTC_ST(MKTC_KICKRENDER_PID)
+
+#define MKTC_RENDERFINISHED_START 0xAD000500
+MKTC_ST(MKTC_RENDERFINISHED_START)
+#define MKTC_RF_START_NEXT_MT 0xAD000501
+MKTC_ST(MKTC_RF_START_NEXT_MT)
+#define MKTC_RF_ALL_MTS_DONE 0xAD000502
+MKTC_ST(MKTC_RF_ALL_MTS_DONE)
+#define MKTC_RENDERFINISHED_END 0xAD000503
+MKTC_ST(MKTC_RENDERFINISHED_END)
+#define MKTC_VISQUERY_START 0xAD000504
+MKTC_ST(MKTC_VISQUERY_START)
+#define MKTC_VISQUERY_END 0xAD000505
+MKTC_ST(MKTC_VISQUERY_END)
+#define MKTC_TRANSFERRENDERFINISHED_START 0xAD000508
+MKTC_ST(MKTC_TRANSFERRENDERFINISHED_START)
+#define MKTC_TRANSFERRENDERFINISHED_END 0xAD000509
+MKTC_ST(MKTC_TRANSFERRENDERFINISHED_END)
+#define MKTC_TRF_UPDATESTATUSVALS 0xAD00050A
+MKTC_ST(MKTC_TRF_UPDATESTATUSVALS)
+#define MKTC_TRF_UPDATESTATUSVALS_DONE 0xAD00050B
+MKTC_ST(MKTC_TRF_UPDATESTATUSVALS_DONE)
+
+#define MKTC_PIXELENDRENDER_START 0xAD000600
+MKTC_ST(MKTC_PIXELENDRENDER_START)
+#define MKTC_PIXELENDRENDER_AFTERLOCK 0xAD000601
+MKTC_ST(MKTC_PIXELENDRENDER_AFTERLOCK)
+#define MKTC_PIXELENDRENDER_END 0xAD000602
+MKTC_ST(MKTC_PIXELENDRENDER_END)
+#define MKTC_PIXELENDRENDER_TLQEND 0xAD000603
+MKTC_ST(MKTC_PIXELENDRENDER_TLQEND)
+
+#define MKTC_3DMEMFREE_START 0xAD000700
+MKTC_ST(MKTC_3DMEMFREE_START)
+#define MKTC_3DMEMFREE_AFTERLOCK 0xAD000701
+MKTC_ST(MKTC_3DMEMFREE_AFTERLOCK)
+#define MKTC_3DMEMFREE_TESTEOR 0xAD000702
+MKTC_ST(MKTC_3DMEMFREE_TESTEOR)
+#define MKTC_3DMEMFREE_END 0xAD000703
+MKTC_ST(MKTC_3DMEMFREE_END)
+
+#define MKTC_KICKTA_START 0xAD000800
+MKTC_ST(MKTC_KICKTA_START)
+#define MKTC_KICKTA_OVERLAP 0xAD000801
+MKTC_ST(MKTC_KICKTA_OVERLAP)
+#define MKTC_KICKTA_RESETCONTEXT 0xAD000802
+MKTC_ST(MKTC_KICKTA_RESETCONTEXT)
+#define MKTC_KICKTA_VDM_START 0xAD000803
+MKTC_ST(MKTC_KICKTA_VDM_START)
+#define MKTC_KICKTA_END 0xAD000804
+MKTC_ST(MKTC_KICKTA_END)
+#define MKTC_KICKTA_RENDERCONTEXT 0xAD000805
+MKTC_ST(MKTC_KICKTA_RENDERCONTEXT)
+#define MKTC_KICKTA_RTDATA 0xAD000806
+MKTC_ST(MKTC_KICKTA_RTDATA)
+#define MKTC_KICKTA_RESET_VDMCSSTATUS 0xAD000807
+MKTC_ST(MKTC_KICKTA_RESET_VDMCSSTATUS)
+#define MKTC_KICKTA_RESET_BUFFERS 0xAD000808
+MKTC_ST(MKTC_KICKTA_RESET_BUFFERS)
+#define MKTC_KICKTA_PID 0xAD000809
+MKTC_ST(MKTC_KICKTA_PID)
+#define MKTC_KICKTA_TACMD_DEBUG 0xAD00080A
+MKTC_ST(MKTC_KICKTA_TACMD_DEBUG)
+#define MKTC_KICKTA_FREECONTEXT 0xAD00080B
+MKTC_ST(MKTC_KICKTA_FREECONTEXT)
+#define MKTC_KICKTA_PIM_PATCHING 0xAD00080C
+MKTC_ST(MKTC_KICKTA_PIM_PATCHING)
+
+#define MKTC_KICKTA_CHKPT_START_DUMMY_CS 0xAD0008A1
+MKTC_ST(MKTC_KICKTA_CHKPT_START_DUMMY_CS)
+#define MKTC_KICKTA_CHKPT_START_DUMMY_TAK 0xAD0008A2
+MKTC_ST(MKTC_KICKTA_CHKPT_START_DUMMY_TAK)
+#define MKTC_KICKTA_CHKPT_WAIT_FOR_DUMMY_KICK 0xAD0008A3
+MKTC_ST(MKTC_KICKTA_CHKPT_WAIT_FOR_DUMMY_KICK)
+#define MKTC_KICKTA_CHKPT_WAIT_NEXT_CORE 0xAD0008A4
+MKTC_ST(MKTC_KICKTA_CHKPT_WAIT_NEXT_CORE)
+#define MKTC_KICKTA_CHKPT_RESET_COMPLETE 0xAD0008A5
+MKTC_ST(MKTC_KICKTA_CHKPT_RESET_COMPLETE)
+#define MKTC_KICKTA_CHKPT_CHECK_SWITCH 0xAD0008A6
+MKTC_ST(MKTC_KICKTA_CHKPT_CHECK_SWITCH)
+
+#define MKTC_HOSTKICK_START 0xAD000900
+MKTC_ST(MKTC_HOSTKICK_START)
+#define MKTC_HOSTKICK_END 0xAD000901
+MKTC_ST(MKTC_HOSTKICK_END)
+#define MKTC_HOSTKICK_PROCESS_QUEUES_END 0xAD000902
+MKTC_ST(MKTC_HOSTKICK_PROCESS_QUEUES_END)
+#define MKTC_HOSTKICK_2D 0xAD000903
+MKTC_ST(MKTC_HOSTKICK_2D)
+#define MKTC_HOSTKICK_TRANSFER 0xAD000904
+MKTC_ST(MKTC_HOSTKICK_TRANSFER)
+#define MKTC_HOSTKICK_TA 0xAD000905
+MKTC_ST(MKTC_HOSTKICK_TA)
+#define MKTC_HOSTKICK_PROCESS_QUEUES 0xAD000906
+MKTC_ST(MKTC_HOSTKICK_PROCESS_QUEUES)
+#define MKTC_HOSTKICK_RESUME 0xAD000908
+MKTC_ST(MKTC_HOSTKICK_RESUME)
+#define MKTC_HOSTKICK_POWEROFF 0xAD000909
+MKTC_ST(MKTC_HOSTKICK_POWEROFF)
+#define MKTC_HOSTKICK_IDLE 0xAD00090A
+MKTC_ST(MKTC_HOSTKICK_IDLE)
+#define MKTC_HOSTKICK_CTXSUSPEND 0xAD00090B
+MKTC_ST(MKTC_HOSTKICK_CTXSUSPEND)
+#define MKTC_HOSTKICK_CTXRESUME 0xAD00090C
+MKTC_ST(MKTC_HOSTKICK_CTXRESUME)
+
+#define MKTC_TIMER_POTENTIAL_TA_LOCKUP 0xAD000A00
+MKTC_ST(MKTC_TIMER_POTENTIAL_TA_LOCKUP)
+#define MKTC_TIMER_POTENTIAL_3D_LOCKUP 0xAD000A01
+MKTC_ST(MKTC_TIMER_POTENTIAL_3D_LOCKUP)
+#define MKTC_TIMER_CTAL_START 0xAD000A02
+MKTC_ST(MKTC_TIMER_CTAL_START)
+#define MKTC_TIMER_CTAL_END 0xAD000A03
+MKTC_ST(MKTC_TIMER_CTAL_END)
+#define MKTC_TIMER_C3DL_START 0xAD000A04
+MKTC_ST(MKTC_TIMER_C3DL_START)
+#define MKTC_TIMER_C3DL_END 0xAD000A05
+MKTC_ST(MKTC_TIMER_C3DL_END)
+#define MKTC_TIMER_LOCKUP 0xAD000A0A
+MKTC_ST(MKTC_TIMER_LOCKUP)
+#define MKTC_TIMER_NOT_TA_LOCKUP 0xAD000A0B
+MKTC_ST(MKTC_TIMER_NOT_TA_LOCKUP)
+#define MKTC_TIMER_NOT_3D_LOCKUP 0xAD000A0C
+MKTC_ST(MKTC_TIMER_NOT_3D_LOCKUP)
+#define MKTC_TIMER_2D_LOCKUP 0xAD000A0D
+MKTC_ST(MKTC_TIMER_2D_LOCKUP)
+#define MKTC_TIMER_POTENTIAL_2D_LOCKUP 0xAD000A10
+MKTC_ST(MKTC_TIMER_POTENTIAL_2D_LOCKUP)
+#define MKTC_TIMER_C2DL_START 0xAD000A11
+MKTC_ST(MKTC_TIMER_C2DL_START)
+#define MKTC_TIMER_C2DL_END 0xAD000A12
+MKTC_ST(MKTC_TIMER_C2DL_END)
+#define MKTC_TIMER_NOT_2D_LOCKUP 0xAD000A13
+MKTC_ST(MKTC_TIMER_NOT_2D_LOCKUP)
+#define MKTC_TIMER_ABORTALL 0xAD000A0E
+MKTC_ST(MKTC_TIMER_ABORTALL)
+#define MKTC_TIMER_END 0xAD000A0F
+MKTC_ST(MKTC_TIMER_END)
+
+#define MKTC_HWR_START 0xAD000B00
+MKTC_ST(MKTC_HWR_START)
+#define MKTC_HWR_END 0xAD000B01
+MKTC_ST(MKTC_HWR_END)
+#define MKTC_HWR_HKS 0xAD000B02
+MKTC_ST(MKTC_HWR_HKS)
+#define MKTC_HWR_PRL 0xAD000B03
+MKTC_ST(MKTC_HWR_PRL)
+#define MKTC_HWR_PRL_DP 0xAD000B04
+MKTC_ST(MKTC_HWR_PRL_DP)
+#define MKTC_HWR_CRL 0xAD000B05
+MKTC_ST(MKTC_HWR_CRL)
+#define MKTC_HWR_CRL_DP 0xAD000B06
+MKTC_ST(MKTC_HWR_CRL_DP)
+#define MKTC_HWR_TRL 0xAD000B07
+MKTC_ST(MKTC_HWR_TRL)
+#define MKTC_HWR_TRL_DP 0xAD000B08
+MKTC_ST(MKTC_HWR_TRL_DP)
+#define MKTC_HWR_ISC 0xAD000B09
+MKTC_ST(MKTC_HWR_ISC)
+#define MKTC_HWR_2DL 0xAD000B0A
+MKTC_ST(MKTC_HWR_2DL)
+
+#define MKTC_URSV_START 0xAD000C00
+MKTC_ST(MKTC_URSV_START)
+#define MKTC_URSV_UPDATEWRITEOPS 0xAD000C01
+MKTC_ST(MKTC_URSV_UPDATEWRITEOPS)
+#define MKTC_URSV_UPDATESTATUSVALS 0xAD000C03
+MKTC_ST(MKTC_URSV_UPDATESTATUSVALS)
+#define MKTC_URSV_UPDATESTATUSVALS_DONE 0xAD000C04
+MKTC_ST(MKTC_URSV_UPDATESTATUSVALS_DONE)
+#define MKTC_URSV_END 0xAD000C05
+MKTC_ST(MKTC_URSV_END)
+
+#define MKTC_STORETACONTEXT_START 0xAD000D00
+MKTC_ST(MKTC_STORETACONTEXT_START)
+#define MKTC_STORETACONTEXT_END 0xAD000D01
+MKTC_ST(MKTC_STORETACONTEXT_END)
+#define MKTC_LOADTACONTEXT_START 0xAD000D02
+MKTC_ST(MKTC_LOADTACONTEXT_START)
+#define MKTC_LOADTACONTEXT_END 0xAD000D03
+MKTC_ST(MKTC_LOADTACONTEXT_END)
+#define MKTC_STORE3DCONTEXT_START 0xAD000D04
+MKTC_ST(MKTC_STORE3DCONTEXT_START)
+#define MKTC_STORE3DCONTEXT_END 0xAD000D05
+MKTC_ST(MKTC_STORE3DCONTEXT_END)
+#define MKTC_LOAD3DCONTEXT_START 0xAD000D06
+MKTC_ST(MKTC_LOAD3DCONTEXT_START)
+#define MKTC_LOAD3DCONTEXT_END 0xAD000D07
+MKTC_ST(MKTC_LOAD3DCONTEXT_END)
+
+#define MKTC_FINDTA_POWERREQUEST 0xAD000E00
+MKTC_ST(MKTC_FINDTA_POWERREQUEST)
+#define MKTC_FINDTA_TA3D_OVERLAP_BLOCKED 0xAD000E01
+MKTC_ST(MKTC_FINDTA_TA3D_OVERLAP_BLOCKED)
+#define MKTC_FINDTA_RTDATA_RENDERING 0xAD000E02
+MKTC_ST(MKTC_FINDTA_RTDATA_RENDERING)
+#define MKTC_FINDTA_3DRC_DIFFERENT 0xAD000E03
+MKTC_ST(MKTC_FINDTA_3DRC_DIFFERENT)
+#define MKTC_FINDTA_WRITEOPSBLOCKED 0xAD000E04
+MKTC_ST(MKTC_FINDTA_WRITEOPSBLOCKED)
+#define MKTC_FINDTA_READOPSBLOCKED 0xAD000E05
+MKTC_ST(MKTC_FINDTA_READOPSBLOCKED)
+#define MKTC_FINDTA_RESIZE_PB 0xAD000E06
+MKTC_ST(MKTC_FINDTA_RESIZE_PB)
+#define MKTC_FINDTA_RESIZE_PB_BLOCKED 0xAD000E07
+MKTC_ST(MKTC_FINDTA_RESIZE_PB_BLOCKED)
+#define MKTC_FINDTA_SHRINK_PB 0xAD000E08
+MKTC_ST(MKTC_FINDTA_SHRINK_PB)
+#define MKTC_FINDTA_TAPB_DIFFERENT 0xAD000E09
+MKTC_ST(MKTC_FINDTA_TAPB_DIFFERENT)
+#define MKTC_FINDTA_TACONTEXT_DIFFERENT 0xAD000E0A
+MKTC_ST(MKTC_FINDTA_TACONTEXT_DIFFERENT)
+#define MKTC_FINDTA_TA2D_OVERLAP_BLOCKED 0xAD000E0B
+MKTC_ST(MKTC_FINDTA_TA2D_OVERLAP_BLOCKED)
+#define MKTC_FINDTA_CONTEXT_SUSPENDED 0xAD000E0C
+MKTC_ST(MKTC_FINDTA_CONTEXT_SUSPENDED)
+#define MKTC_FINDTA_SRC_READOPSBLOCKED 0xAD000E0D
+MKTC_ST(MKTC_FINDTA_SRC_READOPSBLOCKED)
+#define MKTC_FINDTA_SRC_WRITEOPSBLOCKED 0xAD000E0E
+MKTC_ST(MKTC_FINDTA_SRC_WRITEOPSBLOCKED)
+#define MKTC_FINDTA_READOPS2BLOCKED 0xAD000E0F
+MKTC_ST(MKTC_FINDTA_READOPS2BLOCKED)
+
+#define MKTC_CTRL_SRCREADOPSBLOCKED 0xAD000F00
+MKTC_ST(MKTC_CTRL_SRCREADOPSBLOCKED)
+#define MKTC_CTRL_SRCWRITEOPSBLOCKED 0xAD000F01
+MKTC_ST(MKTC_CTRL_SRCWRITEOPSBLOCKED)
+#define MKTC_CTRL_DSTREADOPSBLOCKED 0xAD000F02
+MKTC_ST(MKTC_CTRL_DSTREADOPSBLOCKED)
+#define MKTC_CTRL_DSTWRITEOPSBLOCKED 0xAD000F03
+MKTC_ST(MKTC_CTRL_DSTWRITEOPSBLOCKED)
+#define MKTC_CTRL_TARC_DIFFERENT 0xAD000F04
+MKTC_ST(MKTC_CTRL_TARC_DIFFERENT)
+#define MKTC_CTRL_CONTEXT_SUSPENDED 0xAD000F05
+MKTC_ST(MKTC_CTRL_CONTEXT_SUSPENDED)
+#define MKTC_CTRL_SRCREADOPS2BLOCKED 0xAD000F06
+MKTC_ST(MKTC_CTRL_SRCREADOPS2BLOCKED)
+
+#define MKTC_DPTA_START 0xAD001000
+MKTC_ST(MKTC_DPTA_START)
+#define MKTC_DPTA_UPDATESTATUSVALS 0xAD001001
+MKTC_ST(MKTC_DPTA_UPDATESTATUSVALS)
+#define MKTC_DPTA_UPDATESTATUSVALS_DONE 0xAD001002
+MKTC_ST(MKTC_DPTA_UPDATESTATUSVALS_DONE)
+#define MKTC_DPTA_NORENDER 0xAD001003
+MKTC_ST(MKTC_DPTA_NORENDER)
+#define MKTC_DPTA_MEMFREE 0xAD001004
+MKTC_ST(MKTC_DPTA_MEMFREE)
+#define MKTC_DPTA_INC_COMPLETECOUNT 0xAD001005
+MKTC_ST(MKTC_DPTA_INC_COMPLETECOUNT)
+
+#define MKTC_INVALDC 0xAD001100
+MKTC_ST(MKTC_INVALDC)
+#define MKTC_INVALPT 0xAD001101
+MKTC_ST(MKTC_INVALPT)
+#define MKTC_INVALSLC 0xAD001102
+MKTC_ST(MKTC_INVALSLC)
+#define MKTC_INVALDATA 0xAD001103
+MKTC_ST(MKTC_INVALDATA)
+
+#define MKTC_RESTARTTA 0xAD001200
+MKTC_ST(MKTC_RESTARTTA)
+#define MKTC_CSABORTNONGBL 0xAD001201
+MKTC_ST(MKTC_CSABORTNONGBL)
+#define MKTC_CSABORTALL 0xAD001202
+MKTC_ST(MKTC_CSABORTALL)
+#define MKTC_CSRENDERINPROGRESS 0xAD001203
+MKTC_ST(MKTC_CSRENDERINPROGRESS)
+#define MKTC_TATERMRENDERINPROGRESS 0xAD001204
+MKTC_ST(MKTC_TATERMRENDERINPROGRESS)
+#define MKTC_RESTARTTANORENDER 0xAD001205
+MKTC_ST(MKTC_RESTARTTANORENDER)
+#define MKTC_SPM_KICKRENDER 0xAD001206
+MKTC_ST(MKTC_SPM_KICKRENDER)
+#define MKTC_SPM_RESUME_ABORTCOMPLETE 0xAD001208
+MKTC_ST(MKTC_SPM_RESUME_ABORTCOMPLETE)
+#define MKTC_RESUMEVDM 0xAD001209
+MKTC_ST(MKTC_RESUMEVDM)
+#define MKTC_REMOVE_RESERVE_MEM 0xAD00120A
+MKTC_ST(MKTC_REMOVE_RESERVE_MEM)
+#define MKTC_INCREASEZLSTHRESHOLD 0xAD00120B
+MKTC_ST(MKTC_INCREASEZLSTHRESHOLD)
+#define MKTC_CSFORCEABORTALL 0xAD00120C
+MKTC_ST(MKTC_CSFORCEABORTALL)
+
+#define MKTC_DUMMY_DEPTH 0xAD00120D
+MKTC_ST(MKTC_DUMMY_DEPTH)
+#define MKTC_DUMMY_DEPTH_CS 0xAD00120E
+MKTC_ST(MKTC_DUMMY_DEPTH_CS)
+
+#define MKTC_MTETE_OOM 0xAD00120F
+MKTC_ST(MKTC_MTETE_OOM)
+#define MKTC_MTETE_OOM_FIRST_STORE_REF 0xAD001210
+MKTC_ST(MKTC_MTETE_OOM_FIRST_STORE_REF)
+#define MKTC_MERGE_STATE_TABLES 0xAD001211
+MKTC_ST(MKTC_MERGE_STATE_TABLES)
+#define MKTC_NO_PAGES_LEFT_FOR_23055 0xAD001212
+MKTC_ST(MKTC_NO_PAGES_LEFT_FOR_23055)
+#define MKTC_NO_STATE_MODS 0xAD001213
+MKTC_ST(MKTC_NO_STATE_MODS)
+#define MKTC_FIND_MTE_PAGE_IN_STATE 0xAD001214
+MKTC_ST(MKTC_FIND_MTE_PAGE_IN_STATE)
+#define MKTC_MTE_PAGE_FOUND 0xAD001215
+MKTC_ST(MKTC_MTE_PAGE_FOUND)
+#define MKTC_MOVE_MTE_PAGE_TO_TA_STATE 0xAD001216
+MKTC_ST(MKTC_MOVE_MTE_PAGE_TO_TA_STATE)
+#define MKTC_MOVE_MTE_PAGE_TO_TA_STATE_END 0xAD001217
+MKTC_ST(MKTC_MOVE_MTE_PAGE_TO_TA_STATE_END)
+#define MKTC_ZERO_ZLS_THRESHOLD 0xAD001218
+MKTC_ST(MKTC_ZERO_ZLS_THRESHOLD)
+#define MKTC_RESTORE_ZLS_THRESHOLD 0xAD001219
+MKTC_ST(MKTC_RESTORE_ZLS_THRESHOLD)
+#define MKTC_FIND_MTE_PAGE_IN_CSM 0xAD00121A
+MKTC_ST(MKTC_FIND_MTE_PAGE_IN_CSM)
+#define MKTC_REISSUE_MTE_PAGE 0xAD00121B
+MKTC_ST(MKTC_REISSUE_MTE_PAGE)
+#define MKTC_REISSUE_MTE_PAGE_REQUIRED 0xAD00121C
+MKTC_ST(MKTC_REISSUE_MTE_PAGE_REQUIRED)
+#define MKTC_REISSUE_MTE_PAGE_END 0xAD00121D
+MKTC_ST(MKTC_REISSUE_MTE_PAGE_END)
+#define MKTC_RESET_TE_PSG 0xAD00121E
+MKTC_ST(MKTC_RESET_TE_PSG)
+
+#define MKTC_OOM_WRITEOPSBLOCKED 0xAD00121F
+MKTC_ST(MKTC_OOM_WRITEOPSBLOCKED)
+#define MKTC_OOM_READOPSBLOCKED 0xAD001220
+MKTC_ST(MKTC_OOM_READOPSBLOCKED)
+#define MKTC_OOM_SRC_WRITEOPSBLOCKED 0xAD001221
+MKTC_ST(MKTC_OOM_SRC_WRITEOPSBLOCKED)
+#define MKTC_OOM_SRC_READOPSBLOCKED 0xAD001222
+MKTC_ST(MKTC_OOM_SRC_READOPSBLOCKED)
+#define MKTC_OOM_SPM_DEADLOCK 0xAD001223
+MKTC_ST(MKTC_OOM_SPM_DEADLOCK)
+#define MKTC_OOM_SPM_DEADLOCK_MEM_ADDED 0xAD001224
+MKTC_ST(MKTC_OOM_SPM_DEADLOCK_MEM_ADDED)
+#define MKTC_RESET 0xAD001225
+MKTC_ST(MKTC_RESET)
+#define MKTC_SPM_INVALID_ZLSCONFIG 0xAD001226
+MKTC_ST(MKTC_SPM_INVALID_ZLSCONFIG)
+
+#define MKTC_OOM_TYPE_MT 0xAD00122A
+MKTC_ST(MKTC_OOM_TYPE_MT)
+#define MKTC_OOM_TYPE_GLOBAL 0xAD001230
+MKTC_ST(MKTC_OOM_TYPE_GLOBAL)
+#define MKTC_OOM_CAUSE_GBL_OOM 0xAD001231
+MKTC_ST(MKTC_OOM_CAUSE_GBL_OOM)
+#define MKTC_OOM_RESTORE_LIST_SIZE 0xAD001232
+MKTC_ST(MKTC_OOM_RESTORE_LIST_SIZE)
+
+#define MKTC_CHECK_MTE_PAGE_REISSUE 0xAD001240
+MKTC_ST(MKTC_CHECK_MTE_PAGE_REISSUE)
+#define MKTC_CPRI_VALID_ENTRIES 0xAD001241
+MKTC_ST(MKTC_CPRI_VALID_ENTRIES)
+#define MKTC_CPRI_STORE_DPLIST 0xAD001242
+MKTC_ST(MKTC_CPRI_STORE_DPLIST)
+#define MKTC_CPRI_STORE_OTPM_CSM 0xAD001243
+MKTC_ST(MKTC_CPRI_STORE_OTPM_CSM)
+#define MKTC_CPRI_ABORT_MT_IDX 0xAD001244
+MKTC_ST(MKTC_CPRI_ABORT_MT_IDX)
+#define MKTC_CPRI_ABORT_CORE_IDX 0xAD001245
+MKTC_ST(MKTC_CPRI_ABORT_CORE_IDX)
+#define MKTC_CPRI_CSM_TABLE_DATA 0xAD001246
+MKTC_ST(MKTC_CPRI_CSM_TABLE_DATA)
+#define MKTC_CPRI_PIM_DATA 0xAD001247
+MKTC_ST(MKTC_CPRI_PIM_DATA)
+#define MKTC_CPRI_DO_CIRCULAR_TEST 0xAD001248
+MKTC_ST(MKTC_CPRI_DO_CIRCULAR_TEST)
+#define MKTC_CPRI_WRITE_ENTRIES 0xAD001249
+MKTC_ST(MKTC_CPRI_WRITE_ENTRIES)
+
+#define MKTC_MTE_ENTRY_NOT_IN_ANY_LIST 0xAD001250
+MKTC_ST(MKTC_MTE_ENTRY_NOT_IN_ANY_LIST)
+
+#define MKTC_SPMAC_IGNORE_TERMINATE 0xAD001251
+MKTC_ST(MKTC_SPMAC_IGNORE_TERMINATE)
+
+#define MKTC_SPMAC_REQUEST_3D_TIMEOUT 0xAD001252
+MKTC_ST(MKTC_SPMAC_REQUEST_3D_TIMEOUT)
+#define MKTC_SPMAC_3D_TIMEOUT_COMPLETE 0xAD001253
+MKTC_ST(MKTC_SPMAC_3D_TIMEOUT_COMPLETE)
+#define MKTC_OOM_READOPS2BLOCKED 0xAD001254
+MKTC_ST(MKTC_OOM_READOPS2BLOCKED)
+
+/* PB Load/store status */
+#define MKTC_LOADTAPB_START 0xAD001300
+MKTC_ST(MKTC_LOADTAPB_START)
+#define MKTC_LOADTAPB_END 0xAD001301
+MKTC_ST(MKTC_LOADTAPB_END)
+#define MKTC_STORETAPB_START 0xAD001302
+MKTC_ST(MKTC_STORETAPB_START)
+#define MKTC_STORETAPB_END 0xAD001303
+MKTC_ST(MKTC_STORETAPB_END)
+#define MKTC_LOAD3DPB_START 0xAD001304
+MKTC_ST(MKTC_LOAD3DPB_START)
+#define MKTC_LOAD3DPB_END 0xAD001305
+MKTC_ST(MKTC_LOAD3DPB_END)
+#define MKTC_STORE3DPB_START 0xAD001306
+MKTC_ST(MKTC_STORE3DPB_START)
+#define MKTC_STORE3DPB_END 0xAD001307
+MKTC_ST(MKTC_STORE3DPB_END)
+#define MKTC_LOADTAPB_PAGETABLE_DONE 0xAD001308
+MKTC_ST(MKTC_LOADTAPB_PAGETABLE_DONE)
+#define MKTC_LOAD3DPB_PAGETABLE_DONE 0xAD001309
+MKTC_ST(MKTC_LOAD3DPB_PAGETABLE_DONE)
+
+#define MKTC_TIMER_RC_CLEANUP 0xAD001400
+MKTC_ST(MKTC_TIMER_RC_CLEANUP)
+#define MKTC_TIMER_RC_CLEANUP_DONE 0xAD001401
+MKTC_ST(MKTC_TIMER_RC_CLEANUP_DONE)
+#define MKTC_TIMER_RC_CLEANUP_BUSY 0xAD001402
+MKTC_ST(MKTC_TIMER_RC_CLEANUP_BUSY)
+#define MKTC_TIMER_RT_CLEANUP 0xAD001410
+MKTC_ST(MKTC_TIMER_RT_CLEANUP)
+#define MKTC_TIMER_RT_CLEANUP_DONE 0xAD001411
+MKTC_ST(MKTC_TIMER_RT_CLEANUP_DONE)
+#define MKTC_TIMER_RT_CLEANUP_PENDING 0xAD001412
+MKTC_ST(MKTC_TIMER_RT_CLEANUP_PENDING)
+#define MKTC_TIMER_RT_CLEANUP_TIDYPARTIALLIST 0xAD001413
+MKTC_ST(MKTC_TIMER_RT_CLEANUP_TIDYPARTIALLIST)
+#define MKTC_TIMER_RT_CLEANUP_BUSY 0xAD001414
+MKTC_ST(MKTC_TIMER_RT_CLEANUP_BUSY)
+#define MKTC_TIMER_TC_CLEANUP 0xAD001420
+MKTC_ST(MKTC_TIMER_TC_CLEANUP)
+#define MKTC_TIMER_TC_CLEANUP_DONE 0xAD001421
+MKTC_ST(MKTC_TIMER_TC_CLEANUP_DONE)
+#define MKTC_TIMER_TC_CLEANUP_BUSY 0xAD001422
+MKTC_ST(MKTC_TIMER_TC_CLEANUP_BUSY)
+#define MKTC_TIMER_2DC_CLEANUP 0xAD001430
+MKTC_ST(MKTC_TIMER_2DC_CLEANUP)
+#define MKTC_TIMER_2DC_CLEANUP_DONE 0xAD001431
+MKTC_ST(MKTC_TIMER_2DC_CLEANUP_DONE)
+#define MKTC_TIMER_2DC_CLEANUP_BUSY 0xAD001432
+MKTC_ST(MKTC_TIMER_2DC_CLEANUP_BUSY)
+#define MKTC_TIMER_SHAREDPBDESC_CLEANUP 0xAD001440
+MKTC_ST(MKTC_TIMER_SHAREDPBDESC_CLEANUP)
+
+
+#define MKTC_TIMER_ISP_SWITCH_POTENTIAL_LOCKUP 0xAD001450
+MKTC_ST(MKTC_TIMER_ISP_SWITCH_POTENTIAL_LOCKUP)
+#define MKTC_TIMER_ISP_SWITCH_FORCE_SWITCH 0xAD001451
+MKTC_ST(MKTC_TIMER_ISP_SWITCH_FORCE_SWITCH)
+
+#define MKTC_UTSO_UPDATEREADOPS 0xAD001600
+MKTC_ST(MKTC_UTSO_UPDATEREADOPS)
+#define MKTC_UTSO_UPDATEWRITEOPS 0xAD001601
+MKTC_ST(MKTC_UTSO_UPDATEWRITEOPS)
+
+#define MKTC_TAFINISHED_UPDATESTATUSVALS 0xAD001700
+MKTC_ST(MKTC_TAFINISHED_UPDATESTATUSVALS)
+#define MKTC_TAFINISHED_UPDATESTATUSVALS_DONE 0xAD001701
+MKTC_ST(MKTC_TAFINISHED_UPDATESTATUSVALS_DONE)
+#define MKTC_TAFINISHED_NORENDER 0xAD001702
+MKTC_ST(MKTC_TAFINISHED_NORENDER)
+#define MKTC_TAFINISHED_LASTKICK 0xAD001703
+MKTC_ST(MKTC_TAFINISHED_LASTKICK)
+#define MKTC_TAFINISHED_FINDRENDER 0xAD001704
+MKTC_ST(MKTC_TAFINISHED_FINDRENDER)
+#define MKTC_TAFINISHED_FINDTA 0xAD001705
+MKTC_ST(MKTC_TAFINISHED_FINDTA)
+#define MKTC_TAFINISHED_END 0xAD001706
+MKTC_ST(MKTC_TAFINISHED_END)
+#define MKTC_TAF_SPM_DEADLOCK_MEM_REMOVED 0xAD001707
+MKTC_ST(MKTC_TAF_SPM_DEADLOCK_MEM_REMOVED)
+#define MKTC_TAF_RESERVE_MEM 0xAD001708
+MKTC_ST(MKTC_TAF_RESERVE_MEM)
+#define MKTC_TAF_RESERVE_MEM_REQUEST_RENDER 0xAD001709
+MKTC_ST(MKTC_TAF_RESERVE_MEM_REQUEST_RENDER)
+#define MKTC_TAF_RESERVE_FREE_RENDER_FINISHED 0xAD00170A
+MKTC_ST(MKTC_TAF_RESERVE_FREE_RENDER_FINISHED)
+#define MKTC_TAF_RESERVE_FREE_DUMMY_RENDER 0xAD00170B
+MKTC_ST(MKTC_TAF_RESERVE_FREE_DUMMY_RENDER)
+#define MKTC_TAF_DEBUG_SAS 0xAD00170C
+MKTC_ST(MKTC_TAF_DEBUG_SAS)
+#define MKTC_TAFINISHED_NOCONTEXTSWITCH 0xAD00170D
+MKTC_ST(MKTC_TAFINISHED_NOCONTEXTSWITCH)
+
+#define MKTC_TAFINISHED_TERM_COMPLETE_START 0xAD001710
+MKTC_ST(MKTC_TAFINISHED_TERM_COMPLETE_START)
+#define MKTC_TAFINISHED_TERM_COMPLETE_END 0xAD001711
+MKTC_ST(MKTC_TAFINISHED_TERM_COMPLETE_END)
+
+#define MKTC_TAFINISHED_DPMPAGERECYCLING 0xAD001720
+MKTC_ST(MKTC_TAFINISHED_DPMPAGERECYCLING)
+
+#define MKTC_2DEVENT_2DCOMPLETE 0xAD001800
+MKTC_ST(MKTC_2DEVENT_2DCOMPLETE)
+#define MKTC_2DEVENT_END 0xAD001801
+MKTC_ST(MKTC_2DEVENT_END)
+#define MKTC_2DLB_2DCOMPLETE 0xAD001802
+MKTC_ST(MKTC_2DLB_2DCOMPLETE)
+#define MKTC_2DLB_FIND2D 0xAD001803
+MKTC_ST(MKTC_2DLB_FIND2D)
+#define MKTC_2DLB_END 0xAD001804
+MKTC_ST(MKTC_2DLB_END)
+#define MKTC_2DCOMPLETE_START 0xAD001805
+MKTC_ST(MKTC_2DCOMPLETE_START)
+#define MKTC_2DCOMPLETE_END 0xAD001806
+MKTC_ST(MKTC_2DCOMPLETE_END)
+#define MKTC_KICK2D_START 0xAD001807
+MKTC_ST(MKTC_KICK2D_START)
+#define MKTC_KICK2D_END 0xAD001808
+MKTC_ST(MKTC_KICK2D_END)
+#define MKTC_DUMMYPROC2D 0xAD001809
+MKTC_ST(MKTC_DUMMYPROC2D)
+#define MKTC_FTD_SRCREADOPSBLOCKED 0xAD00180A
+MKTC_ST(MKTC_FTD_SRCREADOPSBLOCKED)
+#define MKTC_FTD_SRCWRITEOPSBLOCKED 0xAD00180B
+MKTC_ST(MKTC_FTD_SRCWRITEOPSBLOCKED)
+#define MKTC_FTD_DSTREADOPSBLOCKED 0xAD00180C
+MKTC_ST(MKTC_FTD_DSTREADOPSBLOCKED)
+#define MKTC_FTD_DSTWRITEOPSBLOCKED 0xAD00180D
+MKTC_ST(MKTC_FTD_DSTWRITEOPSBLOCKED)
+#define MKTC_FTD_TA2D_OVERLAP_BLOCKED 0xAD00180E
+MKTC_ST(MKTC_FTD_TA2D_OVERLAP_BLOCKED)
+#define MKTC_U2DSO_UPDATEREADOPS 0xAD00180F
+MKTC_ST(MKTC_U2DSO_UPDATEREADOPS)
+#define MKTC_U2DSO_UPDATEWRITEOPS 0xAD001810
+MKTC_ST(MKTC_U2DSO_UPDATEWRITEOPS)
+#define MKTC_FTD_TAOPSBLOCKED 0xAD001811
+MKTC_ST(MKTC_FTD_TAOPSBLOCKED)
+#define MKTC_KICK2D_2DSLAVEPORT 0xAD001812
+MKTC_ST(MKTC_KICK2D_2DSLAVEPORT)
+#define MKTC_KICK2D_2DSLAVEPORT_DONE 0xAD001813
+MKTC_ST(MKTC_KICK2D_2DSLAVEPORT_DONE)
+#define MKTC_FTD_CONTEXT_SUSPENDED 0xAD001814
+MKTC_ST(MKTC_FTD_CONTEXT_SUSPENDED)
+#define MKTC_KICK2D_PID 0xAD001815
+MKTC_ST(MKTC_KICK2D_PID)
+#define MKTC_FIND2D_ADDR_SPACE_DIFFERENT 0xAD001816
+MKTC_ST(MKTC_FIND2D_ADDR_SPACE_DIFFERENT)
+#define MKTC_FTD_3DOPSBLOCKED 0xAD001817
+MKTC_ST(MKTC_FTD_3DOPSBLOCKED)
+#define MKTC_FTD_DSTREADOPS2BLOCKED 0xAD001818
+MKTC_ST(MKTC_FTD_DSTREADOPS2BLOCKED)
+
+#define MKTC_FCM_START 0xAD001900
+MKTC_ST(MKTC_FCM_START)
+#define MKTC_FCM_END 0xAD001901
+MKTC_ST(MKTC_FCM_END)
+
+#define MKTC_TIMER_ACTIVE_POWER 0xAD001A00
+MKTC_ST(MKTC_TIMER_ACTIVE_POWER)
+#define MKTC_TIMER_POWER_3D_ACTIVE 0xAD001A01
+MKTC_ST(MKTC_TIMER_POWER_3D_ACTIVE)
+#define MKTC_TIMER_POWER_TA_ACTIVE 0xAD001A02
+MKTC_ST(MKTC_TIMER_POWER_TA_ACTIVE)
+#define MKTC_TIMER_POWER_2D_ACTIVE 0xAD001A03
+MKTC_ST(MKTC_TIMER_POWER_2D_ACTIVE)
+#define MKTC_TIMER_POWER_PENDING_EVENTS 0xAD001A04
+MKTC_ST(MKTC_TIMER_POWER_PENDING_EVENTS)
+#define MKTC_TIMER_POWER_IDLE 0xAD001A05
+MKTC_ST(MKTC_TIMER_POWER_IDLE)
+#define MKTC_TIMER_POWER_OFF 0xAD001A06
+MKTC_ST(MKTC_TIMER_POWER_OFF)
+#define MKTC_TIMER_POWER_CCB_ERROR 0xAD001A07
+MKTC_ST(MKTC_TIMER_POWER_CCB_ERROR)
+#define MKTC_TIMER_POWER_RESTART_IMMEDIATE 0xAD001A08
+MKTC_ST(MKTC_TIMER_POWER_RESTART_IMMEDIATE)
+
+#define MKTC_3DCONTEXT_SWITCH 0xAD001B00
+MKTC_ST(MKTC_3DCONTEXT_SWITCH)
+#define MKTC_3DCONTEXT_SWITCH_END 0xAD001B01
+MKTC_ST(MKTC_3DCONTEXT_SWITCH_END)
+
+#define MKTC_TACONTEXT_SWITCH 0xAD001C00
+MKTC_ST(MKTC_TACONTEXT_SWITCH)
+#define MKTC_TACONTEXT_SWITCH_END 0xAD001C02
+MKTC_ST(MKTC_TACONTEXT_SWITCH_END)
+
+#define MKTC_GETMISCINFO_MEMREAD_START 0xAD001D00
+MKTC_ST(MKTC_GETMISCINFO_MEMREAD_START)
+#define MKTC_GETMISCINFO_MEMREAD_END 0xAD001D01
+MKTC_ST(MKTC_GETMISCINFO_MEMREAD_END)
+#define MKTC_GETMISCINFO_MEMWRITE_START 0xAD001D02
+MKTC_ST(MKTC_GETMISCINFO_MEMWRITE_START)
+#define MKTC_GETMISCINFO_MEMWRITE_END 0xAD001D03
+MKTC_ST(MKTC_GETMISCINFO_MEMWRITE_END)
+
+#define MKTC_HALTTA 0xAD001E00
+MKTC_ST(MKTC_HALTTA)
+#define MKTC_HTA_SET_FLAG 0xAD001E01
+MKTC_ST(MKTC_HTA_SET_FLAG)
+#define MKTC_HTA_SAVE_COMPLEX_PTR 0xAD001E02
+MKTC_ST(MKTC_HTA_SAVE_COMPLEX_PTR)
+#define MKTC_HALTTA_END 0xAD001E03
+MKTC_ST(MKTC_HALTTA_END)
+
+#define MKTC_RESUMETA 0xAD001F00
+MKTC_ST(MKTC_RESUMETA)
+#define MKTC_RTA_CONTEXT_LOADED 0xAD001F01
+MKTC_ST(MKTC_RTA_CONTEXT_LOADED)
+#define MKTC_RTA_MTE_STATE_KICKED 0xAD001F02
+MKTC_ST(MKTC_RTA_MTE_STATE_KICKED)
+#define MKTC_RTA_CMPLX_GEOM_PRESENT 0xAD001F03
+MKTC_ST(MKTC_RTA_CMPLX_GEOM_PRESENT)
+#define MKTC_RTA_CMPLX_STATE_KICKED 0xAD001F04
+MKTC_ST(MKTC_RTA_CMPLX_STATE_KICKED)
+#define MKTC_RTA_CHECK_NEXT_SA_PROG 0xAD001F05
+MKTC_ST(MKTC_RTA_CHECK_NEXT_SA_PROG)
+#define MKTC_RTA_CORE_COMPLETED 0xAD001F06
+MKTC_ST(MKTC_RTA_CORE_COMPLETED)
+#define MKTC_RTA_DEBUG_SAS 0xAD001F07
+MKTC_ST(MKTC_RTA_DEBUG_SAS)
+#define MKTC_RESUMETA_END 0xAD001F0F
+MKTC_ST(MKTC_RESUMETA_END)
+
+#define MKTC_RENDERHALT 0xAD002000
+MKTC_ST(MKTC_RENDERHALT)
+#define MKTC_RH_CLEARFLAGS 0xAD002001
+MKTC_ST(MKTC_RH_CLEARFLAGS)
+#define MKTC_RH_CTRL_ADDR 0xAD002002
+MKTC_ST(MKTC_RH_CTRL_ADDR)
+#define MKTC_RH_RGN_ADDR 0xAD002003
+MKTC_ST(MKTC_RH_RGN_ADDR)
+#define MKTC_RH_EMPTY_TILE 0xAD002004
+MKTC_ST(MKTC_RH_EMPTY_TILE)
+#define MKTC_RH_EMPTY_LAST_TILE 0xAD002005
+MKTC_ST(MKTC_RH_EMPTY_LAST_TILE)
+#define MKTC_RH_3D_TIMEOUT 0xAD002006
+MKTC_ST(MKTC_RH_3D_TIMEOUT)
+#define MKTC_RH_NOT_EMPTY 0xAD002007
+MKTC_ST(MKTC_RH_NOT_EMPTY)
+#define MKTC_RH_OBJECT_COMPLETE 0xAD002008
+MKTC_ST(MKTC_RH_OBJECT_COMPLETE)
+#define MKTC_RH_STREAM_LINK 0xAD002009
+MKTC_ST(MKTC_RH_STREAM_LINK)
+#define MKTC_RH_OBJECT_INCOMPLETE 0xAD00200A
+MKTC_ST(MKTC_RH_OBJECT_INCOMPLETE)
+#define MKTC_RH_PRIM_MASK_PRESENT 0xAD00200B
+MKTC_ST(MKTC_RH_PRIM_MASK_PRESENT)
+#define MKTC_RH_BYTE_MASK_PRESENT 0xAD00200C
+MKTC_ST(MKTC_RH_BYTE_MASK_PRESENT)
+#define MKTC_RH_BYTE_MASK_ZERO 0xAD00200D
+MKTC_ST(MKTC_RH_BYTE_MASK_ZERO)
+#define MKTC_RH_PRIM_MASK_ZERO 0xAD00200E
+MKTC_ST(MKTC_RH_PRIM_MASK_ZERO)
+#define MKTC_RH_INVALIDATE_OBJECTS 0xAD00200F
+MKTC_ST(MKTC_RH_INVALIDATE_OBJECTS)
+#define MKTC_RH_OBJECTS_INVALIDATED 0xAD002010
+MKTC_ST(MKTC_RH_OBJECTS_INVALIDATED)
+#define MKTC_RH_DPM_RGN_PARSER_IDLE 0xAD002011
+MKTC_ST(MKTC_RH_DPM_RGN_PARSER_IDLE)
+#define MKTC_RH_NEXT_RGN_BASE 0xAD002012
+MKTC_ST(MKTC_RH_NEXT_RGN_BASE)
+#define MKTC_RH_OCC_EXIT 0xAD002013
+MKTC_ST(MKTC_RH_OCC_EXIT)
+#define MKTC_RH_STILL_RUNNING 0xAD002020
+MKTC_ST(MKTC_RH_STILL_RUNNING)
+#define MKTC_RH_CLEARMCI 0xAD002021
+MKTC_ST(MKTC_RH_CLEARMCI)
+#define MKTC_RH_EOR 0xAD002022
+MKTC_ST(MKTC_RH_EOR)
+#define MKTC_RENDERHALT_END 0xAD002030
+MKTC_ST(MKTC_RENDERHALT_END)
+
+#define MKTC_FIND3D_POWERREQUEST 0xAD002100
+MKTC_ST(MKTC_FIND3D_POWERREQUEST)
+
+#define MKTC_FIND2D_POWERREQUEST 0xAD002200
+MKTC_ST(MKTC_FIND2D_POWERREQUEST)
+
+#define MKTC_UKERNEL_INIT 0xAD002300
+MKTC_ST(MKTC_UKERNEL_INIT)
+#define MKTC_UKERNEL_INIT_DCS_COMPLETE 0xAD002301
+MKTC_ST(MKTC_UKERNEL_INIT_DCS_COMPLETE)
+#define MKTC_UKERNEL_INIT_VDMKICK_COMPLETE 0xAD002303
+MKTC_ST(MKTC_UKERNEL_INIT_VDMKICK_COMPLETE)
+
+#define MKTC_KICKTRANSFERRENDER_START 0xAD002400
+MKTC_ST(MKTC_KICKTRANSFERRENDER_START)
+#define MKTC_KICKTRANSFERRENDER_ISP_START 0xAD002401
+MKTC_ST(MKTC_KICKTRANSFERRENDER_ISP_START)
+#define MKTC_KICKTRANSFERRENDER_END 0xAD002402
+MKTC_ST(MKTC_KICKTRANSFERRENDER_END)
+#define MKTC_DUMMYPROCTRANSFER 0xAD002403
+MKTC_ST(MKTC_DUMMYPROCTRANSFER)
+#define MKTC_KTR_TQFENCE 0xAD002404
+MKTC_ST(MKTC_KTR_TQFENCE)
+#define MKTC_KICKTRANSFERRENDER_PID 0xAD002405
+MKTC_ST(MKTC_KICKTRANSFERRENDER_PID)
+
+#define MKTC_HOSTKICK_CLEANUP_RT 0xAD002500
+MKTC_ST(MKTC_HOSTKICK_CLEANUP_RT)
+#define MKTC_HOSTKICK_CLEANUP_RC 0xAD002501
+MKTC_ST(MKTC_HOSTKICK_CLEANUP_RC)
+#define MKTC_HOSTKICK_CLEANUP_TC 0xAD002502
+MKTC_ST(MKTC_HOSTKICK_CLEANUP_TC)
+#define MKTC_HOSTKICK_CLEANUP_2DC 0xAD002503
+MKTC_ST(MKTC_HOSTKICK_CLEANUP_2DC)
+#define MKTC_HOSTKICK_CLEANUP_PB 0xAD002504
+MKTC_ST(MKTC_HOSTKICK_CLEANUP_PB)
+#define MKTC_HOSTKICK_GETMISCINFO 0xAD002505
+MKTC_ST(MKTC_HOSTKICK_GETMISCINFO)
+#define MKTC_HOSTKICK_DATABREAKPOINT 0xAD002506
+MKTC_ST(MKTC_HOSTKICK_DATABREAKPOINT)
+#define MKTC_HOSTKICK_SETHWPERFSTATUS 0xAD002507
+MKTC_ST(MKTC_HOSTKICK_SETHWPERFSTATUS)
+
+#define MKTC_ZEROPC 0xAD002600
+MKTC_ST(MKTC_ZEROPC)
+
+#define MKTC_ASSERT_FAIL 0xAD002700
+MKTC_ST(MKTC_ASSERT_FAIL)
+
+#define MKTC_SDLB_ILLEGAL 0xAD002800
+MKTC_ST(MKTC_SDLB_ILLEGAL)
+
+#define MKTC_SPMEVENT_OUTOFMEM 0xAD002901
+MKTC_ST(MKTC_SPMEVENT_OUTOFMEM)
+#define MKTC_SPMEVENT_TATERMINATE 0xAD002902
+MKTC_ST(MKTC_SPMEVENT_TATERMINATE)
+#define MKTC_SPMEVENT_END 0xAD002904
+MKTC_ST(MKTC_SPMEVENT_END)
+
+#define MKTC_SPMLB_OUTOFMEM 0xAD002981
+MKTC_ST(MKTC_SPMLB_OUTOFMEM)
+#define MKTC_SPMLB_TATERMINATE 0xAD002982
+MKTC_ST(MKTC_SPMLB_TATERMINATE)
+#define MKTC_SPMLB_SPMRENDERFINSHED 0xAD002983
+MKTC_ST(MKTC_SPMLB_SPMRENDERFINSHED)
+#define MKTC_SPMLB_END 0xAD002985
+MKTC_ST(MKTC_SPMLB_END)
+
+#define MKTC_SPM_CHECK_MT_DEADLOCK 0xAD002991
+MKTC_ST(MKTC_SPM_CHECK_MT_DEADLOCK)
+#define MKTC_SPM_CHECK_GLOBAL_DEADLOCK 0xAD002992
+MKTC_ST(MKTC_SPM_CHECK_GLOBAL_DEADLOCK)
+#define MKTC_SPM_RESERVE_ADDED 0xAD002993
+MKTC_ST(MKTC_SPM_RESERVE_ADDED)
+#define MKTC_SPM_FORCE_GLOBAL_OOM_FAILED 0xAD00299E
+MKTC_ST(MKTC_SPM_FORCE_GLOBAL_OOM_FAILED)
+#define MKTC_SPM_DEADLOCK_MEM_FAILED 0xAD00299F
+MKTC_ST(MKTC_SPM_DEADLOCK_MEM_FAILED)
+
+#define MKTC_IBC_ILLEGAL 0xAD002A00
+MKTC_ST(MKTC_IBC_ILLEGAL)
+
+#define MKTC_HWP_CLEARCOUNTERS 0xAD002B00
+MKTC_ST(MKTC_HWP_CLEARCOUNTERS)
+
+#define MKTC_TA_FRAMENUM 0xAD002C00
+MKTC_ST(MKTC_TA_FRAMENUM)
+#define MKTC_3D_FRAMENUM 0xAD002C01
+MKTC_ST(MKTC_3D_FRAMENUM)
+#define MKTC_SPM3D_FRAMENUM 0xAD002C02
+MKTC_ST(MKTC_SPM3D_FRAMENUM)
+
+#define MKTC_HKTA_RENDERCONTEXT 0xAD002D00
+MKTC_ST(MKTC_HKTA_RENDERCONTEXT)
+#define MKTC_IDLECORE_REFCOUNT_FAIL 0xAD002E00
+MKTC_ST(MKTC_IDLECORE_REFCOUNT_FAIL)
+
+#define MKTC_MCISTATE_NOT_CLEARED 0xAD002F00
+MKTC_ST(MKTC_MCISTATE_NOT_CLEARED)
+
+#define MKTC_LOWERED_TO_PDS_THRESHOLD 0xAD003000
+MKTC_ST(MKTC_LOWERED_TO_PDS_THRESHOLD)
+#define MKTC_REDUCE_MAX_VTX_PARTITIONS 0xAD003001
+MKTC_ST(MKTC_REDUCE_MAX_VTX_PARTITIONS)
+#define MKTC_KTAOVERRIDE_MAX_VTX_PARTITIONS 0xAD003002
+MKTC_ST(MKTC_KTAOVERRIDE_MAX_VTX_PARTITIONS)
+#define MKTC_KTANOOVERRIDE_MAX_VTX_PARTITIONS 0xAD003003
+MKTC_ST(MKTC_KTANOOVERRIDE_MAX_VTX_PARTITIONS)
+
+#define MKTC_IPRB_NORENDERDETAILS 0xAD003010
+MKTC_ST(MKTC_IPRB_NORENDERDETAILS)
+#define MKTC_IPRB_HAVERENDERDETAILS 0xAD003011
+MKTC_ST(MKTC_IPRB_HAVERENDERDETAILS)
+
+#define MKTC_RENDER_OUT_OF_ORDER 0xAD003020
+MKTC_ST(MKTC_RENDER_OUT_OF_ORDER)
+#define MKTC_RENDER_NOT_OUT_OF_ORDER 0xAD003021
+MKTC_ST(MKTC_RENDER_NOT_OUT_OF_ORDER)
+
+#define MKTC_ZLS_IDLE_BEGIN 0xAD003030
+MKTC_ST(MKTC_ZLS_IDLE_BEGIN)
+#define MKTC_ZLS_ISP_CLK_GATING_EN 0xAD003031
+MKTC_ST(MKTC_ZLS_ISP_CLK_GATING_EN)
+#define MKTC_ZLS_IDLE_END 0xAD003032
+MKTC_ST(MKTC_ZLS_IDLE_END)
+
+#endif /* __SGX_UKERNEL_STATUS_CODES_H__ */
+
+/******************************************************************************
+ End of file (sgx_ukernel_status_codes.h)
+******************************************************************************/
diff --git a/drivers/gpu/pvr/sgxapi_km.h b/drivers/gpu/pvr/sgxapi_km.h
new file mode 100644
index 0000000..eaf45eb
--- /dev/null
+++ b/drivers/gpu/pvr/sgxapi_km.h
@@ -0,0 +1,500 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+******************************************************************************/
+
+#ifndef __SGXAPI_KM_H__
+#define __SGXAPI_KM_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "sgxdefs.h"
+
+#if defined(__linux__) && !defined(USE_CODE)
+ #if defined(__KERNEL__)
+ #include <asm/unistd.h>
+ #else
+ #include <unistd.h>
+ #endif
+#endif
+
+/******************************************************************************
+ Some defines...
+******************************************************************************/
+
+/* SGX Heap IDs, note: not all heaps are available to clients */
+#define SGX_UNDEFINED_HEAP_ID (~0LU)
+#define SGX_GENERAL_HEAP_ID 0
+#define SGX_TADATA_HEAP_ID 1
+#define SGX_KERNEL_CODE_HEAP_ID 2
+#define SGX_KERNEL_DATA_HEAP_ID 3
+#define SGX_PIXELSHADER_HEAP_ID 4
+#define SGX_VERTEXSHADER_HEAP_ID 5
+#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
+#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
+#define SGX_SYNCINFO_HEAP_ID 8
+#define SGX_SHARED_3DPARAMETERS_HEAP_ID 9
+#define SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID 10
+#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
+#define SGX_GENERAL_MAPPING_HEAP_ID 11
+#endif
+#if defined(SGX_FEATURE_2D_HARDWARE)
+#define SGX_2D_HEAP_ID 12
+#else
+#if defined(FIX_HW_BRN_26915)
+#define SGX_CGBUFFER_HEAP_ID 13
+#endif
+#endif
+#if defined(SUPPORT_MEMORY_TILING)
+#define SGX_VPB_TILED_HEAP_ID 14
+#endif
+
+#define SGX_MAX_HEAP_ID 15
+
+/*
+ * Keep SGX_3DPARAMETERS_HEAP_ID, as TQ full custom
+ * shaders need it to select which heap to write
+ * their ISP control stream to.
+ */
+#if (defined(SUPPORT_PERCONTEXT_PB) || defined(SUPPORT_HYBRID_PB))
+#define SGX_3DPARAMETERS_HEAP_ID SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID
+#else
+#define SGX_3DPARAMETERS_HEAP_ID SGX_SHARED_3DPARAMETERS_HEAP_ID
+#endif
+/* Define for number of bytes between consecutive code base registers */
+#if defined(SGX543) || defined(SGX544) || defined(SGX554)
+#define SGX_USE_CODE_SEGMENT_RANGE_BITS 23
+#else
+#define SGX_USE_CODE_SEGMENT_RANGE_BITS 19
+#endif
+
+#define SGX_MAX_TA_STATUS_VALS 32
+#define SGX_MAX_3D_STATUS_VALS 4
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+/* sync info structure array size */
+#define SGX_MAX_TA_DST_SYNCS 1
+#define SGX_MAX_TA_SRC_SYNCS 1
+#define SGX_MAX_3D_SRC_SYNCS 4
+/* note: there is implicitly 1 3D Dst Sync */
+#else
+/* sync info structure array size */
+#define SGX_MAX_SRC_SYNCS_TA 8
+#define SGX_MAX_DST_SYNCS_TA 1
+/* note: there is implicitly 1 3D Dst Sync */
+#define SGX_MAX_SRC_SYNCS_TQ 8
+#define SGX_MAX_DST_SYNCS_TQ 1
+#endif
+
+
+#if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS)
+#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 8
+#define PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS 11
+#else
+#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
+#define PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS 8
+#endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */
+
+#define PVRSRV_SGX_HWPERF_INVALID 0x1
+
+#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
+#define PVRSRV_SGX_HWPERF_TA 0x3
+#define PVRSRV_SGX_HWPERF_3D 0x4
+#define PVRSRV_SGX_HWPERF_2D 0x5
+#define PVRSRV_SGX_HWPERF_POWER 0x6
+#define PVRSRV_SGX_HWPERF_PERIODIC 0x7
+#define PVRSRV_SGX_HWPERF_3DSPM 0x8
+
+#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
+#define PVRSRV_SGX_HWPERF_MK_TA 0x102
+#define PVRSRV_SGX_HWPERF_MK_3D 0x103
+#define PVRSRV_SGX_HWPERF_MK_2D 0x104
+#define PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY 0x105
+#define PVRSRV_SGX_HWPERF_MK_TA_DUMMY 0x106
+#define PVRSRV_SGX_HWPERF_MK_3D_DUMMY 0x107
+#define PVRSRV_SGX_HWPERF_MK_2D_DUMMY 0x108
+#define PVRSRV_SGX_HWPERF_MK_TA_LOCKUP 0x109
+#define PVRSRV_SGX_HWPERF_MK_3D_LOCKUP 0x10A
+#define PVRSRV_SGX_HWPERF_MK_2D_LOCKUP 0x10B
+
+#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
+#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
+#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
+#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
+
+#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_POWER_START (PVRSRV_SGX_HWPERF_POWER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_POWER_END (PVRSRV_SGX_HWPERF_POWER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_PERIODIC (PVRSRV_SGX_HWPERF_PERIODIC)
+#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_START (PVRSRV_SGX_HWPERF_3DSPM | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_END (PVRSRV_SGX_HWPERF_3DSPM | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TRANSFER_DUMMY_START (PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TRANSFER_DUMMY_END (PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_DUMMY_START (PVRSRV_SGX_HWPERF_MK_TA_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_DUMMY_END (PVRSRV_SGX_HWPERF_MK_TA_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_DUMMY_START (PVRSRV_SGX_HWPERF_MK_3D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_DUMMY_END (PVRSRV_SGX_HWPERF_MK_3D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_DUMMY_START (PVRSRV_SGX_HWPERF_MK_2D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_DUMMY_END (PVRSRV_SGX_HWPERF_MK_2D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_LOCKUP (PVRSRV_SGX_HWPERF_MK_TA_LOCKUP)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_LOCKUP (PVRSRV_SGX_HWPERF_MK_3D_LOCKUP)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_LOCKUP (PVRSRV_SGX_HWPERF_MK_2D_LOCKUP)
+
+#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
+#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
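
Each packed type above is the base event code with the start/end phase carried
in bit 28. A hedged sketch of pulling the two fields back apart; the helper
names are illustrative, not from the driver.

/* Hypothetical helpers for decoding a packed HWPerf type word. */
static unsigned int HWPerfBaseEvent(unsigned int ui32Type)
{
	/* e.g. PVRSRV_SGX_HWPERF_TYPE_TA_END -> PVRSRV_SGX_HWPERF_TA */
	return ui32Type & PVRSRV_SGX_HWPERF_TYPE_OP_MASK;
}

static int HWPerfIsEndEvent(unsigned int ui32Type)
{
	return (ui32Type & ~PVRSRV_SGX_HWPERF_TYPE_OP_MASK) ==
	       PVRSRV_SGX_HWPERF_TYPE_OP_END;
}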
+
+#define PVRSRV_SGX_HWPERF_STATUS_OFF (0x0)
+#define PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS (1UL << 0)
+#define PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON (1UL << 1)
+#define PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON (1UL << 2)
+#define PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON (1UL << 3)
+
+
+/*!
+ *****************************************************************************
+ * One entry in the HWPerf Circular Buffer.
+ *****************************************************************************/
+typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_
+{
+ IMG_UINT32 ui32FrameNo;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32RTData;
+ IMG_UINT32 ui32Type;
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT32 ui32Info;
+ IMG_UINT32 ui32Clocksx16;
+ /* NOTE: There should always be at least as many 3D cores as TA cores. */
+ IMG_UINT32 ui32Counters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+ IMG_UINT32 ui32MiscCounters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS];
+} PVRSRV_SGX_HWPERF_CB_ENTRY;
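
Since the counter arrays are laid out per 3D core, a consumer walking one
circular-buffer entry typically loops over cores and counter slots. A minimal,
hypothetical sketch (the helper name is not part of the driver):

/* Hypothetical aggregation of one HWPerf CB entry across all 3D cores. */
static IMG_UINT32 SumHWPerfCounter(const PVRSRV_SGX_HWPERF_CB_ENTRY *psEntry,
                                   IMG_UINT32 ui32CounterIdx)
{
	IMG_UINT32 ui32Core, ui32Total = 0;

	for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT_3D; ui32Core++)
	{
		ui32Total += psEntry->ui32Counters[ui32Core][ui32CounterIdx];
	}

	return ui32Total;
}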
+
+
+/*
+ Status values control structure
+*/
+typedef struct _CTL_STATUS_
+{
+ IMG_DEV_VIRTADDR sStatusDevAddr;
+ IMG_UINT32 ui32StatusValue;
+} CTL_STATUS;
+
+
+/*!
+ List of possible requests/commands to SGXGetMiscInfo()
+*/
+typedef enum _SGX_MISC_INFO_REQUEST_
+{
+ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
+ SGX_MISC_INFO_REQUEST_SGXREV,
+ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ SGX_MISC_INFO_REQUEST_MEMREAD,
+ SGX_MISC_INFO_REQUEST_MEMCOPY,
+#endif /* SUPPORT_SGX_EDM_MEMORY_DEBUG */
+ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ SGX_MISC_INFO_REQUEST_SET_BREAKPOINT,
+ SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT,
+ SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT,
+#endif /* SGX_FEATURE_DATA_BREAKPOINTS */
+ SGX_MISC_INFO_DUMP_DEBUG_INFO,
+ SGX_MISC_INFO_PANIC,
+ SGX_MISC_INFO_REQUEST_SPM,
+ SGX_MISC_INFO_REQUEST_ACTIVEPOWER,
+ SGX_MISC_INFO_REQUEST_LOCKUPS,
+ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
+} SGX_MISC_INFO_REQUEST;
+
+
+/******************************************************************************
+ * Struct for passing SGX core rev/features from ukernel to driver.
+ * This is accessed from the kernel part of the driver and microkernel; it is
+ * only accessed in user space during buffer allocation in srvinit.
+ ******************************************************************************/
+typedef struct _PVRSRV_SGX_MISCINFO_FEATURES
+{
+ IMG_UINT32 ui32CoreRev; /*!< SGX Core revision from HW register */
+ IMG_UINT32 ui32CoreID; /*!< SGX Core ID from HW register */
+ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */
+ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */
+ IMG_UINT32 ui32CoreIdSW; /*!< software core version (ID), e.g. SGX535, SGX540 */
+ IMG_UINT32 ui32CoreRevSW; /*!< software core revision */
+ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ IMG_UINT32 ui32DeviceMemValue; /*!< device mem value read from ukernel */
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAEDMStatusBuffer; /*!< DevVAddr of the EDM status buffer */
+ IMG_PVOID pvEDMStatusBuffer; /*!< CPUVAddr of the EDM status buffer */
+#endif
+} PVRSRV_SGX_MISCINFO_FEATURES;
+
+
+/******************************************************************************
+ * Struct for getting lock-up stats from the kernel driver
+ ******************************************************************************/
+typedef struct _PVRSRV_SGX_MISCINFO_LOCKUPS
+{
+ IMG_UINT32 ui32HostDetectedLockups; /*!< Host timer detected lockups */
+ IMG_UINT32 ui32uKernelDetectedLockups; /*!< Microkernel detected lockups */
+} PVRSRV_SGX_MISCINFO_LOCKUPS;
+
+
+/******************************************************************************
+ * Struct for getting active power management stats from the kernel driver
+ ******************************************************************************/
+typedef struct _PVRSRV_SGX_MISCINFO_ACTIVEPOWER
+{
+ IMG_UINT32 ui32NumActivePowerEvents; /*!< active power events */
+} PVRSRV_SGX_MISCINFO_ACTIVEPOWER;
+
+
+/******************************************************************************
+ * Struct for getting SPM stats from the kernel driver
+ ******************************************************************************/
+typedef struct _PVRSRV_SGX_MISCINFO_SPM
+{
+ IMG_HANDLE hRTDataSet; /*!< render target data set handle returned from SGXAddRenderTarget */
+ IMG_UINT32 ui32NumOutOfMemSignals; /*!< Number of Out of Mem Signals */
+ IMG_UINT32 ui32NumSPMRenders; /*!< Number of SPM renders */
+} PVRSRV_SGX_MISCINFO_SPM;
+
+
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+/*!
+ ******************************************************************************
+ * Structure for SGX break points control
+ *****************************************************************************/
+typedef struct _SGX_BREAKPOINT_INFO
+{
+ /* set/clear BP boolean */
+ IMG_BOOL bBPEnable;
+ /* Index of BP to set */
+ IMG_UINT32 ui32BPIndex;
+ /* On which DataMaster(s) should the breakpoint fire? */
+ IMG_UINT32 ui32DataMasterMask;
+ /* DevVAddr of BP to set */
+ IMG_DEV_VIRTADDR sBPDevVAddr, sBPDevVAddrEnd;
+ /* Whether or not the desired breakpoint will be trapped */
+ IMG_BOOL bTrapped;
+ /* Will the requested breakpoint fire for reads? */
+ IMG_BOOL bRead;
+ /* Will the requested breakpoint fire for writes? */
+ IMG_BOOL bWrite;
+ /* Has a breakpoint been trapped? */
+ IMG_BOOL bTrappedBP;
+ /* Extra information recorded about a trapped breakpoint */
+ IMG_UINT32 ui32CoreNum;
+ IMG_DEV_VIRTADDR sTrappedBPDevVAddr;
+ IMG_UINT32 ui32TrappedBPBurstLength;
+ IMG_BOOL bTrappedBPRead;
+ IMG_UINT32 ui32TrappedBPDataMaster;
+ IMG_UINT32 ui32TrappedBPTag;
+} SGX_BREAKPOINT_INFO;
+#endif /* SGX_FEATURE_DATA_BREAKPOINTS */
+
+
+/*!
+ ******************************************************************************
+ * Structure for setting the hardware performance status
+ *****************************************************************************/
+typedef struct _PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS
+{
+ /* See PVRSRV_SGX_HWPERF_STATUS_* */
+ IMG_UINT32 ui32NewHWPerfStatus;
+
+ #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS)
+ /* Specifies the HW's active group selectors */
+ IMG_UINT32 aui32PerfGroup[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+ /* Specifies the HW's active bit selectors */
+ IMG_UINT32 aui32PerfBit[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
+ #else
+ /* Specifies the HW's active group */
+ IMG_UINT32 ui32PerfGroup;
+ #endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */
+} PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS;
+
+
+/*!
+ ******************************************************************************
+ * Structure for misc SGX commands in services
+ *****************************************************************************/
+typedef struct _SGX_MISC_INFO_
+{
+ SGX_MISC_INFO_REQUEST eRequest; /*!< Command request to SGXGetMiscInfo() */
+ IMG_UINT32 ui32Padding;
+#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddrSrc; /*!< dev virtual addr for mem read */
+ IMG_DEV_VIRTADDR sDevVAddrDest; /*!< dev virtual addr for mem write */
+ IMG_HANDLE hDevMemContext; /*!< device memory context for mem debug */
+#endif
+ union
+ {
+ IMG_UINT32 reserved; /*!< Unused: ensures valid code in the case everything else is compiled out */
+ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
+ IMG_UINT32 ui32SGXClockSpeed;
+ PVRSRV_SGX_MISCINFO_ACTIVEPOWER sActivePower;
+ PVRSRV_SGX_MISCINFO_LOCKUPS sLockups;
+ PVRSRV_SGX_MISCINFO_SPM sSPM;
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
+ SGX_BREAKPOINT_INFO sSGXBreakpointInfo;
+#endif
+ PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS sSetHWPerfStatus;
+ } uData;
+} SGX_MISC_INFO;
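
A request is made by selecting an eRequest value and reading the reply from the
matching union member. The sketch below assumes a clock-speed query; the entry
point name is hypothetical, since the real call into SGXGetMiscInfo() lives
outside this header.

/* Hypothetical services entry point; only the structure usage is shown. */
extern int HypotheticalSGXGetMiscInfo(SGX_MISC_INFO *psMiscInfo);

static IMG_UINT32 QuerySGXClockSpeed(void)
{
	SGX_MISC_INFO sMiscInfo;

	sMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_CLOCKSPEED;

	if (HypotheticalSGXGetMiscInfo(&sMiscInfo) != 0)
	{
		return 0;	/* request failed */
	}

	/* The reply arrives through the union member matching the request. */
	return sMiscInfo.uData.ui32SGXClockSpeed;
}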
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+/*
+ * The largest number of source sync objects that can be associated with a blit
+ * command. Allows for src, pattern, and mask
+ */
+#define PVRSRV_MAX_BLT_SRC_SYNCS 3
+#endif
+
+
+#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
+
+/*
+ Structure for dumping bitmaps
+*/
+typedef struct _SGX_KICKTA_DUMPBITMAP_
+{
+ IMG_DEV_VIRTADDR sDevBaseAddr;
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32Stride;
+ IMG_UINT32 ui32PDUMPFormat;
+ IMG_UINT32 ui32BytesPP;
+ IMG_CHAR pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
+} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP;
+
+#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16)
+
+/*!
+ ******************************************************************************
+ * Data required only when dumping parameters
+ *****************************************************************************/
+typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
+{
+ /* cache control word for micro kernel cache flush/invalidates */
+ IMG_UINT32 ui32CacheControl;
+
+} PVRSRV_SGX_PDUMP_CONTEXT;
+
+
+#if !defined (SUPPORT_SID_INTERFACE)
+typedef struct _SGX_KICKTA_DUMP_ROFF_
+{
+ IMG_HANDLE hKernelMemInfo; /*< Buffer handle */
+ IMG_UINT32 uiAllocIndex; /*< Alloc index for LDDM */
+ IMG_UINT32 ui32Offset; /*< Byte offset to value to dump */
+ IMG_UINT32 ui32Value; /*< Actual value to dump */
+ IMG_PCHAR pszName; /*< Name of buffer */
+} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF;
+#endif
+
+#if defined (SUPPORT_SID_INTERFACE)
+typedef struct _SGX_KICKTA_DUMP_BUFFER_KM_
+#else
+typedef struct _SGX_KICKTA_DUMP_BUFFER_
+#endif
+{
+ IMG_UINT32 ui32SpaceUsed;
+ IMG_UINT32 ui32Start; /*< Byte offset of start to dump */
+ IMG_UINT32 ui32End; /*< Byte offset of end of dump (non-inclusive) */
+ IMG_UINT32 ui32BufferSize; /*< Size of buffer */
+ IMG_UINT32 ui32BackEndLength; /*< Size of back end portion, if End < Start */
+ IMG_UINT32 uiAllocIndex;
+ IMG_HANDLE hKernelMemInfo; /*< MemInfo handle for the circular buffer */
+ IMG_PVOID pvLinAddr;
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ IMG_HANDLE hCtrlKernelMemInfo; /*< MemInfo handle for the control structure of the
+ circular buffer */
+ IMG_DEV_VIRTADDR sCtrlDevVAddr; /*< Device virtual address of the memory in the
+ control structure to be checked */
+#endif
+ IMG_PCHAR pszName; /*< Name of buffer */
+#if defined (SUPPORT_SID_INTERFACE)
+} SGX_KICKTA_DUMP_BUFFER_KM, *PSGX_KICKTA_DUMP_BUFFER_KM;
+#else
+} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER;
+#endif
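
The Start/End/BackEndLength fields describe a possibly wrapped region of the
circular buffer. A hedged sketch of computing how many bytes a dump covers,
assuming ui32BackEndLength is the distance from ui32Start to the end of the
buffer; the helper name is illustrative and the non-SID typedef name is used.

/* Hypothetical helper: total bytes covered by a (possibly wrapped) dump. */
static IMG_UINT32 DumpBufferLength(const SGX_KICKTA_DUMP_BUFFER *psBuffer)
{
	if (psBuffer->ui32End >= psBuffer->ui32Start)
	{
		/* Contiguous range [Start, End). */
		return psBuffer->ui32End - psBuffer->ui32Start;
	}

	/* Wrapped: [Start, BufferSize) followed by [0, End). */
	return psBuffer->ui32BackEndLength + psBuffer->ui32End;
}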
+
+#if !defined (SUPPORT_SID_INTERFACE)
+#ifdef PDUMP
+/*
+ PDUMP version of above kick structure
+*/
+typedef struct _SGX_KICKTA_PDUMP_
+{
+ // Bitmaps to dump
+ PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray;
+ IMG_UINT32 ui32PDumpBitmapSize;
+
+ // Misc buffers to dump (e.g. TA, PDS etc..)
+ PSGX_KICKTA_DUMP_BUFFER psBufferArray;
+ IMG_UINT32 ui32BufferArraySize;
+
+ // Roffs to dump
+ PSGX_KICKTA_DUMP_ROFF psROffArray;
+ IMG_UINT32 ui32ROffArraySize;
+} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP;
+#endif /* PDUMP */
+#endif /* #if !defined (SUPPORT_SID_INTERFACE) */
+
+#if defined(TRANSFER_QUEUE)
+#if defined(SGX_FEATURE_2D_HARDWARE)
+/* Maximum size of ctrl stream for 2d blit command (in 32 bit words) */
+#define SGX_MAX_2D_BLIT_CMD_SIZE 26
+#define SGX_MAX_2D_SRC_SYNC_OPS 3
+#endif
+#define SGX_MAX_TRANSFER_STATUS_VALS 2
+#define SGX_MAX_TRANSFER_SYNC_OPS 5
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __SGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (sgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/pvr/sgxdefs.h b/drivers/gpu/pvr/sgxdefs.h
new file mode 100644
index 0000000..b3a2583
--- /dev/null
+++ b/drivers/gpu/pvr/sgxdefs.h
@@ -0,0 +1,90 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGXDEFS_H_
+#define _SGXDEFS_H_
+
+#include "sgxerrata.h"
+#include "sgxfeaturedefs.h"
+
+#if defined(SGX520)
+#include "sgx520defs.h"
+#else
+#if defined(SGX530)
+#include "sgx530defs.h"
+#else
+#if defined(SGX535)
+#include "sgx535defs.h"
+#else
+#if defined(SGX535_V1_1)
+#include "sgx535defs.h"
+#else
+#if defined(SGX540)
+#include "sgx540defs.h"
+#else
+#if defined(SGX543)
+#if defined(FIX_HW_BRN_29954)
+#include "sgx543_v1.164defs.h"
+#else
+#include "sgx543defs.h"
+#endif
+#else
+#if defined(SGX544)
+#include "sgx544defs.h"
+#else
+#if defined(SGX545)
+#include "sgx545defs.h"
+#else
+#if defined(SGX531)
+#include "sgx531defs.h"
+#else
+#if defined(SGX554)
+#include "sgx554defs.h"
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX554)
+#include "sgxmpplusdefs.h"
+#else
+#include "sgxmpdefs.h"
+#endif
+#else
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+#include "mnemedefs.h"
+#endif
+#endif
+
+#endif
+
diff --git a/drivers/gpu/pvr/sgxerrata.h b/drivers/gpu/pvr/sgxerrata.h
new file mode 100644
index 0000000..05fd45f
--- /dev/null
+++ b/drivers/gpu/pvr/sgxerrata.h
@@ -0,0 +1,714 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SGXERRATA_KM_H_
+#define _SGXERRATA_KM_H_
+
+#if defined(SGX520) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 100
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 111
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX520 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 110
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 111
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 1111
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 120
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 121
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 125
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == 130
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_28889
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX530 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+	#endif
+	#endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX531) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 101
+ #define FIX_HW_BRN_26620
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == 110
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX531 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 112
+ #define FIX_HW_BRN_23281
+ #define FIX_HW_BRN_23410
+ #define FIX_HW_BRN_22693
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_22997
+ #define FIX_HW_BRN_23030
+ #else
+ #if SGX_CORE_REV == 113
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_23281
+ #define FIX_HW_BRN_23944
+ #define FIX_HW_BRN_23410
+ #else
+ #if SGX_CORE_REV == 121
+ #define FIX_HW_BRN_22934
+ #define FIX_HW_BRN_23944
+ #define FIX_HW_BRN_23410
+ #else
+ #if SGX_CORE_REV == 126
+ #define FIX_HW_BRN_22934
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX535 Core Revision unspecified"
+
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 101
+ #define FIX_HW_BRN_25499
+ #define FIX_HW_BRN_25503
+ #define FIX_HW_BRN_26620
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == 110
+ #define FIX_HW_BRN_25503
+ #define FIX_HW_BRN_26620
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == 120
+ #define FIX_HW_BRN_26620
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == 121
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == 130
+ #define FIX_HW_BRN_34028
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX540 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX541) && !defined(SGX_CORE_DEFINED)
+ #if defined(SGX_FEATURE_MP)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 100
+ #define FIX_HW_BRN_27270
+ #define FIX_HW_BRN_28011
+ #define FIX_HW_BRN_27510
+
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX541 Core Revision unspecified"
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+ #else
+ #error "sgxerrata.h: SGX541 only supports MP configs (SGX_FEATURE_MP)"
+ #endif
+#endif
+
+#if defined(SGX543) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 113
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_29997
+ #define FIX_HW_BRN_30954
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_32044
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == 122
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_29997
+ #define FIX_HW_BRN_30954
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 1221
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_31671
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 140
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_30954
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #define FIX_HW_BRN_33920
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 1401
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_30954
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #define FIX_HW_BRN_33920
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 141
+ #define FIX_HW_BRN_29954
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31671
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 142
+ #define FIX_HW_BRN_29954
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31671
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 211
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 2111
+ #define FIX_HW_BRN_30982
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 213
+ #define FIX_HW_BRN_31272
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31671
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == 216
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == 302
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #if SGX_CORE_REV == 303
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #error "sgxerrata.h: SGX543 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX544) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 100
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #else
+ #if SGX_CORE_REV == 102
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_31272
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_36513
+ #endif
+ #else
+ #if SGX_CORE_REV == 103
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_31272
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == 104
+ #define FIX_HW_BRN_29954
+ #define FIX_HW_BRN_31093
+ #define FIX_HW_BRN_31195
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31278
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31542
+ #define FIX_HW_BRN_31620
+ #define FIX_HW_BRN_31671
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_32044
+ #define FIX_HW_BRN_32085
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == 105
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == 106
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_31780
+ #define FIX_HW_BRN_33920
+ #else
+ #if SGX_CORE_REV == 110
+ #define FIX_HW_BRN_31272
+ #if defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_31559
+ #endif
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33920
+ #else
+ #if SGX_CORE_REV == 112
+ #define FIX_HW_BRN_31272
+ #define FIX_HW_BRN_33920
+ #else
+ #if SGX_CORE_REV == 114
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #if SGX_CORE_REV == 115
+ #define FIX_HW_BRN_31780
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #if defined(SGX_FEATURE_MP)
+ #if SGX_FEATURE_MP_CORE_COUNT > 1
+ #define FIX_HW_BRN_36513
+ #endif
+ #endif
+ #else
+ #if SGX_CORE_REV == 116
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_33809
+ #define FIX_HW_BRN_36513
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #error "sgxerrata.h: SGX544 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX545) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 100
+ #define FIX_HW_BRN_26620
+ #define FIX_HW_BRN_27266
+ #define FIX_HW_BRN_27456
+ #define FIX_HW_BRN_29702
+ #define FIX_HW_BRN_29823
+ #else
+ #if SGX_CORE_REV == 109
+ #define FIX_HW_BRN_29702
+ #define FIX_HW_BRN_29823
+ #define FIX_HW_BRN_31939
+ #else
+ #if SGX_CORE_REV == 1012
+ #define FIX_HW_BRN_31939
+ #else
+ #if SGX_CORE_REV == 1013
+ #define FIX_HW_BRN_31939
+ #else
+ #if SGX_CORE_REV == 10131
+ #else
+ #if SGX_CORE_REV == 1014
+ #else
+ #if SGX_CORE_REV == 10141
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+
+ #else
+ #error "sgxerrata.h: SGX545 Core Revision unspecified"
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if defined(SGX554) && !defined(SGX_CORE_DEFINED)
+
+ #define SGX_CORE_REV_HEAD 0
+ #if defined(USE_SGX_CORE_REV_HEAD)
+
+ #define SGX_CORE_REV SGX_CORE_REV_HEAD
+ #endif
+
+ #if SGX_CORE_REV == 1251
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #define FIX_HW_BRN_36513
+
+ #else
+ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP)
+ #define FIX_HW_BRN_33657
+ #endif
+ #else
+ #error "sgxerrata.h: SGX554 Core Revision unspecified"
+ #endif
+ #endif
+
+ #define SGX_CORE_DEFINED
+#endif
+
+#if !defined(SGX_CORE_DEFINED)
+#if defined (__GNUC__)
+ #warning "sgxerrata.h: SGX Core Version unspecified"
+#else
+ #pragma message("sgxerrata.h: SGX Core Version unspecified")
+#endif
+#endif
+
+#endif
+
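For reference, sgxerrata.h is consumed by defining exactly one core macro (SGX520, SGX530, ...) together with either SGX_CORE_REV or USE_SGX_CORE_REV_HEAD; the header turns that pair into the FIX_HW_BRN_* workaround switches and trips an #error for an unknown revision. The stand-alone C sketch below mirrors that selection pattern for the SGX540 rev 120 entry above rather than including the real header (which depends on the rest of the services headers); it is illustrative only, not part of this patch.

/* Miniature of the sgxerrata.h pattern: core + revision in, BRN switches out.
   Mirrors the SGX540 rev 120 entry above; not a substitute for the real header. */
#include <stdio.h>

#define SGX540                     /* normally supplied by the build system */
#define SGX_CORE_REV 120           /* or define USE_SGX_CORE_REV_HEAD instead */

#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
  #define SGX_CORE_REV_HEAD 0
  #if defined(USE_SGX_CORE_REV_HEAD)
    #define SGX_CORE_REV SGX_CORE_REV_HEAD
  #endif

  #if SGX_CORE_REV == 120
    #define FIX_HW_BRN_26620
    #define FIX_HW_BRN_28011
    #define FIX_HW_BRN_34028
  #else
  #if SGX_CORE_REV == SGX_CORE_REV_HEAD
    /* head revision: no workarounds needed */
  #else
    #error "core revision unspecified"
  #endif
  #endif

  #define SGX_CORE_DEFINED
#endif

int main(void)
{
#if defined(FIX_HW_BRN_34028)
  puts("SGX540 r1.2.0: FIX_HW_BRN_26620/28011/34028 workarounds enabled");
#endif
  return 0;
}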
diff --git a/drivers/gpu/pvr/sgxfeaturedefs.h b/drivers/gpu/pvr/sgxfeaturedefs.h
new file mode 100644
index 0000000..0679671
--- /dev/null
+++ b/drivers/gpu/pvr/sgxfeaturedefs.h
@@ -0,0 +1,247 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if defined(SGX520)
+ #define SGX_CORE_FRIENDLY_NAME "SGX520"
+ #define SGX_CORE_ID SGX_CORE_ID_520
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (1)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+#else
+#if defined(SGX530)
+ #define SGX_CORE_FRIENDLY_NAME "SGX530"
+ #define SGX_CORE_ID SGX_CORE_ID_530
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+#else
+#if defined(SGX531)
+ #define SGX_CORE_FRIENDLY_NAME "SGX531"
+ #define SGX_CORE_ID SGX_CORE_ID_531
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+#else
+#if defined(SGX535)
+ #define SGX_CORE_FRIENDLY_NAME "SGX535"
+ #define SGX_CORE_ID SGX_CORE_ID_535
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
+ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
+ #define SGX_FEATURE_2D_HARDWARE
+ #define SGX_FEATURE_NUM_USE_PIPES (2)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SUPPORT_SGX_GENERAL_MAPPING_HEAP
+ #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
+#else
+#if defined(SGX540)
+ #define SGX_CORE_FRIENDLY_NAME "SGX540"
+ #define SGX_CORE_ID SGX_CORE_ID_540
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+#else
+#if defined(SGX543)
+ #define SGX_CORE_FRIENDLY_NAME "SGX543"
+ #define SGX_CORE_ID SGX_CORE_ID_543
+ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
+ #define SGX_FEATURE_USE_UNLIMITED_PHASES
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
+ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_MONOLITHIC_UKERNEL
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+ #define SGX_FEATURE_DATA_BREAKPOINTS
+ #define SGX_FEATURE_PERPIPE_BKPT_REGS
+ #define SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES (2)
+ #define SGX_FEATURE_2D_HARDWARE
+ #define SGX_FEATURE_PTLA
+ #define SGX_FEATURE_EXTENDED_PERF_COUNTERS
+ #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING)
+ #if defined(SGX_FEATURE_MP)
+ #define SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH
+ #endif
+ #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH
+ #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH
+ #endif
+#else
+#if defined(SGX544)
+ #define SGX_CORE_FRIENDLY_NAME "SGX544"
+ #define SGX_CORE_ID SGX_CORE_ID_544
+ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
+ #define SGX_FEATURE_USE_UNLIMITED_PHASES
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
+ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_MONOLITHIC_UKERNEL
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+ #define SGX_FEATURE_EXTENDED_PERF_COUNTERS
+ #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING)
+ #if defined(SGX_FEATURE_MP)
+ #define SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH
+ #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH
+ #endif
+ #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH
+ #endif
+#else
+#if defined(SGX545)
+ #define SGX_CORE_FRIENDLY_NAME "SGX545"
+ #define SGX_CORE_ID SGX_CORE_ID_545
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
+ #define SGX_FEATURE_USE_UNLIMITED_PHASES
+ #define SGX_FEATURE_VOLUME_TEXTURES
+ #define SGX_FEATURE_HOST_ALLOC_FROM_DPM
+ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
+ #define SGX_FEATURE_NUM_USE_PIPES (4)
+ #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION
+ #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS
+ #define SGX_FEATURE_MONOLITHIC_UKERNEL
+ #define SGX_FEATURE_ZLS_EXTERNALZ
+ #define SGX_FEATURE_NUM_PDS_PIPES (2)
+ #define SGX_FEATURE_NATIVE_BACKWARD_BLIT
+ #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512)
+ #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK
+ #define SGX_FEATURE_WRITEBACK_DCU
+
+
+ #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+ #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING)
+ #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH
+ #endif
+#else
+#if defined(SGX554)
+ #define SGX_CORE_FRIENDLY_NAME "SGX554"
+ #define SGX_CORE_ID SGX_CORE_ID_554
+ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
+ #define SGX_FEATURE_USE_UNLIMITED_PHASES
+ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
+ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
+ #define SGX_FEATURE_NUM_USE_PIPES (8)
+ #define SGX_FEATURE_AUTOCLOCKGATING
+ #define SGX_FEATURE_MONOLITHIC_UKERNEL
+ #define SGX_FEATURE_MULTI_EVENT_KICK
+ #define SGX_FEATURE_2D_HARDWARE
+ #define SGX_FEATURE_PTLA
+ #define SGX_FEATURE_EXTENDED_PERF_COUNTERS
+ #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE
+ #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING)
+ #if defined(SGX_FEATURE_MP)
+ #define SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH
+ #endif
+ #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH
+ #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH
+ #endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#if defined(SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH) \
+ || defined(SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH)
+#define SGX_FEATURE_VDM_CONTEXT_SWITCH
+#endif
+
+#if defined(FIX_HW_BRN_22693)
+#undef SGX_FEATURE_AUTOCLOCKGATING
+#endif
+
+#if defined(FIX_HW_BRN_27266)
+#undef SGX_FEATURE_36BIT_MMU
+#endif
+
+#if defined(FIX_HW_BRN_27456)
+#undef SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
+#endif
+
+#if defined(FIX_HW_BRN_22934) \
+ || defined(FIX_HW_BRN_25499)
+#undef SGX_FEATURE_MULTI_EVENT_KICK
+#endif
+
+#if defined(SGX_FEATURE_SYSTEM_CACHE)
+ #if defined(SGX_FEATURE_36BIT_MMU)
+ #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU
+ #endif
+ #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK)
+ #define SGX_BYPASS_SYSTEM_CACHE
+ #endif
+#endif
+
+#if defined(FIX_HW_BRN_29954)
+#undef SGX_FEATURE_PERPIPE_BKPT_REGS
+#endif
+
+#if defined(FIX_HW_BRN_31620)
+#undef SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+#undef SGX_FEATURE_BIF_NUM_DIRLISTS
+#endif
+
+#if defined(SGX_FEATURE_MP)
+#if defined(SGX_FEATURE_MP_CORE_COUNT_TA) && defined(SGX_FEATURE_MP_CORE_COUNT_3D)
+#if (SGX_FEATURE_MP_CORE_COUNT_TA > SGX_FEATURE_MP_CORE_COUNT_3D)
+#error Number of TA cores larger than number of 3D cores not supported in current driver
+#endif
+#else
+#if defined(SGX_FEATURE_MP_CORE_COUNT)
+#define SGX_FEATURE_MP_CORE_COUNT_TA (SGX_FEATURE_MP_CORE_COUNT)
+#define SGX_FEATURE_MP_CORE_COUNT_3D (SGX_FEATURE_MP_CORE_COUNT)
+#else
+#error Either SGX_FEATURE_MP_CORE_COUNT or \
+both SGX_FEATURE_MP_CORE_COUNT_TA and SGX_FEATURE_MP_CORE_COUNT_3D \
+must be defined when SGX_FEATURE_MP is defined
+#endif
+#endif
+#else
+#define SGX_FEATURE_MP_CORE_COUNT (1)
+#define SGX_FEATURE_MP_CORE_COUNT_TA (1)
+#define SGX_FEATURE_MP_CORE_COUNT_3D (1)
+#endif
+
+#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING)
+#define SUPPORT_SGX_PRIORITY_SCHEDULING
+#endif
+
+#include "img_types.h"
+
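One behaviour in the block above worth noting: without SGX_FEATURE_MP the TA/3D core counts silently default to 1, and with it the platform must supply either SGX_FEATURE_MP_CORE_COUNT or both per-unit counts (TA not exceeding 3D) or the build fails. A minimal local sketch of the same fallback, with illustrative values rather than a real platform config:

/* Reproduces the core-count fallback at the end of sgxfeaturedefs.h.
   The values here are examples only; real ones come from the platform config. */
#include <stdio.h>

#define SGX_FEATURE_MP
#define SGX_FEATURE_MP_CORE_COUNT (2)

#if defined(SGX_FEATURE_MP)
  #if defined(SGX_FEATURE_MP_CORE_COUNT_TA) && defined(SGX_FEATURE_MP_CORE_COUNT_3D)
    #if (SGX_FEATURE_MP_CORE_COUNT_TA > SGX_FEATURE_MP_CORE_COUNT_3D)
      #error "more TA cores than 3D cores is not supported"
    #endif
  #elif defined(SGX_FEATURE_MP_CORE_COUNT)
    #define SGX_FEATURE_MP_CORE_COUNT_TA (SGX_FEATURE_MP_CORE_COUNT)
    #define SGX_FEATURE_MP_CORE_COUNT_3D (SGX_FEATURE_MP_CORE_COUNT)
  #else
    #error "need SGX_FEATURE_MP_CORE_COUNT or both TA and 3D counts"
  #endif
#else
  #define SGX_FEATURE_MP_CORE_COUNT_TA (1)
  #define SGX_FEATURE_MP_CORE_COUNT_3D (1)
#endif

int main(void)
{
  printf("TA cores: %d, 3D cores: %d\n",
         SGX_FEATURE_MP_CORE_COUNT_TA, SGX_FEATURE_MP_CORE_COUNT_3D);
  return 0;
}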
diff --git a/drivers/gpu/pvr/sgxinfo.h b/drivers/gpu/pvr/sgxinfo.h
new file mode 100644
index 0000000..dec8577
--- /dev/null
+++ b/drivers/gpu/pvr/sgxinfo.h
@@ -0,0 +1,463 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined (__SGXINFO_H__)
+#define __SGXINFO_H__
+
+#include "sgxscript.h"
+#include "servicesint.h"
+#include "services.h"
+#if !defined (SUPPORT_SID_INTERFACE)
+#include "sgxapi_km.h"
+#endif
+#include "sgx_mkif_km.h"
+
+
+#define SGX_MAX_DEV_DATA 24
+#define SGX_MAX_INIT_MEM_HANDLES 18
+
+
+typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
+{
+ IMG_DEV_PHYADDR sPDDevPAddr;
+ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
+} SGX_BRIDGE_INFO_FOR_SRVINIT;
+
+
+typedef enum _SGXMKIF_CMD_TYPE_
+{
+ SGXMKIF_CMD_TA = 0,
+ SGXMKIF_CMD_TRANSFER = 1,
+ SGXMKIF_CMD_2D = 2,
+ SGXMKIF_CMD_POWER = 3,
+ SGXMKIF_CMD_CONTEXTSUSPEND = 4,
+ SGXMKIF_CMD_CLEANUP = 5,
+ SGXMKIF_CMD_GETMISCINFO = 6,
+ SGXMKIF_CMD_PROCESS_QUEUES = 7,
+ SGXMKIF_CMD_DATABREAKPOINT = 8,
+ SGXMKIF_CMD_SETHWPERFSTATUS = 9,
+ SGXMKIF_CMD_FLUSHPDCACHE = 10,
+ SGXMKIF_CMD_MAX = 11,
+
+ SGXMKIF_CMD_FORCE_I32 = -1,
+
+} SGXMKIF_CMD_TYPE;
+
+
+typedef struct _SGX_BRIDGE_INIT_INFO_
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelCCBMemInfo;
+ IMG_SID hKernelCCBCtlMemInfo;
+ IMG_SID hKernelCCBEventKickerMemInfo;
+ IMG_SID hKernelSGXHostCtlMemInfo;
+ IMG_SID hKernelSGXTA3DCtlMemInfo;
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ IMG_SID hKernelSGXPTLAWriteBackMemInfo;
+#endif
+ IMG_SID hKernelSGXMiscMemInfo;
+#else
+ IMG_HANDLE hKernelCCBMemInfo;
+ IMG_HANDLE hKernelCCBCtlMemInfo;
+ IMG_HANDLE hKernelCCBEventKickerMemInfo;
+ IMG_HANDLE hKernelSGXHostCtlMemInfo;
+ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
+#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920)
+ IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo;
+#endif
+ IMG_HANDLE hKernelSGXMiscMemInfo;
+#endif
+
+ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
+
+ SGX_INIT_SCRIPTS sScripts;
+
+ IMG_UINT32 ui32ClientBuildOptions;
+ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
+
+#if defined(SGX_SUPPORT_HWPROFILING)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelHWProfilingMemInfo;
+#else
+ IMG_HANDLE hKernelHWProfilingMemInfo;
+#endif
+#endif
+#if defined(SUPPORT_SGX_HWPERF)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelHWPerfCBMemInfo;
+#else
+ IMG_HANDLE hKernelHWPerfCBMemInfo;
+#endif
+#endif
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelTASigBufferMemInfo;
+ IMG_SID hKernel3DSigBufferMemInfo;
+#else
+ IMG_HANDLE hKernelTASigBufferMemInfo;
+ IMG_HANDLE hKernel3DSigBufferMemInfo;
+#endif
+
+#if defined(FIX_HW_BRN_29702)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelCFIMemInfo;
+#else
+ IMG_HANDLE hKernelCFIMemInfo;
+#endif
+#endif
+#if defined(FIX_HW_BRN_29823)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelDummyTermStreamMemInfo;
+#else
+ IMG_HANDLE hKernelDummyTermStreamMemInfo;
+#endif
+#endif
+
+#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelClearClipWAVDMStreamMemInfo;
+ IMG_SID hKernelClearClipWAIndexStreamMemInfo;
+ IMG_SID hKernelClearClipWAPDSMemInfo;
+ IMG_SID hKernelClearClipWAUSEMemInfo;
+ IMG_SID hKernelClearClipWAParamMemInfo;
+ IMG_SID hKernelClearClipWAPMPTMemInfo;
+ IMG_SID hKernelClearClipWATPCMemInfo;
+ IMG_SID hKernelClearClipWAPSGRgnHdrMemInfo;
+#else
+ IMG_HANDLE hKernelClearClipWAVDMStreamMemInfo;
+ IMG_HANDLE hKernelClearClipWAIndexStreamMemInfo;
+ IMG_HANDLE hKernelClearClipWAPDSMemInfo;
+ IMG_HANDLE hKernelClearClipWAUSEMemInfo;
+ IMG_HANDLE hKernelClearClipWAParamMemInfo;
+ IMG_HANDLE hKernelClearClipWAPMPTMemInfo;
+ IMG_HANDLE hKernelClearClipWATPCMemInfo;
+ IMG_HANDLE hKernelClearClipWAPSGRgnHdrMemInfo;
+#endif
+#endif
+
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559)
+ IMG_HANDLE hKernelVDMSnapShotBufferMemInfo;
+ IMG_HANDLE hKernelVDMCtrlStreamBufferMemInfo;
+#endif
+#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \
+ defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX)
+ IMG_HANDLE hKernelVDMStateUpdateBufferMemInfo;
+#endif
+#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelEDMStatusBufferMemInfo;
+#else
+ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
+#endif
+#endif
+
+ IMG_UINT32 ui32EDMTaskReg0;
+ IMG_UINT32 ui32EDMTaskReg1;
+
+ IMG_UINT32 ui32ClkGateCtl;
+ IMG_UINT32 ui32ClkGateCtl2;
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+#if defined(SGX_FEATURE_MP)
+ IMG_UINT32 ui32MasterClkGateStatusReg;
+ IMG_UINT32 ui32MasterClkGateStatusMask;
+ IMG_UINT32 ui32MasterClkGateStatus2Reg;
+ IMG_UINT32 ui32MasterClkGateStatus2Mask;
+#endif
+
+ IMG_UINT32 ui32CacheControl;
+
+ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
+#else
+ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
+#endif
+
+} SGX_BRIDGE_INIT_INFO;
+
+
+typedef struct _SGX_DEVICE_SYNC_LIST_
+{
+ PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelHWSyncListMemInfo;
+#else
+ IMG_HANDLE hKernelHWSyncListMemInfo;
+#endif
+ PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
+ PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
+
+ volatile IMG_UINT32 *pui32Lock;
+
+ struct _SGX_DEVICE_SYNC_LIST_ *psNext;
+
+
+ IMG_UINT32 ui32NumSyncObjects;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahSyncHandles[1];
+#else
+ IMG_HANDLE ahSyncHandles[1];
+#endif
+} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST;
+
+
+typedef struct _SGX_INTERNEL_STATUS_UPDATE_
+{
+ CTL_STATUS sCtlStatus;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelMemInfo;
+#else
+ IMG_HANDLE hKernelMemInfo;
+#endif
+} SGX_INTERNEL_STATUS_UPDATE;
+
+
+typedef struct _SGX_CCB_KICK_
+{
+ SGXMKIF_COMMAND sCommand;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hCCBKernelMemInfo;
+#else
+ IMG_HANDLE hCCBKernelMemInfo;
+#endif
+
+ IMG_UINT32 ui32NumDstSyncObjects;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hKernelHWSyncListMemInfo;
+#else
+ IMG_HANDLE hKernelHWSyncListMemInfo;
+#endif
+
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID *pahDstSyncHandles;
+#else
+ IMG_HANDLE *pahDstSyncHandles;
+#endif
+
+ IMG_UINT32 ui32NumTAStatusVals;
+ IMG_UINT32 ui32Num3DStatusVals;
+
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
+ SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
+ SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
+#else
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+ IMG_SID ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+#else
+ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+#endif
+#endif
+
+ IMG_BOOL bFirstKickOrResume;
+#if (defined(NO_HARDWARE) || defined(PDUMP))
+ IMG_BOOL bTerminateOrAbort;
+#endif
+ IMG_BOOL bLastInScene;
+
+
+ IMG_UINT32 ui32CCBOffset;
+
+#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
+
+ IMG_UINT32 ui32NumTASrcSyncs;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
+#else
+ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
+#endif
+ IMG_UINT32 ui32NumTADstSyncs;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
+#else
+ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
+#endif
+ IMG_UINT32 ui32Num3DSrcSyncs;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
+#else
+ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
+#endif
+#else
+
+ IMG_UINT32 ui32NumSrcSyncs;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA];
+#else
+ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA];
+#endif
+#endif
+
+
+ IMG_BOOL bTADependency;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hTA3DSyncInfo;
+
+ IMG_SID hTASyncInfo;
+ IMG_SID h3DSyncInfo;
+#else
+ IMG_HANDLE hTA3DSyncInfo;
+
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+#endif
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+#if defined(NO_HARDWARE)
+ IMG_UINT32 ui32WriteOpsPendingVal;
+#endif
+ IMG_HANDLE hDevMemContext;
+} SGX_CCB_KICK;
+
+
+#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
+
+
+typedef struct _SGX_CLIENT_INFO_
+{
+ IMG_UINT32 ui32ProcessID;
+ IMG_VOID *pvProcess;
+ PVRSRV_MISC_INFO sMiscInfo;
+
+ IMG_UINT32 asDevData[SGX_MAX_DEV_DATA];
+
+} SGX_CLIENT_INFO;
+
+typedef struct _SGX_INTERNAL_DEVINFO_
+{
+ IMG_UINT32 ui32Flags;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hHostCtlKernelMemInfoHandle;
+#else
+ IMG_HANDLE hHostCtlKernelMemInfoHandle;
+#endif
+ IMG_BOOL bForcePTOff;
+} SGX_INTERNAL_DEVINFO;
+
+
+typedef struct _SGX_INTERNAL_DEVINFO_KM_
+{
+ IMG_UINT32 ui32Flags;
+ IMG_HANDLE hHostCtlKernelMemInfoHandle;
+ IMG_BOOL bForcePTOff;
+} SGX_INTERNAL_DEVINFO_KM;
+
+
+#if defined(TRANSFER_QUEUE)
+typedef struct _PVRSRV_TRANSFER_SGX_KICK_
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hCCBMemInfo;
+#else
+ IMG_HANDLE hCCBMemInfo;
+#endif
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
+
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hTASyncInfo;
+ IMG_SID h3DSyncInfo;
+#else
+ IMG_HANDLE hTASyncInfo;
+ IMG_HANDLE h3DSyncInfo;
+#endif
+
+ IMG_UINT32 ui32NumSrcSync;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+#else
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+#endif
+
+ IMG_UINT32 ui32NumDstSync;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+#else
+ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
+#endif
+
+ IMG_UINT32 ui32Flags;
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+ IMG_HANDLE hDevMemContext;
+} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+typedef struct _PVRSRV_2D_SGX_KICK_
+{
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID hCCBMemInfo;
+#else
+ IMG_HANDLE hCCBMemInfo;
+#endif
+ IMG_UINT32 ui32SharedCmdCCBOffset;
+
+ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
+
+ IMG_UINT32 ui32NumSrcSync;
+#if defined (SUPPORT_SID_INTERFACE)
+ IMG_SID ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
+
+
+ IMG_SID hDstSyncInfo;
+
+
+ IMG_SID hTASyncInfo;
+
+
+ IMG_SID h3DSyncInfo;
+#else
+ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
+
+
+ IMG_HANDLE hDstSyncInfo;
+
+
+ IMG_HANDLE hTASyncInfo;
+
+
+ IMG_HANDLE h3DSyncInfo;
+#endif
+
+ IMG_UINT32 ui32PDumpFlags;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CCBDumpWOff;
+#endif
+ IMG_HANDLE hDevMemContext;
+} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
+#endif
+#endif
+
+
+#endif
diff --git a/drivers/gpu/pvr/sgxmmu.h b/drivers/gpu/pvr/sgxmmu.h
new file mode 100644
index 0000000..1b265f1
--- /dev/null
+++ b/drivers/gpu/pvr/sgxmmu.h
@@ -0,0 +1,72 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#if !defined(__SGXMMU_KM_H__)
+#define __SGXMMU_KM_H__
+
+#define SGX_MMU_PAGE_SHIFT (12)
+#define SGX_MMU_PAGE_SIZE (1U<<SGX_MMU_PAGE_SHIFT)
+#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1U)
+
+#define SGX_MMU_PD_SHIFT (10)
+#define SGX_MMU_PD_SIZE (1U<<SGX_MMU_PD_SHIFT)
+#define SGX_MMU_PD_MASK (0xFFC00000U)
+
+#if defined(SGX_FEATURE_36BIT_MMU)
+ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00U)
+ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4)
+#else
+ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000U)
+ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0)
+#endif
+#define SGX_MMU_PDE_VALID (0x00000001U)
+#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000U)
+#define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002U)
+#define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004U)
+#define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006U)
+#define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008U)
+#define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AU)
+#define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EU)
+
+#define SGX_MMU_PT_SHIFT (10)
+#define SGX_MMU_PT_SIZE (1U<<SGX_MMU_PT_SHIFT)
+#define SGX_MMU_PT_MASK (0x003FF000U)
+
+#if defined(SGX_FEATURE_36BIT_MMU)
+ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00U)
+ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4)
+#else
+ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000U)
+ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0)
+#endif
+#define SGX_MMU_PTE_VALID (0x00000001U)
+#define SGX_MMU_PTE_WRITEONLY (0x00000002U)
+#define SGX_MMU_PTE_READONLY (0x00000004U)
+#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008U)
+#define SGX_MMU_PTE_EDMPROTECT (0x00000010U)
+
+#endif
+
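The constants above encode a conventional two-level layout for a 32-bit SGX device virtual address: a 10-bit page-directory index (SGX_MMU_PD_MASK), a 10-bit page-table index (SGX_MMU_PT_MASK) and a 12-bit offset within a 4 KB page. A rough stand-alone sketch of that decomposition, copying only the masks it needs; the real driver operates on IMG_DEV_VIRTADDR values and actual page-table memory, not printf:

/* Splits a device virtual address the way the sgxmmu.h masks imply.
   The mask values are copied from the header; everything else is illustrative. */
#include <stdint.h>
#include <stdio.h>

#define SGX_MMU_PAGE_SHIFT (12)
#define SGX_MMU_PAGE_MASK  (0xFFFU)        /* (1U << 12) - 1                  */
#define SGX_MMU_PT_SHIFT   (10)
#define SGX_MMU_PT_MASK    (0x003FF000U)   /* bits 21..12: page-table index   */
#define SGX_MMU_PD_MASK    (0xFFC00000U)   /* bits 31..22: page-dir index     */

int main(void)
{
    uint32_t dev_vaddr = 0x12345678U;      /* arbitrary example address */

    uint32_t pd_index = (dev_vaddr & SGX_MMU_PD_MASK)
                            >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
    uint32_t pt_index = (dev_vaddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
    uint32_t offset   = dev_vaddr & SGX_MMU_PAGE_MASK;

    printf("PD[%u] -> PT[%u] -> offset 0x%03x\n", pd_index, pt_index, offset);
    return 0;
}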
diff --git a/drivers/gpu/pvr/sgxscript.h b/drivers/gpu/pvr/sgxscript.h
new file mode 100644
index 0000000..94eb453
--- /dev/null
+++ b/drivers/gpu/pvr/sgxscript.h
@@ -0,0 +1,83 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*****************************************************************************/
+#ifndef __SGXSCRIPT_H__
+#define __SGXSCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define SGX_MAX_INIT_COMMANDS 64
+#define SGX_MAX_DEINIT_COMMANDS 16
+
+typedef enum _SGX_INIT_OPERATION
+{
+ SGX_INIT_OP_ILLEGAL = 0,
+ SGX_INIT_OP_WRITE_HW_REG,
+#if defined(PDUMP)
+ SGX_INIT_OP_PDUMP_HW_REG,
+#endif
+ SGX_INIT_OP_HALT
+} SGX_INIT_OPERATION;
+
+typedef union _SGX_INIT_COMMAND
+{
+ SGX_INIT_OPERATION eOp;
+ struct {
+ SGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ } sWriteHWReg;
+#if defined(PDUMP)
+ struct {
+ SGX_INIT_OPERATION eOp;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ } sPDumpHWReg;
+#endif
+#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
+ struct {
+ SGX_INIT_OPERATION eOp;
+ } sWorkaroundBRN22997;
+#endif
+} SGX_INIT_COMMAND;
+
+typedef struct _SGX_INIT_SCRIPTS_
+{
+ SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
+ SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
+ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
+} SGX_INIT_SCRIPTS;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __SGXSCRIPT_H__ */
+
+/*****************************************************************************
+ End of file (sgxscript.h)
+*****************************************************************************/
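SGX_INIT_COMMAND above is a tagged union: eOp is the first member of every variant, so code can read the opcode, dispatch, and then access the matching struct. A hedged sketch of how such a script could be walked; run_script() and write_hw_reg() are invented stand-ins for illustration, and the real execution lives in the SGX initialisation code, not in this header:

/* Walks a tiny init script in the style of SGX_INIT_SCRIPTS.
   run_script()/write_hw_reg() are illustrative stand-ins, not driver API. */
#include <stdint.h>
#include <stdio.h>

typedef enum { OP_ILLEGAL = 0, OP_WRITE_HW_REG, OP_HALT } init_op;

typedef union {
    init_op eOp;                       /* common first member acts as the tag */
    struct { init_op eOp; uint32_t ui32Offset; uint32_t ui32Value; } sWriteHWReg;
} init_command;

static void write_hw_reg(uint32_t offset, uint32_t value)
{
    printf("write reg 0x%04x = 0x%08x\n", offset, value);   /* stub */
}

static int run_script(const init_command *cmds, unsigned max)
{
    for (unsigned i = 0; i < max; i++) {
        switch (cmds[i].eOp) {
        case OP_WRITE_HW_REG:
            write_hw_reg(cmds[i].sWriteHWReg.ui32Offset,
                         cmds[i].sWriteHWReg.ui32Value);
            break;
        case OP_HALT:
            return 0;                  /* script finished cleanly */
        default:
            return -1;                 /* OP_ILLEGAL or unknown opcode */
        }
    }
    return -1;                         /* ran off the end without a HALT */
}

int main(void)
{
    const init_command script[4] = {
        { .sWriteHWReg = { OP_WRITE_HW_REG, 0x0040, 0x00000001 } },
        { .sWriteHWReg = { OP_WRITE_HW_REG, 0x0044, 0x0000000F } },
        { .eOp = OP_HALT },
    };
    return run_script(script, 4) ? 1 : 0;
}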
diff --git a/drivers/gpu/pvr/srvkm.h b/drivers/gpu/pvr/srvkm.h
new file mode 100644
index 0000000..474a1ee
--- /dev/null
+++ b/drivers/gpu/pvr/srvkm.h
@@ -0,0 +1,78 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+ #ifdef PVR_DISABLE_LOGGING
+ #define PVR_LOG(X)
+ #else
+
+ #define PVR_LOG(X) PVRSRVReleasePrintf X;
+ #endif
+
+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+
+ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags);
+ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID);
+
+ IMG_IMPORT IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID);
+
+ IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
+
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave);
+
+ IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+ IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+ IMG_INT32 iNotLastLoop; \
+ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+ ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \
+ uiCurrent = OSClockus(), \
+ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
+ uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
+
+IMG_IMPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+
+#endif
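LOOP_UNTIL_TIMEOUT/END_LOOP_UNTIL_TIMEOUT above implement a wrap-safe polling loop over a microsecond clock: the body re-runs until TIMEOUT microseconds have elapsed (plus one last pass via iNotLastLoop), re-basing uiStart/uiOffset if OSClockus() wraps. A self-contained usage sketch; the IMG_* typedefs and the clock_gettime-based OSClockus() below are stand-ins for services definitions that live elsewhere in the driver:

/* Usage sketch for the srvkm.h polling macros; types and OSClockus() are stubbed. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef uint32_t IMG_UINT32;
typedef int32_t  IMG_INT32;
#define IMG_UINT32_MAX 0xFFFFFFFFU

static IMG_UINT32 OSClockus(void)            /* stand-in microsecond clock */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (IMG_UINT32)(ts.tv_sec * 1000000u + ts.tv_nsec / 1000u);
}

/* Same shape as the macro pair defined above. */
#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
{\
    IMG_UINT32 uiOffset, uiStart, uiCurrent; \
    IMG_INT32 iNotLastLoop; \
    for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
        ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \
        uiCurrent = OSClockus(), \
        uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
        uiStart = uiCurrent < uiStart ? 0 : uiStart)

#define END_LOOP_UNTIL_TIMEOUT() \
}

static volatile int g_hw_ready;              /* pretend hardware status flag */

int main(void)
{
    int done = 0;

    LOOP_UNTIL_TIMEOUT(2000)                 /* poll for up to ~2 ms */
    {
        if (g_hw_ready)                      /* a register read in the real driver */
        {
            done = 1;
            break;
        }
    } END_LOOP_UNTIL_TIMEOUT();

    printf("%s\n", done ? "device ready" : "timed out");
    return 0;
}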
diff --git a/drivers/gpu/pvr/staticversion.h b/drivers/gpu/pvr/staticversion.h
new file mode 100644
index 0000000..433d126
--- /dev/null
+++ b/drivers/gpu/pvr/staticversion.h
@@ -0,0 +1,33 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+*****************************************************************************/
+#ifndef _STATICVERSION_H_
+#define _STATICVERSION_H_
+
+#define PVRVERSION_MAJ 1
+#define PVRVERSION_MIN 8
+#define PVRVERSION_BRANCH 18
+
+#endif /* _STATICVERSION_H_ */
diff --git a/drivers/gpu/pvr/syscommon.h b/drivers/gpu/pvr/syscommon.h
new file mode 100644
index 0000000..d5e5eef
--- /dev/null
+++ b/drivers/gpu/pvr/syscommon.h
@@ -0,0 +1,270 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef _SYSCOMMON_H
+#define _SYSCOMMON_H
+
+#include "sysconfig.h"
+#include "sysinfo.h"
+#include "servicesint.h"
+#include "queue.h"
+#include "power.h"
+#include "resman.h"
+#include "ra.h"
+#include "device.h"
+#include "buffer_manager.h"
+#include "pvr_debug.h"
+#include "services.h"
+
+#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
+#include <asm/io.h>
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+typedef struct _SYS_DEVICE_ID_TAG
+{
+ IMG_UINT32 uiID;
+ IMG_BOOL bInUse;
+
+} SYS_DEVICE_ID;
+
+
+#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
+
+typedef IMG_HANDLE (*PFN_HTIMER_CREATE) (IMG_VOID);
+typedef IMG_UINT32 (*PFN_HTIMER_GETUS) (IMG_HANDLE);
+typedef IMG_VOID (*PFN_HTIMER_DESTROY) (IMG_HANDLE);
+typedef struct _SYS_DATA_TAG_
+{
+ IMG_UINT32 ui32NumDevices;
+ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
+ PVRSRV_DEVICE_NODE *psDeviceNodeList;
+ PVRSRV_POWER_DEV *psPowerDeviceList;
+ PVRSRV_RESOURCE sPowerStateChangeResource;
+ PVRSRV_SYS_POWER_STATE eCurrentPowerState;
+ PVRSRV_SYS_POWER_STATE eFailedPowerState;
+ IMG_UINT32 ui32CurrentOSPowerState;
+ PVRSRV_QUEUE_INFO *psQueueList;
+ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
+ IMG_PVOID pvEnvSpecificData;
+ IMG_PVOID pvSysSpecificData;
+ PVRSRV_RESOURCE sQProcessResource;
+ IMG_VOID *pvSOCRegsBase;
+ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
+ IMG_UINT32 *pvSOCTimerRegisterKM;
+ IMG_VOID *pvSOCClockGateRegsBase;
+ IMG_UINT32 ui32SOCClockGateRegsSize;
+
+ struct _DEVICE_COMMAND_DATA_ *apsDeviceCommandData[SYS_DEVICE_COUNT];
+
+
+ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
+
+ IMG_CHAR *pszVersionString;
+#if defined (SUPPORT_SID_INTERFACE)
+ PVRSRV_EVENTOBJECT_KM *psGlobalEventObject;
+#else
+ PVRSRV_EVENTOBJECT *psGlobalEventObject;
+#endif
+
+ PVRSRV_MISC_INFO_CPUCACHEOP_TYPE ePendingCacheOpType;
+
+ PFN_HTIMER_CREATE pfnHighResTimerCreate;
+ PFN_HTIMER_GETUS pfnHighResTimerGetus;
+ PFN_HTIMER_DESTROY pfnHighResTimerDestroy;
+} SYS_DATA;
+
+
+
+#if defined (CUSTOM_DISPLAY_SEGMENT)
+PVRSRV_ERROR SysGetDisplaySegmentAddress (IMG_VOID *pvDevInfo, IMG_VOID *pvPhysicalAddress, IMG_UINT32 *pui32Length);
+#endif
+
+PVRSRV_ERROR SysInitialise(IMG_VOID);
+PVRSRV_ERROR SysFinalise(IMG_VOID);
+
+PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
+PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
+ IMG_VOID **ppvDeviceMap);
+
+IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
+
+PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
+
+PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
+PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+#if defined(SYS_SUPPORTS_SGX_IDLE_CALLBACK)
+IMG_VOID SysSGXIdleTransition(IMG_BOOL bSGXIdle);
+#endif
+
+#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
+PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock);
+IMG_VOID SysPowerLockUnwrap(IMG_VOID);
+#endif
+
+PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
+ IMG_VOID *pvIn,
+ IMG_UINT32 ulInSize,
+ IMG_VOID *pvOut,
+ IMG_UINT32 ulOutSize);
+
+
+IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
+IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
+IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
+IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
+IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
+#if defined(PVR_LMA)
+IMG_BOOL SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr);
+IMG_BOOL SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
+#endif
+
+extern SYS_DATA* gpsSysData;
+
+
+#if !defined(USE_CODE)
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysAcquireData)
+#endif
+static INLINE IMG_VOID SysAcquireData(SYS_DATA **ppsSysData)
+{
+
+ *ppsSysData = gpsSysData;
+
+
+
+
+
+ PVR_ASSERT (gpsSysData != IMG_NULL);
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysAcquireDataNoCheck)
+#endif
+static INLINE SYS_DATA * SysAcquireDataNoCheck(IMG_VOID)
+{
+
+ return gpsSysData;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysInitialiseCommon)
+#endif
+static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError = PVRSRVInit(psSysData);
+
+ return eError;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysDeinitialiseCommon)
+#endif
+static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
+{
+
+ PVRSRVDeInit(psSysData);
+
+ OSDestroyResource(&psSysData->sPowerStateChangeResource);
+}
+#endif
+
+
+#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
+#define SysReadHWReg(p, o) OSReadHWReg(p, o)
+#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
+#else
+static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
+{
+ return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset);
+}
+
+static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysHighResTimerCreate)
+#endif
+static INLINE IMG_HANDLE SysHighResTimerCreate(IMG_VOID)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+ return psSysData->pfnHighResTimerCreate();
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysHighResTimerGetus)
+#endif
+static INLINE IMG_UINT32 SysHighResTimerGetus(IMG_HANDLE hTimer)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+ return psSysData->pfnHighResTimerGetus(hTimer);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SysHighResTimerDestroy)
+#endif
+static INLINE IMG_VOID SysHighResTimerDestroy(IMG_HANDLE hTimer)
+{
+ SYS_DATA *psSysData;
+
+ SysAcquireData(&psSysData);
+ psSysData->pfnHighResTimerDestroy(hTimer);
+}
+#endif
+
diff --git a/drivers/gpu/pvr/ttrace.h b/drivers/gpu/pvr/ttrace.h
new file mode 100644
index 0000000..9e04b88
--- /dev/null
+++ b/drivers/gpu/pvr/ttrace.h
@@ -0,0 +1,184 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "services_headers.h"
+#include "ttrace_common.h"
+#include "ttrace_tokens.h"
+
+#ifndef __TTRACE_H__
+#define __TTRACE_H__
+
+#if defined(TTRACE)
+
+ #define PVR_TTRACE(group, class, token) \
+ PVRSRVTimeTrace(group, class, token)
+ #define PVR_TTRACE_UI8(group, class, token, val) \
+ PVRSRVTimeTraceUI8(group, class, token, val)
+ #define PVR_TTRACE_UI16(group, class, token, val) \
+ PVRSRVTimeTraceUI16(group, class, token, val)
+ #define PVR_TTRACE_UI32(group, class, token, val) \
+ PVRSRVTimeTraceUI32(group, class, token, val)
+ #define PVR_TTRACE_UI64(group, class, token, val) \
+ PVRSRVTimeTraceUI64(group, class, token, val)
+ #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \
+ PVRSRVTimeTraceDevVirtAddr(group, class, token, val)
+ #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \
+ PVRSRVTimeTraceCpuPhyAddr(group, class, token, val)
+ #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \
+ PVRSRVTimeTraceDevPhysAddr(group, class, token, val)
+ #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \
+ PVRSRVTimeTraceSysPhysAddr(group, class, token, val)
+ #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \
+ PVRSRVTimeTraceSyncObject(group, token, syncobj, op)
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_UINT32 ui32TypeSize,
+ IMG_UINT32 ui32Count, IMG_UINT8 *ui8Data);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTrace)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTrace(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, 0, 0, NULL);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceUI8)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceUI8(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_UINT8 ui8Value)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI8,
+ 1, &ui8Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceUI16)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceUI16(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_UINT16 ui16Value)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI16,
+ 1, (IMG_UINT8 *) &ui16Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceUI32)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceUI32(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_UINT32 ui32Value)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32,
+ 1, (IMG_UINT8 *) &ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceUI64)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceUI64(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_UINT64 ui64Value)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI64,
+ 1, (IMG_UINT8 *) &ui64Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceDevVirtAddr)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceDevVirtAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_DEV_VIRTADDR psVAddr)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32,
+ 1, (IMG_UINT8 *) &psVAddr.uiAddr);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceCpuPhyAddr)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceCpuPhyAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_CPU_PHYADDR psPAddr)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32,
+ 1, (IMG_UINT8 *) &psPAddr.uiAddr);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceDevPhysAddr)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceDevPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_DEV_PHYADDR psPAddr)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32,
+ 1, (IMG_UINT8 *) &psPAddr.uiAddr);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVTimeTraceSysPhysAddr)
+#endif
+static INLINE IMG_VOID PVRSRVTimeTraceSysPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class,
+ IMG_UINT32 ui32Token, IMG_SYS_PHYADDR psPAddr)
+{
+ PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, sizeof(psPAddr.uiAddr),
+ 1, (IMG_UINT8 *) &psPAddr.uiAddr);
+}
+
+#else
+
+ #define PVR_TTRACE(group, class, token) \
+ ((void) 0)
+ #define PVR_TTRACE_UI8(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_UI16(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_UI32(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_UI64(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \
+ ((void) 0)
+ #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \
+ ((void) 0)
+
+#endif
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceInit(IMG_VOID);
+IMG_IMPORT IMG_VOID PVRSRVTimeTraceDeinit(IMG_VOID);
+
+IMG_IMPORT IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token,
+ PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp);
+IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID);
+IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID);
+
+IMG_IMPORT IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID);
+#endif
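
A quick aside on the pattern above: when TTRACE is defined the PVR_TTRACE_* wrappers funnel every typed value through PVRSRVTimeTraceArray(); otherwise they expand to ((void) 0) and the tracing compiles away entirely. A minimal user-space analogue of that switch, assuming nothing from the driver (TRACE_U32 and trace_array are illustrative names only):

    #include <stdio.h>
    #include <stdint.h>

    #define MYTRACE  /* comment this out and TRACE_U32 compiles to nothing */

    /* Generic sink, analogous to PVRSRVTimeTraceArray() */
    static void trace_array(unsigned group, unsigned token,
                            unsigned size, const void *data)
    {
        printf("group=%u token=%u size=%u first_byte=%u\n",
               group, token, size, (unsigned)*(const uint8_t *)data);
    }

    #if defined(MYTRACE)
    #define TRACE_U32(group, token, val) \
        do { uint32_t _v = (val); trace_array(group, token, sizeof(_v), &_v); } while (0)
    #else
    #define TRACE_U32(group, token, val) ((void) 0)
    #endif

    int main(void)
    {
        TRACE_U32(1, 42, 0xdeadbeef);  /* prints the group, token, size and first byte */
        return 0;
    }
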
diff --git a/drivers/gpu/pvr/ttrace_common.h b/drivers/gpu/pvr/ttrace_common.h
new file mode 100644
index 0000000..5aa6fec
--- /dev/null
+++ b/drivers/gpu/pvr/ttrace_common.h
@@ -0,0 +1,84 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#include "img_types.h"
+
+#ifndef __TTRACE_COMMON_H__
+#define __TTRACE_COMMON_H__
+
+#define PVRSRV_TRACE_HEADER 0
+#define PVRSRV_TRACE_TIMESTAMP 1
+#define PVRSRV_TRACE_HOSTUID 2
+#define PVRSRV_TRACE_DATA_HEADER 3
+#define PVRSRV_TRACE_DATA_PAYLOAD 4
+
+#define PVRSRV_TRACE_ITEM_SIZE 16
+
+#define PVRSRV_TRACE_GROUP_MASK 0xff
+#define PVRSRV_TRACE_CLASS_MASK 0xff
+#define PVRSRV_TRACE_TOKEN_MASK 0xffff
+
+#define PVRSRV_TRACE_GROUP_SHIFT 24
+#define PVRSRV_TRACE_CLASS_SHIFT 16
+#define PVRSRV_TRACE_TOKEN_SHIFT 0
+
+#define PVRSRV_TRACE_SIZE_MASK 0xffff
+#define PVRSRV_TRACE_TYPE_MASK 0xf
+#define PVRSRV_TRACE_COUNT_MASK 0xfff
+
+#define PVRSRV_TRACE_SIZE_SHIFT 16
+#define PVRSRV_TRACE_TYPE_SHIFT 12
+#define PVRSRV_TRACE_COUNT_SHIFT 0
+
+
+#define WRITE_HEADER(n,m) \
+ ((m & PVRSRV_TRACE_##n##_MASK) << PVRSRV_TRACE_##n##_SHIFT)
+
+#define READ_HEADER(n,m) \
+ ((m & (PVRSRV_TRACE_##n##_MASK << PVRSRV_TRACE_##n##_SHIFT)) >> PVRSRV_TRACE_##n##_SHIFT)
+
+#define TIME_TRACE_BUFFER_SIZE 4096
+
+#define PVRSRV_TRACE_TYPE_UI8 0
+#define PVRSRV_TRACE_TYPE_UI16 1
+#define PVRSRV_TRACE_TYPE_UI32 2
+#define PVRSRV_TRACE_TYPE_UI64 3
+
+#define PVRSRV_TRACE_TYPE_SYNC 15
+ #define PVRSRV_TRACE_SYNC_UID 0
+ #define PVRSRV_TRACE_SYNC_WOP 1
+ #define PVRSRV_TRACE_SYNC_WOC 2
+ #define PVRSRV_TRACE_SYNC_ROP 3
+ #define PVRSRV_TRACE_SYNC_ROC 4
+ #define PVRSRV_TRACE_SYNC_WO_DEV_VADDR 5
+ #define PVRSRV_TRACE_SYNC_RO_DEV_VADDR 6
+ #define PVRSRV_TRACE_SYNC_OP 7
+ #define PVRSRV_TRACE_SYNC_RO2P 8
+ #define PVRSRV_TRACE_SYNC_RO2C 9
+ #define PVRSRV_TRACE_SYNC_RO2_DEV_VADDR 10
+#define PVRSRV_TRACE_TYPE_SYNC_SIZE ((PVRSRV_TRACE_SYNC_RO2_DEV_VADDR + 1) * sizeof(IMG_UINT32))
+
+#endif
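
The masks and shifts above pack group, class and token into a single 32-bit header word via WRITE_HEADER and unpack it with READ_HEADER. A stand-alone sketch that reuses the same definitions shows the round trip; the example group/class/token values are borrowed from ttrace_tokens.h below:

    #include <stdio.h>

    /* Copied verbatim from ttrace_common.h */
    #define PVRSRV_TRACE_GROUP_MASK   0xff
    #define PVRSRV_TRACE_CLASS_MASK   0xff
    #define PVRSRV_TRACE_TOKEN_MASK   0xffff
    #define PVRSRV_TRACE_GROUP_SHIFT  24
    #define PVRSRV_TRACE_CLASS_SHIFT  16
    #define PVRSRV_TRACE_TOKEN_SHIFT  0

    #define WRITE_HEADER(n,m) \
        ((m & PVRSRV_TRACE_##n##_MASK) << PVRSRV_TRACE_##n##_SHIFT)
    #define READ_HEADER(n,m) \
        ((m & (PVRSRV_TRACE_##n##_MASK << PVRSRV_TRACE_##n##_SHIFT)) >> PVRSRV_TRACE_##n##_SHIFT)

    int main(void)
    {
        /* GROUP_KICK (0), CLASS_SYNC (2), KICK_TOKEN_TA_SYNC (3) */
        unsigned hdr = WRITE_HEADER(GROUP, 0u) |
                       WRITE_HEADER(CLASS, 2u) |
                       WRITE_HEADER(TOKEN, 3u);

        printf("header = 0x%08x\n", hdr);                  /* 0x00020003 */
        printf("group  = %u\n", READ_HEADER(GROUP, hdr));  /* 0 */
        printf("class  = %u\n", READ_HEADER(CLASS, hdr));  /* 2 */
        printf("token  = %u\n", READ_HEADER(TOKEN, hdr));  /* 3 */
        return 0;
    }
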
diff --git a/drivers/gpu/pvr/ttrace_tokens.h b/drivers/gpu/pvr/ttrace_tokens.h
new file mode 100644
index 0000000..21ea5fb
--- /dev/null
+++ b/drivers/gpu/pvr/ttrace_tokens.h
@@ -0,0 +1,84 @@
+/**********************************************************************
+ *
+ * Copyright (C) Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+
+#ifndef __TTRACE_TOKENS_H__
+#define __TTRACE_TOKENS_H__
+
+#define PVRSRV_TRACE_GROUP_KICK 0
+#define PVRSRV_TRACE_GROUP_TRANSFER 1
+#define PVRSRV_TRACE_GROUP_QUEUE 2
+#define PVRSRV_TRACE_GROUP_POWER 3
+#define PVRSRV_TRACE_GROUP_MKSYNC 4
+
+#define PVRSRV_TRACE_GROUP_PADDING 255
+
+#define PVRSRV_TRACE_CLASS_FUNCTION_ENTER 0
+#define PVRSRV_TRACE_CLASS_FUNCTION_EXIT 1
+#define PVRSRV_TRACE_CLASS_SYNC 2
+#define PVRSRV_TRACE_CLASS_CCB 3
+#define PVRSRV_TRACE_CLASS_CMD_START 4
+#define PVRSRV_TRACE_CLASS_CMD_END 5
+#define PVRSRV_TRACE_CLASS_CMD_COMP_START 6
+#define PVRSRV_TRACE_CLASS_CMD_COMP_END 7
+
+#define PVRSRV_TRACE_CLASS_NONE 255
+
+#define PVRSRV_SYNCOP_SAMPLE 0
+#define PVRSRV_SYNCOP_COMPLETE 1
+#define PVRSRV_SYNCOP_DUMP 2
+
+#define KICK_TOKEN_DOKICK 0
+#define KICK_TOKEN_CCB_OFFSET 1
+#define KICK_TOKEN_TA3D_SYNC 2
+#define KICK_TOKEN_TA_SYNC 3
+#define KICK_TOKEN_3D_SYNC 4
+#define KICK_TOKEN_SRC_SYNC 5
+#define KICK_TOKEN_DST_SYNC 6
+
+#define TRANSFER_TOKEN_SUBMIT 0
+#define TRANSFER_TOKEN_TA_SYNC 1
+#define TRANSFER_TOKEN_3D_SYNC 2
+#define TRANSFER_TOKEN_SRC_SYNC 3
+#define TRANSFER_TOKEN_DST_SYNC 4
+#define TRANSFER_TOKEN_CCB_OFFSET 5
+
+#define QUEUE_TOKEN_GET_SPACE 0
+#define QUEUE_TOKEN_INSERTKM 1
+#define QUEUE_TOKEN_SUBMITKM 2
+#define QUEUE_TOKEN_PROCESS_COMMAND 3
+#define QUEUE_TOKEN_PROCESS_QUEUES 4
+#define QUEUE_TOKEN_COMMAND_COMPLETE 5
+#define QUEUE_TOKEN_UPDATE_DST 6
+#define QUEUE_TOKEN_UPDATE_SRC 7
+#define QUEUE_TOKEN_SRC_SYNC 8
+#define QUEUE_TOKEN_DST_SYNC 9
+#define QUEUE_TOKEN_COMMAND_TYPE 10
+
+#define MKSYNC_TOKEN_KERNEL_CCB_OFFSET 0
+#define MKSYNC_TOKEN_CORE_CLK 1
+#define MKSYNC_TOKEN_UKERNEL_CLK 2
+
+#endif
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 137e1a3..dbed621 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos_params.h>
/* I2C controller revisions */
#define OMAP_I2C_REV_2 0x20
@@ -75,6 +76,7 @@
OMAP_I2C_REVNB_LO,
OMAP_I2C_REVNB_HI,
OMAP_I2C_IRQSTATUS_RAW,
+ OMAP_I2C_IRQSTATUS,
OMAP_I2C_IRQENABLE_SET,
OMAP_I2C_IRQENABLE_CLR,
};
@@ -143,7 +145,6 @@
#define OMAP_I2C_SCLH_HSSCLH 8
/* I2C System Test Register (OMAP_I2C_SYSTEST): */
-#ifdef DEBUG
#define OMAP_I2C_SYSTEST_ST_EN (1 << 15) /* System test enable */
#define OMAP_I2C_SYSTEST_FREE (1 << 14) /* Free running mode */
#define OMAP_I2C_SYSTEST_TMODE_MASK (3 << 12) /* Test mode select */
@@ -152,7 +153,6 @@
#define OMAP_I2C_SYSTEST_SCL_O (1 << 2) /* SCL line drive out */
#define OMAP_I2C_SYSTEST_SDA_I (1 << 1) /* SDA line sense in */
#define OMAP_I2C_SYSTEST_SDA_O (1 << 0) /* SDA line drive out */
-#endif
/* OCP_SYSSTATUS bit definitions */
#define SYSS_RESETDONE_MASK (1 << 0)
@@ -179,8 +179,7 @@
struct completion cmd_complete;
struct resource *ioarea;
u32 latency; /* maximum mpu wkup latency */
- void (*set_mpu_wkup_lat)(struct device *dev,
- long latency);
+ struct pm_qos_request_list *pm_qos;
u32 speed; /* Speed of bus in Khz */
u16 cmd_err;
u8 *buf;
@@ -247,6 +246,7 @@
[OMAP_I2C_REVNB_LO] = 0x00,
[OMAP_I2C_REVNB_HI] = 0x04,
[OMAP_I2C_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IRQSTATUS] = 0x28,
[OMAP_I2C_IRQENABLE_SET] = 0x2c,
[OMAP_I2C_IRQENABLE_CLR] = 0x30,
};
@@ -276,7 +276,7 @@
pm_runtime_get_sync(&pdev->dev);
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -288,12 +288,12 @@
}
dev->idle = 0;
- /*
- * Don't write to this register if the IE state is 0 as it can
- * cause deadlock.
- */
- if (dev->iestate)
+ if (cpu_is_omap44xx() && dev->rev >= OMAP_I2C_REV_ON_4430) {
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 0x6FFF);
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_SET, dev->iestate);
+ } else {
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+ }
}
static void omap_i2c_idle(struct omap_i2c_dev *dev)
@@ -307,19 +307,13 @@
pdev = to_platform_device(dev->dev);
pdata = pdev->dev.platform_data;
- dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (dev->rev >= OMAP_I2C_REV_ON_4430)
- omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ if (cpu_is_omap44xx() && dev->rev >= OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 0x6FFF);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
if (dev->rev < OMAP_I2C_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
- } else {
- omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate);
-
- /* Flush posted write before the dev->idle store occurs */
- omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
}
dev->idle = 1;
@@ -487,13 +481,7 @@
if (cpu_is_omap2430() || cpu_is_omap34xx())
dev->errata |= I2C_OMAP_ERRATA_I207;
- /* Enable interrupts */
- dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
- OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
- OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
- (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -611,8 +599,7 @@
return 0;
/* We have an error */
- if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
- OMAP_I2C_STAT_XUDF)) {
+ if (dev->cmd_err & OMAP_I2C_STAT_AL) {
omap_i2c_init(dev);
return -EIO;
}
@@ -641,15 +628,34 @@
struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
int i;
int r;
+ u16 val;
omap_i2c_unidle(dev);
r = omap_i2c_wait_for_bb(dev);
+ /* On timeout, check again after a soft reset of the I2C block */
+ if (WARN_ON(r == -ETIMEDOUT)) {
+ /* Provide a permanent clock to recover the peripheral */
+ val = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ val |= (OMAP_I2C_SYSTEST_ST_EN |
+ OMAP_I2C_SYSTEST_FREE |
+ (2 << OMAP_I2C_SYSTEST_TMODE_SHIFT));
+ omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, val);
+ msleep(1);
+ omap_i2c_init(dev);
+ r = omap_i2c_wait_for_bb(dev);
+ }
if (r < 0)
goto out;
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+ /*
+ * When waiting for completion of an i2c transfer, we need to
+ * set a wake-up latency constraint for the MPU. This is to
+ * ensure a quick enough wakeup from idle when the transfer
+ * completes.
+ */
+ if (dev->pm_qos)
+ pm_qos_update_request(dev->pm_qos, dev->latency);
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -657,8 +663,8 @@
break;
}
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
+ if (dev->pm_qos)
+ pm_qos_update_request(dev->pm_qos, PM_QOS_DEFAULT_VALUE);
if (r == 0)
r = num;
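
The changes above replace the platform set_mpu_wkup_lat() callback with a pm_qos request that is raised only while a transfer is in flight and relaxed back to PM_QOS_DEFAULT_VALUE afterwards. A hedged sketch of the same pattern as a self-contained module, using only the 3.0-era pm_qos_params calls this patch already relies on (the 200 us figure is just an example value):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/pm_qos_params.h>

    static struct pm_qos_request_list qos_req;

    static int __init qos_sketch_init(void)
    {
        /* No constraint until a transfer actually starts */
        pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

        pm_qos_update_request(&qos_req, 200);   /* cap MPU wake-up latency at ~200 us */
        /* ... perform the latency-sensitive transfer here ... */
        pm_qos_update_request(&qos_req, PM_QOS_DEFAULT_VALUE);  /* release the constraint */

        return 0;
    }

    static void __exit qos_sketch_exit(void)
    {
        pm_qos_remove_request(&qos_req);
    }

    module_init(qos_sketch_init);
    module_exit(qos_sketch_exit);
    MODULE_LICENSE("GPL");
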
@@ -810,15 +816,13 @@
omap_i2c_isr(int this_irq, void *dev_id)
{
struct omap_i2c_dev *dev = dev_id;
- u16 bits;
u16 stat, w;
int err, count = 0;
if (dev->idle)
return IRQ_NONE;
- bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & bits) {
+ while ((stat = (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG))) & dev->iestate) {
dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat);
if (count++ == 100) {
dev_warn(dev->dev, "Too much work in one IRQ\n");
@@ -954,12 +958,10 @@
continue;
}
if (stat & OMAP_I2C_STAT_ROVR) {
- dev_err(dev->dev, "Receive overrun\n");
- dev->cmd_err |= OMAP_I2C_STAT_ROVR;
+ dev_dbg(dev->dev, "Receive overrun\n");
}
if (stat & OMAP_I2C_STAT_XUDF) {
- dev_err(dev->dev, "Transmit underflow\n");
- dev->cmd_err |= OMAP_I2C_STAT_XUDF;
+ dev_dbg(dev->dev, "Transmit underflow\n");
}
}
@@ -1007,13 +1009,10 @@
goto err_release_region;
}
- if (pdata != NULL) {
+ if (pdata)
speed = pdata->clkrate;
- dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- } else {
+ else
speed = 100; /* Default speed */
- dev->set_mpu_wkup_lat = NULL;
- }
dev->speed = speed;
dev->idle = 1;
@@ -1025,6 +1024,17 @@
goto err_free_mem;
}
+ if (pdata && pdata->needs_wakeup_latency) {
+ dev->pm_qos = kzalloc(sizeof(struct pm_qos_request_list),
+ GFP_KERNEL);
+ if (!dev->pm_qos) {
+ r = -ENOMEM;
+ goto err_unmap;
+ }
+ pm_qos_add_request(dev->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+ }
+
platform_set_drvdata(pdev, dev);
if (cpu_is_omap7xx())
@@ -1040,7 +1050,8 @@
dev->regs = (u8 *) reg_map;
pm_runtime_enable(&pdev->dev);
- omap_i2c_unidle(dev);
+ pm_runtime_get_sync(&pdev->dev);
+ dev->idle = 0;
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
@@ -1059,15 +1070,14 @@
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_4430) {
- dev->fifo_size = 0;
+ dev->fifo_size = (dev->fifo_size / 2);
+ if (dev->rev >= OMAP_I2C_REV_ON_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
- if (dev->set_mpu_wkup_lat != NULL)
+ if (dev->pm_qos)
dev->latency = (1000000 * dev->fifo_size) /
(1000 * speed / 8);
}
@@ -1075,6 +1085,12 @@
/* reset ASAP, clearing any IRQs */
omap_i2c_init(dev);
+ /* Decide what interrupts are needed */
+ dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
+ OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
+ OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
+ (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
+
isr = (dev->rev < OMAP_I2C_REV_2) ? omap_i2c_rev1_isr : omap_i2c_isr;
r = request_irq(dev->irq, isr, 0, pdev->name, dev);
@@ -1111,6 +1127,11 @@
err_unuse_clocks:
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_idle(dev);
+ if (dev->pm_qos) {
+ pm_qos_remove_request(dev->pm_qos);
+ kfree(dev->pm_qos);
+ }
+err_unmap:
iounmap(dev->base);
err_free_mem:
platform_set_drvdata(pdev, NULL);
@@ -1133,6 +1154,10 @@
i2c_del_adapter(&dev->adapter);
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
iounmap(dev->base);
+ if (dev->pm_qos) {
+ pm_qos_remove_request(dev->pm_qos);
+ kfree(dev->pm_qos);
+ }
kfree(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
@@ -1142,18 +1167,16 @@
#ifdef CONFIG_SUSPEND
static int omap_i2c_suspend(struct device *dev)
{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- dev->bus->pm->runtime_suspend(dev);
+ if (dev->power.runtime_auto == false)
+ pm_runtime_put_sync(dev);
return 0;
}
static int omap_i2c_resume(struct device *dev)
{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- dev->bus->pm->runtime_resume(dev);
+ if (dev->power.runtime_auto == false)
+ pm_runtime_get_sync(dev);
return 0;
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 9a58994..d0ec08f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -39,6 +39,7 @@
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
+#include <linux/interrupt.h>
#include "i2c-core.h"
@@ -518,6 +519,7 @@
client->flags = info->flags;
client->addr = info->addr;
client->irq = info->irq;
+ client->ext_master = info->ext_master;
strlcpy(client->name, info->type, sizeof(client->name));
@@ -776,15 +778,27 @@
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
struct i2c_devinfo *devinfo;
+ struct i2c_client *client;
down_read(&__i2c_board_lock);
list_for_each_entry(devinfo, &__i2c_board_list, list) {
- if (devinfo->busnum == adapter->nr
- && !i2c_new_device(adapter,
- &devinfo->board_info))
- dev_err(&adapter->dev,
- "Can't create device at 0x%02x\n",
- devinfo->board_info.addr);
+ if (devinfo->busnum == adapter->nr) {
+ client = i2c_new_device(adapter, &devinfo->board_info);
+ if (!client)
+ dev_err(&adapter->dev,
+ "Can't create device at 0x%02x\n",
+ devinfo->board_info.addr);
+ else {
+ /* Keep track of the newly created device(s)
+ * with external master
+ */
+ if (client->ext_master) {
+ mutex_lock(&adapter->ext_clients_lock);
+ list_add_tail(&client->detected, &adapter->ext_clients);
+ mutex_unlock(&adapter->ext_clients_lock);
+ }
+ }
+ }
}
up_read(&__i2c_board_lock);
}
@@ -838,6 +852,9 @@
mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);
+ mutex_init(&adap->ext_clients_lock);
+ INIT_LIST_HEAD(&adap->ext_clients);
+
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
adap->timeout = HZ;
@@ -1058,6 +1075,16 @@
}
mutex_unlock(&adap->userspace_clients_lock);
+ /* Clear the list of externally controlled clients */
+ mutex_lock(&adap->ext_clients_lock);
+ list_for_each_entry_safe(client, next, &adap->ext_clients,
+ detected) {
+ dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ }
+ mutex_unlock(&adap->ext_clients_lock);
+
/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
@@ -1094,6 +1121,46 @@
}
EXPORT_SYMBOL(i2c_del_adapter);
+/**
+ * i2c_detect_ext_master - Perform some special handling
+ * for externally controlled I2C devices.
+ * For now we only disable the spurious IRQ
+ * @adap: the adapter driving the client
+ * Context: can sleep
+ *
+ * This detects registered I2C devices which are controlled
+ * by a remote/external processor.
+ */
+void i2c_detect_ext_master(struct i2c_adapter *adap)
+{
+ struct i2c_adapter *found;
+ struct i2c_client *client;
+
+ /* First make sure that this adapter was ever added */
+ mutex_lock(&core_lock);
+ found = idr_find(&i2c_adapter_idr, adap->nr);
+ mutex_unlock(&core_lock);
+ if (found != adap) {
+ pr_debug("i2c-core: attempting to process unregistered "
+ "adapter [%s]\n", adap->name);
+ return;
+ }
+
+ /* Disable IRQ(s) automatically registered via HWMOD
+ * for an I2C channel controlled by a remote master
+ */
+ mutex_lock(&adap->ext_clients_lock);
+ list_for_each_entry(client, &adap->ext_clients,
+ detected) {
+ dev_dbg(&adap->dev, "Client detected %s at 0x%x\n",
+ client->name, client->addr);
+ disable_irq(client->irq);
+ }
+ mutex_unlock(&adap->ext_clients_lock);
+
+ return;
+}
+EXPORT_SYMBOL(i2c_detect_ext_master);
/* ------------------------------------------------------------------------- */
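
The ext_master plumbing above lets board code mark an I2C device as owned by a remote processor; i2c_scan_static_board_info() keeps such clients on the adapter's ext_clients list, and i2c_detect_ext_master() then disables the IRQ that HWMOD auto-registered for them. A hedged sketch of how a board file might use this; the device name, bus number and IRQ are invented, and the .ext_master field and i2c_detect_ext_master() exist only with this patch applied:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/i2c.h>

    static struct i2c_board_info ext_devs[] __initdata = {
        {
            I2C_BOARD_INFO("remote-owned-dev", 0x48),
            .irq        = 42,
            .ext_master = 1,    /* device is driven by another processor */
        },
    };

    static void __init board_i2c_init(void)
    {
        /* Registered like any other static board device */
        i2c_register_board_info(1, ext_devs, ARRAY_SIZE(ext_devs));
    }

    /* Once the adapter for bus 1 has been registered, platform code can
     * ask the core to quieten the spurious IRQs of externally owned clients: */
    void board_i2c_quirk(struct i2c_adapter *adap)
    {
        i2c_detect_ext_master(adap);
    }
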
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 6995940..74383b1 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -114,4 +114,6 @@
source "drivers/media/dvb/Kconfig"
+source "drivers/media/video/tiler/Kconfig"
+
endif # MEDIA_SUPPORT
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bb53de7..c474ea7 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -573,6 +573,8 @@
source "drivers/media/video/omap/Kconfig"
+source "drivers/media/video/omapgfx/Kconfig"
+
source "drivers/media/video/bt8xx/Kconfig"
config VIDEO_PMS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index f0fecd6..169d259 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -183,6 +183,8 @@
obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-$(CONFIG_TI_TILER) += tiler/
+obj-$(CONFIG_VIDEO_OMAP_GFX) += omapgfx/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/omapgfx/Kbuild b/drivers/media/video/omapgfx/Kbuild
new file mode 100644
index 0000000..3ba7b68
--- /dev/null
+++ b/drivers/media/video/omapgfx/Kbuild
@@ -0,0 +1,6 @@
+
+gfx_vout_mod-objs := gfx_init.o gfx_io.o gfx_bc.o gfx_tiler.o
+
+obj-$(CONFIG_VIDEO_OMAP_GFX) += gfx_vout_mod.o
+
+EXTRA_CFLAGS += -Idrivers/gpu/pvr
diff --git a/drivers/media/video/omapgfx/Kconfig b/drivers/media/video/omapgfx/Kconfig
new file mode 100644
index 0000000..1d16d8a
--- /dev/null
+++ b/drivers/media/video/omapgfx/Kconfig
@@ -0,0 +1,8 @@
+config VIDEO_OMAP_GFX
+ tristate "OMAP V4L2-GFX driver"
+ select VIDEOBUF_GEN
+ select OMAP2_DSS
+ depends on VIDEO_DEV && (ARCH_OMAP34XX || ARCH_OMAP4)
+ default m
+ ---help---
+ V4L2 GFX support for OMAP based boards.
diff --git a/drivers/media/video/omapgfx/gfx_bc.c b/drivers/media/video/omapgfx/gfx_bc.c
new file mode 100644
index 0000000..619d5d3
--- /dev/null
+++ b/drivers/media/video/omapgfx/gfx_bc.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define LINUX /* Needed by IMG headers */
+#include "pvrmodule.h"
+#include "img_defs.h"
+#include "servicesext.h"
+#include "kernelbuffer.h"
+#include "gfx_bc.h"
+#include "v4gfx.h"
+
+#define DEVICE_COUNT 1
+
+#define BCLOGNM "v4l2-gfx bc: "
+
+#define BCERR(fmt, arg...) printk(KERN_ERR BCLOGNM fmt, ## arg)
+
+#define BCLOG(fmt, arg...) \
+do { \
+ if (debug >= 1) \
+ printk(KERN_INFO BCLOGNM fmt, ## arg); \
+} while (0)
+
+
+struct bc_buffer {
+ u32 size;
+ unsigned long *paddrp; /* physical addr. array */
+ PVRSRV_SYNC_DATA *pvr_sync_data;
+};
+
+struct gfx_bc_devinfo {
+ struct bc_buffer bc_buf[VIDEO_MAX_FRAME];
+ int ref;
+ int num_bufs;
+ int ref_cnt;
+
+ /* PVR data types */
+ IMG_UINT32 pvr_id;
+ BUFFER_INFO pvr_bcinfo;
+ PVRSRV_BC_SRV2BUFFER_KMJTABLE pvr_s2b_jt;
+};
+
+static struct gfx_bc_devinfo *g_devices[DEVICE_COUNT] = { NULL };
+static PVRSRV_BC_BUFFER2SRV_KMJTABLE pvr_b2s_jt; /* Jump table from driver to SGX */
+
+/*
+ * Service to Buffer Device API - this section covers the entry points from
+ * the SGX kernel services to this driver
+ */
+static PVRSRV_ERROR s2b_open_bc_device(IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE *hdevicep)
+{
+ struct gfx_bc_devinfo *devinfo;
+
+ BCLOG("+%s %d\n", __func__, (int)ui32DeviceID);
+
+#ifdef MULTIPLEBUFFERCLASSDEVICESUPPORTED
+ if (ui32DeviceID >= DEVICE_COUNT) {
+ BCERR("Attempting to open device %d, max device id is %d\n",
+ ui32DeviceID, DEVICE_COUNT-1);
+ return -EINVAL;
+
+ }
+ devinfo = g_devices[ui32DeviceID];
+#else
+ devinfo = g_devices[0];
+#endif
+ *hdevicep = (IMG_HANDLE)devinfo;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR s2b_close_bc_device(IMG_UINT32 ui32DeviceID,
+ IMG_HANDLE hdevice)
+{
+ PVR_UNREFERENCED_PARAMETER(hdevice);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR s2b_get_bc_buffer(IMG_HANDLE hdevice,
+ IMG_UINT32 bufno,
+ PVRSRV_SYNC_DATA *pvr_sync_data,
+ IMG_HANDLE *hbufferp)
+{
+ struct gfx_bc_devinfo *devinfo;
+ BCLOG("+%s\n", __func__);
+
+ if (!hdevice || !hbufferp)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ devinfo = (struct gfx_bc_devinfo *) hdevice;
+
+ if (bufno < devinfo->pvr_bcinfo.ui32BufferCount) {
+ devinfo->bc_buf[bufno].pvr_sync_data = pvr_sync_data;
+ *hbufferp = (IMG_HANDLE) &devinfo->bc_buf[bufno];
+
+ } else {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR s2b_get_bc_info(IMG_HANDLE hdevice, BUFFER_INFO *bcinfop)
+{
+ struct gfx_bc_devinfo *devinfo = NULL;
+ int rv = 0;
+
+ if (!hdevice || !bcinfop) {
+ rv = PVRSRV_ERROR_INVALID_PARAMS;
+ } else {
+ devinfo = (struct gfx_bc_devinfo *) hdevice;
+ *bcinfop = devinfo->pvr_bcinfo;
+
+ BCLOG("ui32BufferCount =%d",
+ (int)devinfo->pvr_bcinfo.ui32BufferCount);
+ BCLOG("pixelformat =%d",
+ (int)devinfo->pvr_bcinfo.pixelformat);
+ BCLOG("ui32Width =%d",
+ (int)devinfo->pvr_bcinfo.ui32Width);
+ BCLOG("ui32Height =%d",
+ (int)devinfo->pvr_bcinfo.ui32Height);
+ BCLOG("ui32ByteStride =%d",
+ (int)devinfo->pvr_bcinfo.ui32ByteStride);
+ BCLOG("ui32BufferDeviceID =%d",
+ (int)devinfo->pvr_bcinfo.ui32BufferDeviceID);
+ BCLOG("ui32Flags = %d",
+ (int)devinfo->pvr_bcinfo.ui32Flags);
+
+ }
+ BCLOG("-%s %d (0x%x)\n", __func__, rv, (int)devinfo);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR s2b_get_buffer_addr(IMG_HANDLE hdevice,
+ IMG_HANDLE hbuffer,
+ IMG_SYS_PHYADDR **sysaddrpp,
+ IMG_UINT32 *sizebytesp,
+ IMG_VOID **cpuvaddrpp,
+ IMG_HANDLE *osmapinfop,
+ IMG_BOOL *iscontiguousp,
+ IMG_UINT32 *pui32TilingStride)
+{
+ struct bc_buffer *bc_buf;
+ PVRSRV_ERROR rv = PVRSRV_OK;
+ BCLOG("+%s\n", __func__);
+
+ if (!hdevice || !hbuffer || !sysaddrpp || !sizebytesp)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ bc_buf = (struct bc_buffer *)hbuffer;
+ *cpuvaddrpp = NULL;
+ *sizebytesp = bc_buf->size;
+
+ if (bc_buf->paddrp) {
+ *iscontiguousp = IMG_FALSE;
+ *sysaddrpp = (IMG_SYS_PHYADDR *)bc_buf->paddrp;
+ *osmapinfop = IMG_NULL;
+ *pui32TilingStride = 0;
+
+ BCLOG("+%s paddrp[0] 0x%x, vaddr = 0x%x, sizebytes = %d",
+ __func__, (int)bc_buf->paddrp[0],
+ (int)*cpuvaddrpp, (int)*sizebytesp);
+
+ } else {
+ rv = PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+ return rv;
+}
+
+/*
+ * Rest of the functions
+ */
+static PVRSRV_PIXEL_FORMAT v4l2_to_pvr_pixfmt(u32 v4l2pixelfmt)
+{
+ PVRSRV_PIXEL_FORMAT pvr_fmt;
+
+ switch (v4l2pixelfmt) {
+ case V4L2_PIX_FMT_RGB565:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_RGB565;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_RGB888;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_NV12;
+ break;
+ default:
+ pvr_fmt = PVRSRV_PIXEL_FORMAT_UNKNOWN;
+ }
+ return pvr_fmt;
+}
+
+static int gfx_bc_release_device_resources(int id)
+{
+ struct gfx_bc_devinfo *devinfo;
+
+ devinfo = g_devices[id];
+ if (devinfo == NULL)
+ return -ENOENT;
+
+ if (!devinfo->num_bufs)
+ return 0;
+
+ devinfo->num_bufs = 0;
+ devinfo->pvr_bcinfo.pixelformat = PVRSRV_PIXEL_FORMAT_UNKNOWN;
+ devinfo->pvr_bcinfo.ui32Width = 0;
+ devinfo->pvr_bcinfo.ui32Height = 0;
+ devinfo->pvr_bcinfo.ui32ByteStride = 0;
+ devinfo->pvr_bcinfo.ui32BufferDeviceID = id;
+ devinfo->pvr_bcinfo.ui32Flags = 0;
+ devinfo->pvr_bcinfo.ui32BufferCount = 0;
+
+ return 0;
+}
+
+static int gfx_bc_register(int id)
+{
+ struct gfx_bc_devinfo *devinfo;
+ int rv = 0;
+ BCLOG("+%s\n", __func__);
+
+ devinfo = g_devices[id];
+
+ if (devinfo) {
+ devinfo->ref_cnt++;
+ BCLOG("%s device already registered\n", __func__);
+ rv = 0;
+ goto end;
+ }
+
+ devinfo = (struct gfx_bc_devinfo *)
+ kzalloc(sizeof(*devinfo), GFP_KERNEL);
+ if (!devinfo) {
+ rv = -ENOMEM;
+ goto end;
+ }
+ BCLOG("%s devinfo id=%d addr=0x%x\n", __func__, id, (int)devinfo);
+
+ devinfo->pvr_bcinfo.pixelformat = PVRSRV_PIXEL_FORMAT_UNKNOWN;
+ devinfo->pvr_bcinfo.ui32Width = 0;
+ devinfo->pvr_bcinfo.ui32Height = 0;
+ devinfo->pvr_bcinfo.ui32ByteStride = 0;
+ devinfo->pvr_bcinfo.ui32BufferDeviceID = id;
+ devinfo->pvr_bcinfo.ui32Flags = 0;
+ devinfo->pvr_bcinfo.ui32BufferCount = devinfo->num_bufs;
+
+ devinfo->pvr_s2b_jt.ui32TableSize =
+ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE);
+ devinfo->pvr_s2b_jt.pfnOpenBCDevice = s2b_open_bc_device;
+ devinfo->pvr_s2b_jt.pfnCloseBCDevice = s2b_close_bc_device;
+ devinfo->pvr_s2b_jt.pfnGetBCBuffer = s2b_get_bc_buffer;
+ devinfo->pvr_s2b_jt.pfnGetBCInfo = s2b_get_bc_info;
+ devinfo->pvr_s2b_jt.pfnGetBufferAddr = s2b_get_buffer_addr;
+
+ if (pvr_b2s_jt.pfnPVRSRVRegisterBCDevice(&devinfo->pvr_s2b_jt,
+ &devinfo->pvr_id) != PVRSRV_OK) {
+ BCLOG("RegisterBCDevice failed\n");
+ rv = -EIO;
+ goto end;
+ }
+
+ BCLOG("my device id: %d\n", (int)devinfo->pvr_id);
+
+ devinfo->ref_cnt++;
+ g_devices[id] = devinfo;
+end:
+ BCLOG("-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int gfx_bc_unregister(int id)
+{
+ int rv = 0;
+ struct gfx_bc_devinfo *devinfo;
+
+ devinfo = g_devices[id];
+ if (devinfo == NULL) {
+ rv = -ENODEV;
+ goto end;
+ }
+
+ devinfo->ref_cnt--;
+
+ if (devinfo->ref_cnt) {
+ rv = -EAGAIN;
+ goto end;
+ }
+
+ if (pvr_b2s_jt.pfnPVRSRVRemoveBCDevice(devinfo->pvr_id) != PVRSRV_OK) {
+ rv = -EIO;
+ goto end;
+ }
+
+ kfree(devinfo);
+ g_devices[id] = NULL;
+
+end:
+ return rv;
+}
+
+#define FIELDCOPY(dst, src, field) { (dst)->field = (src)->field; }
+
+#define BC_BUF_PARAMS_COPY(dst, src) { \
+ FIELDCOPY(dst, src, count); \
+ FIELDCOPY(dst, src, width); \
+ FIELDCOPY(dst, src, height); \
+ FIELDCOPY(dst, src, pixel_fmt); \
+ FIELDCOPY(dst, src, stride); \
+ FIELDCOPY(dst, src, size); \
+ }
+
+static void gfx_bc_params2_to_common(struct bc_buf_params2 *p,
+ struct bc_buf_params_common *pc)
+{
+ BC_BUF_PARAMS_COPY(pc, p);
+}
+
+/*
+ * Validate the bc_buf_params and get the PVR pixel format
+ *
+ * We shouldn't need to do any further validation of the V4L2 pixelformat
+ * properties as this should have been taken care of in the appropriate V4L2
+ * ioctl handlers.
+ */
+static int gfx_bc_validateparams(
+ int id,
+ struct bc_buf_params_common *p,
+ struct gfx_bc_devinfo **devinfop,
+ PVRSRV_PIXEL_FORMAT *pvr_pix_fmtp)
+{
+ struct gfx_bc_devinfo *devinfo;
+ int rv = 0;
+
+ devinfo = g_devices[id];
+ if (devinfo == NULL) {
+ BCLOG("%s: no such device %d", __func__, id);
+ rv = -ENODEV;
+ }
+
+ /* validate a series of params */
+ if (p->count <= 0) {
+ BCLOG("%s: invalid count", __func__);
+ rv = -EINVAL;
+ }
+
+ *pvr_pix_fmtp = v4l2_to_pvr_pixfmt(p->pixel_fmt);
+ if (*pvr_pix_fmtp == PVRSRV_PIXEL_FORMAT_UNKNOWN) {
+ BCLOG("%s: invalid pixel format", __func__);
+ rv = -EINVAL;
+ }
+
+ *devinfop = rv != 0 ? NULL : devinfo;
+ return rv;
+}
+
+/*
+ * API for the V4L2 component
+ */
+int bc_init(void)
+{
+ int id, rv;
+ BCLOG("+%s\n", __func__);
+
+ if (!PVRGetBufferClassJTable(&pvr_b2s_jt)) {
+ BCERR("no jump table to SGX APIs\n");
+ rv = -EIO;
+ goto end;
+ }
+
+ for (id = 0; id < DEVICE_COUNT; id++) {
+ rv = gfx_bc_register(id);
+ if (rv != 0) {
+ BCERR("can't register BC service\n");
+ goto end;
+ }
+ }
+
+end:
+ BCLOG("-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+void bc_cleanup(void)
+{
+ int id;
+ for (id = 0; id < DEVICE_COUNT; id++) {
+ if (gfx_bc_release_device_resources(id) != 0)
+ BCERR("can't b/c device resources: %d\n", id);
+ if (gfx_bc_unregister(id) != 0)
+ BCERR("can't un-register BC service\n");
+ }
+}
+
+int bc_setup_complete(int id, struct bc_buf_params2 *p)
+{
+ /* Fn called after successful bc_setup() so id should be valid */
+ struct gfx_bc_devinfo *devinfo = g_devices[id];
+ if (p->count != devinfo->num_bufs) {
+ BCLOG("+%s: Count doesn't match\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int bc_setup_buffer(int id, struct bc_buf_params2 *p, unsigned long *paddrp)
+{
+ int idx;
+ /* Fn called after successful bc_setup() so id should be valid */
+ struct gfx_bc_devinfo *devinfo = g_devices[id];
+ idx = devinfo->num_bufs;
+ if (unlikely(idx >= VIDEO_MAX_FRAME))
+ return -ENOENT;
+
+ devinfo->num_bufs++;
+ devinfo->pvr_bcinfo.ui32BufferCount = devinfo->num_bufs;
+
+ memset(&devinfo->bc_buf[idx], 0, sizeof(devinfo->bc_buf[idx]));
+ devinfo->bc_buf[idx].paddrp = paddrp;
+ devinfo->bc_buf[idx].size = p->size;
+ devinfo->bc_buf[idx].pvr_sync_data = IMG_NULL;
+ return 0;
+}
+
+int bc_setup(int id, struct bc_buf_params2 *p)
+{
+ struct gfx_bc_devinfo *devinfo;
+ int rv = 0;
+ PVRSRV_PIXEL_FORMAT pvr_pix_fmt;
+ struct bc_buf_params_common pc;
+
+ BCLOG("+%s\n", __func__);
+
+ gfx_bc_params2_to_common(p, &pc);
+ rv = gfx_bc_validateparams(id, &pc, &devinfo, &pvr_pix_fmt);
+ if (rv != 0)
+ goto end;
+
+ p->stride = 4096; /* Tiler stride */
+ p->size = p->height * p->stride;
+ if (p->pixel_fmt == V4L2_PIX_FMT_NV12)
+ p->size += (p->height / 2) * p->stride; /* UV size */
+
+ devinfo->num_bufs = 0; /* See bc_setup_buffer */
+
+ devinfo->pvr_bcinfo.pixelformat = pvr_pix_fmt;
+ devinfo->pvr_bcinfo.ui32Width = p->width;
+ devinfo->pvr_bcinfo.ui32Height = p->height;
+ devinfo->pvr_bcinfo.ui32ByteStride = p->stride;
+ devinfo->pvr_bcinfo.ui32BufferDeviceID = id;
+ /* I'm not 100% sure these flags are right but here goes */
+ devinfo->pvr_bcinfo.ui32Flags =
+ PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE |
+ PVRSRV_BC_FLAGS_YUVCSC_BT601;
+
+ BCLOG("buffers: count=%d, w=%d, h=%d, stride=%d, sz=%d fmt=%d\n",
+ p->count, p->width, p->height, p->stride, p->size, pvr_pix_fmt);
+end:
+ BCLOG("-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+/*
+ * The caller of this API will ensure that the arguments are valid
+ */
+int bc_sync_status(int id, int bufidx)
+{
+ struct gfx_bc_devinfo *devinfo = g_devices[id];
+ int ui32ReadOpsPending, ui32ReadOpsComplete;
+
+ ui32ReadOpsPending =
+ devinfo->bc_buf[bufidx].pvr_sync_data->ui32ReadOpsPending;
+ ui32ReadOpsComplete =
+ devinfo->bc_buf[bufidx].pvr_sync_data->ui32ReadOpsComplete;
+
+ return ui32ReadOpsComplete == ui32ReadOpsPending ? 1 : 0;
+}
+
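
bc_setup() above fixes the TILER stride at 4096 bytes, sizes each buffer as height * stride, and adds height / 2 extra rows for the NV12 UV plane. A small stand-alone check of that arithmetic (the 720-line frame is only an example):

    #include <stdio.h>

    /* Mirrors the sizing done in bc_setup(); not driver code */
    static unsigned bc_buffer_size(unsigned height, int is_nv12)
    {
        const unsigned stride = 4096;            /* Tiler stride */
        unsigned size = height * stride;

        if (is_nv12)
            size += (height / 2) * stride;       /* UV plane */
        return size;
    }

    int main(void)
    {
        printf("720p NV12:   %u bytes\n", bc_buffer_size(720, 1)); /* 4423680 */
        printf("720p RGB565: %u bytes\n", bc_buffer_size(720, 0)); /* 2949120 */
        return 0;
    }
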
diff --git a/drivers/media/video/omapgfx/gfx_bc.h b/drivers/media/video/omapgfx/gfx_bc.h
new file mode 100644
index 0000000..ea2bf22
--- /dev/null
+++ b/drivers/media/video/omapgfx/gfx_bc.h
@@ -0,0 +1,76 @@
+/**********************************************************************
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ ******************************************************************************/
+
+#ifndef __V4L2_GFX_BC_H__
+#define __V4L2_GFX_BC_H__
+
+#include <media/v4l2-dev.h>
+
+struct bc_buf_params_common {
+ int count; /*number of buffers */
+ int width; /*buffer width in pixel, multiple of 32 */
+ int height; /*buffer height in pixel */
+ u32 pixel_fmt; /* V4L2 buffer pixel format */
+ int stride;
+ int size;
+};
+
+struct bc_buf_params {
+ int count; /*number of buffers (in) */
+ int width; /*buffer width in pixel, multiple of 32 (in) */
+ int height; /*buffer height in pixel (in) */
+ u32 pixel_fmt; /* V4L2 buffer pixel format (in) */
+ int stride; /*(out) */
+ int size; /*(out) */
+};
+
+struct bc_buf_params2 {
+ int count; /*number of buffers (in) */
+ int width; /*buffer width in pixel, multiple of 32 (in) */
+ int height; /*buffer height in pixel (in) */
+ u32 pixel_fmt; /* V4L2 buffer pixel format (in) */
+ int stride; /*(in) */
+ int size; /*(out) */
+};
+extern int bc_init(void);
+extern void bc_cleanup(void);
+
+/* bc_setup
+ *
+ * This API will validate the buffer parameters in order to setup a
+ * buffer class device. Buffers should be added with subsequent calls to
+ * bc_setup_buffer()
+ */
+extern int bc_setup(int id, struct bc_buf_params2 *p);
+
+/* bc_setup_buffer
+ *
+ * Only called after a successful bc_setup(), add a physical buffer reference
+ * to this device
+ */
+extern int bc_setup_buffer(
+ int id, struct bc_buf_params2 *p, unsigned long *paddr);
+
+/* bc_setup_complete
+ *
+ * Called after all physical buffers have been added to the device
+ */
+extern int bc_setup_complete(int id, struct bc_buf_params2 *p);
+
+/* bc_sync_status
+ *
+ * Return the synchronization status of this device's buffer
+ *
+ * Return values:
+ * 0 SGX still has pending operations on the buffer
+ * 1 SGX done with the buffer
+ */
+extern int bc_sync_status(int id, int bufidx);
+#endif
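
The comments above imply a fixed call order: bc_setup() to validate the geometry and fill in stride/size, one bc_setup_buffer() per physical buffer, bc_setup_complete() once every buffer has been added, and bc_sync_status() before reusing a buffer SGX may still be reading. A hedged kernel-side sketch of that sequence; the device id 0, the 1280x720 NV12 geometry and the page-address arrays are illustrative only:

    #include <linux/videodev2.h>
    #include "gfx_bc.h"

    static int example_bc_bringup(unsigned long *page_addrs[], int nbufs)
    {
        struct bc_buf_params2 p = {
            .count     = nbufs,
            .width     = 1280,
            .height    = 720,
            .pixel_fmt = V4L2_PIX_FMT_NV12,
        };
        int i, r;

        r = bc_setup(0, &p);            /* validates params, fills p.stride/p.size */
        if (r)
            return r;

        for (i = 0; i < nbufs; i++) {   /* hand over each buffer's page list */
            r = bc_setup_buffer(0, &p, page_addrs[i]);
            if (r)
                return r;
        }

        return bc_setup_complete(0, &p);
    }

    /* Later, before recycling buffer idx, check SGX has finished with it */
    static int example_can_reuse(int idx)
    {
        return bc_sync_status(0, idx);  /* 1 == SGX done with the buffer */
    }
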
diff --git a/drivers/media/video/omapgfx/gfx_init.c b/drivers/media/video/omapgfx/gfx_init.c
new file mode 100644
index 0000000..14ee80f
--- /dev/null
+++ b/drivers/media/video/omapgfx/gfx_init.c
@@ -0,0 +1,297 @@
+/*
+ * drivers/media/video/omap/v4gfx.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include <linux/omap_v4l2_gfx.h> /* private ioctls */
+
+#include <media/v4l2-ioctl.h>
+
+#include "v4gfx.h"
+#include "gfx_bc.h"
+
+MODULE_AUTHOR("Texas Instruments.");
+MODULE_DESCRIPTION("OMAP V4L2 GFX driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Device node will be: /dev/video<VOUT_DEVICENODE_SUFFIX>
+ * See also /sys/devices/virtual/video4linux/<node>/name which will be
+ * whatever the value of VOUT_NAME is
+ */
+#define VOUT_DEVICENODE_SUFFIX 100
+
+static struct gbl_v4gfx *gbl_dev;
+
+int debug; /* is used outside this compilation unit too */
+module_param(debug, int, 0644);
+
+/*
+ * If bypass is set then buffer streaming operations will be bypassed. This
+ * lets us measure the raw performance of the stack above the V4L2
+ * driver.
+ */
+static int bypass;
+module_param(bypass, int, 0644);
+
+
+static int bypass_vidioc_qbuf(
+ struct file *file, void *fh, struct v4l2_buffer *buf) { return 0; }
+
+static int bypass_vidioc_dqbuf(
+ struct file *file, void *fh, struct v4l2_buffer *buf) { return 0; }
+
+static int bypass_vidioc_streamon(
+ struct file *file, void *fh, enum v4l2_buf_type i) { return 0; }
+
+static int bypass_vidioc_streamoff(
+ struct file *file, void *fh, enum v4l2_buf_type i) { return 0; }
+
+static long bypass_vidioc_default(
+ struct file *file, void *fh, int cmd, void *arg)
+{
+ struct v4l2_gfx_buf_params *parms = (struct v4l2_gfx_buf_params *)arg;
+ int rv = 0;
+
+ switch (cmd) {
+ case V4L2_GFX_IOC_CONSUMER:
+ break;
+ case V4L2_GFX_IOC_ACQ:
+ /* In bypass mode, default to the first buffer */
+ parms->bufid = 0;
+ break;
+ case V4L2_GFX_IOC_REL:
+ break;
+ default:
+ rv = -EINVAL;
+ }
+ return rv;
+}
+
+/*
+ * If the module is put in bypass mode the following ioctls
+ * are effectively nops
+ */
+static void v4gfx_enable_bypass(void)
+{
+ v4gfx_ioctl_ops.vidioc_qbuf = bypass_vidioc_qbuf;
+ v4gfx_ioctl_ops.vidioc_dqbuf = bypass_vidioc_dqbuf;
+ v4gfx_ioctl_ops.vidioc_streamon = bypass_vidioc_streamon;
+ v4gfx_ioctl_ops.vidioc_streamoff = bypass_vidioc_streamoff;
+ v4gfx_ioctl_ops.vidioc_default = bypass_vidioc_default;
+}
+
+static void v4gfx_cleanup_device(struct v4gfx_device *vout)
+{
+ struct video_device *vfd;
+
+ if (!vout)
+ return;
+ vfd = vout->vfd;
+
+ if (vfd) {
+ if (vfd->minor == -1) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+
+ v4gfx_tiler_buffer_free(vout, vout->buffer_allocated, 0);
+ kfree(vout);
+}
+
+static int driver_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct gbl_v4gfx *dev = container_of(v4l2_dev, struct
+ gbl_v4gfx, v4l2_dev);
+ int k;
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ v4gfx_cleanup_device(dev->vouts[k]);
+
+ kfree(gbl_dev);
+ return 0;
+}
+
+static int driver_probe(struct platform_device *pdev)
+{
+ printk(KERN_INFO "Probing: " VOUT_NAME);
+ return 0;
+}
+
+static int v4gfx_create_instance(struct v4gfx_device **voutp)
+{
+ int r = 0;
+ struct v4gfx_device *vout = NULL;
+ struct video_device *vfd = NULL;
+
+ vout = kzalloc(sizeof(struct v4gfx_device), GFP_KERNEL);
+ if (vout == NULL) {
+ r = -ENOMEM;
+ goto end;
+ }
+ mutex_init(&vout->lock);
+ spin_lock_init(&vout->vbq_lock);
+ /* TODO set this to an invalid value, need to change unit test though */
+ vout->bpp = RGB565_BPP;
+ vout->gbl_dev = gbl_dev;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ init_timer(&vout->acquire_timer);
+ vout->acquire_timer.function = v4gfx_acquire_timer;
+ vout->acquire_timer.data = (unsigned long)vout;
+
+ init_waitqueue_head(&vout->sync_done);
+ init_waitqueue_head(&vout->consumer_wait);
+
+ vfd = vout->vfd = video_device_alloc();
+ if (!vfd)
+ goto end;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+ vfd->vfl_type = VFL_TYPE_GRABBER;
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &v4gfx_ioctl_ops;
+ vfd->fops = &v4gfx_fops;
+ vfd->minor = -1;
+ vfd->debug = debug;
+
+ r = video_register_device(vfd, VFL_TYPE_GRABBER,
+ VOUT_DEVICENODE_SUFFIX);
+ if (r < 0)
+ goto end;
+
+ video_set_drvdata(vfd, vout);
+
+ *voutp = vout;
+ printk(KERN_INFO VOUT_NAME ":video device registered\n");
+ return 0;
+end:
+
+ if (vfd)
+ video_device_release(vfd);
+
+ kfree(vout); /* safe with null vout */
+
+ return r;
+}
+
+static void v4gfx_delete_instance(
+ struct v4l2_device *v4l2_dev, struct v4gfx_device *vout)
+{
+ v4l2_info(v4l2_dev, "unregistering /dev/video%d\n", vout->vfd->num);
+ video_unregister_device(vout->vfd);
+ v4gfx_buffer_array_free(vout, vout->buffer_allocated);
+ kfree(vout);
+ return;
+}
+
+static struct platform_driver v4gfx_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .probe = driver_probe,
+ .remove = driver_remove,
+};
+
+static int module_init_v4gfx(void)
+{
+ int rv;
+ bool v4l2_dev_registered = false;
+ bool bc_dev_registered = false;
+
+ if (bypass) {
+ printk(KERN_INFO VOUT_NAME ":Enable bypass mode\n");
+ v4gfx_enable_bypass();
+ }
+
+ rv = platform_driver_register(&v4gfx_driver);
+ if (rv != 0) {
+ printk(KERN_ERR VOUT_NAME ":platform_driver_register failed\n");
+ goto end;
+ }
+
+ gbl_dev = kzalloc(sizeof(struct gbl_v4gfx), GFP_KERNEL);
+ if (gbl_dev == NULL) {
+ rv = -ENOMEM;
+ goto end;
+ }
+
+ snprintf(gbl_dev->v4l2_dev.name, sizeof(gbl_dev->v4l2_dev.name),
+ "%s-%03d", VOUT_NAME, VOUT_DEVICENODE_SUFFIX);
+
+ rv = v4l2_device_register(NULL, &gbl_dev->v4l2_dev);
+ if (rv != 0) {
+ printk(KERN_ERR VOUT_NAME ":v4l2_device_register failed\n");
+ goto end;
+ }
+ v4l2_dev_registered = true;
+
+ rv = v4gfx_create_instance(&gbl_dev->vouts[0]);
+ if (rv != 0)
+ goto end;
+
+ rv = bc_init();
+ if (rv != 0)
+ goto end;
+
+ bc_dev_registered = true;
+
+ printk(KERN_INFO VOUT_NAME ":OMAP V4L2 GFX driver loaded ok\n");
+ return rv;
+end:
+ printk(KERN_INFO VOUT_NAME ":Error %d loading OMAP V4L2 GFX driver\n",
+ rv);
+
+ if (bc_dev_registered)
+ bc_cleanup();
+
+ if (v4l2_dev_registered)
+ v4l2_device_unregister(&gbl_dev->v4l2_dev);
+
+ kfree(gbl_dev); /* gbl_dev can be null */
+
+ return rv;
+}
+
+static void module_exit_v4gfx(void)
+{
+ bc_cleanup();
+
+ v4gfx_delete_instance(&gbl_dev->v4l2_dev, gbl_dev->vouts[0]);
+
+ v4l2_device_unregister(&gbl_dev->v4l2_dev);
+
+ kfree(gbl_dev);
+
+ platform_driver_unregister(&v4gfx_driver);
+}
+
+module_init(module_init_v4gfx);
+module_exit(module_exit_v4gfx);
diff --git a/drivers/media/video/omapgfx/gfx_io.c b/drivers/media/video/omapgfx/gfx_io.c
new file mode 100644
index 0000000..e753b4e
--- /dev/null
+++ b/drivers/media/video/omapgfx/gfx_io.c
@@ -0,0 +1,1329 @@
+/*
+ * drivers/media/video/omap/v4gfx.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/omap_v4l2_gfx.h> /* private ioctls */
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#include "v4gfx.h"
+#include "gfx_bc.h"
+
+#define V4GFX_WAIT_DEQUE 1 /* Poll buffer sync status during dq */
+#define V4GFX_WAIT_UNLOCK 2 /* Poll buffer sync status from render loop */
+/*
+ * V4GFX_WAITMETHOD is used to select between how we wait for SGX to release
+ * buffers sent to it.
+ */
+/* #define V4GFX_WAITMETHOD V4GFX_WAIT_DEQUE */
+#define V4GFX_WAITMETHOD V4GFX_WAIT_UNLOCK
+
+#define VID_MAX_WIDTH 2048 /* Largest width */
+#define VID_MAX_HEIGHT 2048 /* Largest height */
+#define VID_MIN_WIDTH 0
+#define VID_MIN_HEIGHT 0
+#define V4GFX_FRAME_UNLOCK_TIMEOUT 16 /* ms */
+
+
+/*
+ * This will enable dumping of the mappings obtained
+ */
+#ifdef V4L2GFX_DUMPMMAP
+#define DUMPMMAP(msg, k, vma, m, pos, p) \
+ printk(KERN_NOTICE \
+ "%s: vm_start+%d = 0x%lx, dma->vmalloc+%d = 0x%lx, w=0x%x\n", \
+ msg, k, vma->vm_start + k, m, (pos + m), p);
+#else
+#define DUMPMMAP(msg, k, vma, m, pos, p)
+#endif
+
+static struct videobuf_queue_ops video_vbq_ops;
+
+static u32 v4gfx_calc_buffer_size(
+ int bpp, u32 width, u32 height, u32 pixelformat);
+static u32 v4gfx_calc_stride(int bpp, u32 width);
+
+/*
+ * List of image formats supported by the SGX buffer-class api
+ */
+static const struct v4l2_fmtdesc gfx_bc_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * OMAP video pipelines interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ *
+ * GFX ?? TODO
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .description = "NV12 - YUV420 format",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(gfx_bc_formats))
+
+int v4gfx_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height =
+ clamp(pix->height, (u32)VID_MIN_HEIGHT, (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == gfx_bc_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt >= NUM_OUTPUT_FORMATS)
+ ifmt = 0; /* Default V4L2_PIX_FMT_RGB565 */
+ pix->pixelformat = gfx_bc_formats[ifmt].pixelformat;
+
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = 1; /* 12 bits per pixel, 1 byte for Y */
+ break;
+ }
+
+ pix->bytesperline = v4gfx_calc_stride(bpp, pix->width);
+ pix->sizeimage = v4gfx_calc_buffer_size(bpp, pix->width, pix->height,
+ pix->pixelformat);
+
+ if (V4L2_PIX_FMT_NV12 == pix->pixelformat)
+ pix->sizeimage += pix->sizeimage >> 1;
+
+ return bpp;
+}
+
+void v4gfx_acquire_timer(unsigned long arg)
+{
+ struct v4gfx_device *vout = (struct v4gfx_device *)arg;
+
+ set_bit(1, &vout->acquire_timedout);
+}
+
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_DEQUE
+static struct videobuf_buffer *v4gfx_get_next_syncframe(
+ struct v4gfx_device *vout)
+{
+ struct videobuf_buffer *buf;
+ mutex_lock(&vout->lock);
+ if (list_empty(&vout->sync_queue)) {
+ mutex_unlock(&vout->lock);
+ return NULL;
+ }
+ buf = list_entry(vout->sync_queue.next, struct videobuf_buffer, queue);
+ mutex_unlock(&vout->lock);
+ return buf;
+}
+
+static int v4gfx_wait_on_pending(struct v4gfx_device *vout, int bufidx)
+{
+ int dqable = 0;
+ int iteration = 0;
+
+ do {
+ dqable = bc_sync_status(0, bufidx);
+ if (!dqable) {
+ /* printk("w-on %d [%d]\n", bufidx, iteration); */
+ if (iteration++ < V4GFX_FRAME_UNLOCK_TIMEOUT) {
+ msleep(1); /* milliseconds */
+ } else {
+ /*printk("t-o %d\n", bufidx); */
+ break; /* Timed out */
+ }
+ }
+/*
+ else {
+ printk("dq-o %d\n", bufidx);
+ }
+ */
+ } while (!dqable);
+
+ return dqable;
+}
+
+static void v4gfx_done_syncframe(struct v4gfx_device *vout,
+ struct videobuf_buffer *sync_frame)
+{
+ struct timeval timevalue = {0};
+ unsigned long flags;
+ mutex_lock(&vout->lock);
+ spin_lock_irqsave(&vout->vbq_lock, flags);
+
+ list_del(&sync_frame->queue);
+
+ do_gettimeofday(&timevalue);
+ sync_frame->ts = timevalue;
+ sync_frame->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&sync_frame->done);
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ mutex_unlock(&vout->lock);
+}
+#endif /* V4GFX_WAIT_DEQUE */
+
+
+static u32 v4gfx_calc_stride(int bpp, u32 width)
+{
+ return PAGE_ALIGN(width * bpp);
+}
+
+static u32 v4gfx_calc_buffer_size(
+ int bpp, u32 width, u32 height, u32 pixelformat)
+{
+ int stride;
+ stride = v4gfx_calc_stride(bpp, width);
+
+ /* for NV12 format, the buffer holds height luma rows plus height / 2 chroma rows */
+ if (V4L2_PIX_FMT_NV12 == pixelformat)
+ return height * 3/2 * stride;
+ else
+ return height * stride;
+}
+
+void v4gfx_buffer_array_free(struct v4gfx_device *vout, int cnt)
+{
+ /* Fn should be robust and callable with args in a dubious state */
+ int i;
+ if (!vout || !cnt)
+ return;
+ if (vout->buf_phys_addr_array) {
+ for (i = 0; i < cnt; i++)
+ kfree(vout->buf_phys_addr_array[i]);
+ kfree(vout->buf_phys_addr_array);
+ vout->buf_phys_addr_array = NULL;
+ }
+}
+
+/*
+ * Allocate a buffer array for all the requested buffers
+ * If there is an allocation failure the function will clean up after itself
+ */
+static int v4gfx_buffer_array_realloc(struct v4gfx_device *vout,
+ int oldcnt, int newcnt)
+{
+ int i;
+
+ if (vout->buf_phys_addr_array)
+ v4gfx_buffer_array_free(vout, oldcnt);
+
+ vout->buf_phys_addr_array =
+ kzalloc(sizeof(unsigned long *) * newcnt, GFP_KERNEL);
+ if (!vout->buf_phys_addr_array)
+ return -ENOMEM;
+
+	/* 2048 is the max image height; 2 pages per line = (2048 * 4) / CPU_PAGE_SIZE */
+ for (i = 0; i < newcnt; i++) {
+ vout->buf_phys_addr_array[i] =
+ kmalloc(sizeof(unsigned long) * 2048 * 2, GFP_KERNEL);
+ if (!vout->buf_phys_addr_array[i]) {
+ v4gfx_buffer_array_free(vout, newcnt);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
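+/*
+ * Record the physical address of every CPU page backing one buffer so the
+ * list can be handed to the buffer-class (bc) API: the Y plane is walked
+ * line by line in steps of tiler_increment, storing each 4 KiB page, and
+ * for NV12 the half-height UV plane is appended in the same way.
+ */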
+static void v4gfx_buffer_array_fill(
+ struct v4gfx_device *vout,
+ int bufno,
+ unsigned long tiler_paddr_in,
+ unsigned long tiler_paddr_uv_in)
+{
+ int buf_phys_idx = 0;
+ int m = 0, i;
+ int cpu_pgwidth;
+ int tiler_increment;
+
+ v4gfx_tiler_image_incr(vout, &cpu_pgwidth, &tiler_increment);
+
+ for (i = 0; i < vout->pix.height; i++) {
+ unsigned long pg, pgend, tiler_paddr;
+
+ tiler_paddr = tiler_paddr_in+m;
+ pg = tiler_paddr;
+ pgend = pg + cpu_pgwidth;
+ do {
+ GFXLOGA(2, "%d %d: = %lx\n", bufno, buf_phys_idx,
+ (long)pg);
+ vout->buf_phys_addr_array[bufno][buf_phys_idx] = pg;
+ pg += 4096;
+ buf_phys_idx++;
+ } while (pg < pgend);
+
+ m += tiler_increment;
+ }
+
+ if (V4L2_PIX_FMT_NV12 == vout->pix.pixelformat) {
+ m = 0;
+ v4gfx_tiler_image_incr_uv(vout, &tiler_increment);
+
+ /* UV buffer is height / 2 */
+ for (i = 0; i < vout->pix.height / 2; i++) {
+ unsigned long pg;
+
+ pg = tiler_paddr_uv_in+m;
+ vout->buf_phys_addr_array[bufno][buf_phys_idx] = pg;
+ m += tiler_increment;
+ buf_phys_idx++;
+ }
+
+ GFXLOGA(1, "nv12 uv: 0x%lx\n", tiler_paddr_uv_in);
+ m += tiler_increment;
+ }
+}
+
+static int v4gfx_frame_lock(struct v4gfx_device *vout, int *bufid)
+{
+ struct videobuf_buffer *oldbuf = NULL;
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_UNLOCK
+ struct timeval timevalue = {0};
+#else /* V4GFX_WAIT_DEQUE */
+ int oldbufid = -1;
+#endif
+ unsigned long flags;
+ int rv = 0;
+
+ mutex_lock(&vout->lock);
+ spin_lock_irqsave(&vout->vbq_lock, flags);
+ if (!vout->streaming || !vout->cur_frm) {
+ GFXLOG(1, V4L2DEV(vout),
+ "%s: ERROR: device not streaming yet\n", __func__);
+ rv = -EAGAIN;
+ goto unlock;
+ }
+
+ /* vout->cur_frm must be set if streaming */
+
+ if (vout->cur_frm == vout->locked_frm) {
+ /*
+ * If this frame has been locked before we will
+ * attempt to get the next buffer in the dma queue.
+ * If there is a next buffer, mark the locked
+ * buffer as done and then promote the next buffer
+ * to the current buffer whilst locking it in the
+ * process.
+ */
+ if (list_empty(&vout->dma_queue)) {
+ *bufid = vout->cur_frm->i;
+			/*
+			 * We can't do anything else here; it will be up to
+			 * the consumer application to decide whether it wants
+			 * to re-render the texture, which depends on what the
+			 * app is doing.
+			 */
+ goto unlock;
+ }
+
+ /* Deactivate the cur_frm */
+ oldbuf = vout->cur_frm;
+
+ vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+
+ list_del(&vout->cur_frm->queue);
+
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+
+ GFXLOG(2, V4L2DEV(vout), "Active frame %d\n", vout->cur_frm->i);
+
+ vout->locked_frm = vout->cur_frm;
+
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_UNLOCK
+ /*
+ * Mark the previous current buffer done and release it for
+ * dequeue
+ */
+ do_gettimeofday(&timevalue);
+ oldbuf->ts = timevalue;
+ oldbuf->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&oldbuf->done);
+#else /* V4GFX_WAIT_DEQUE */
+ oldbufid = oldbuf->i;
+ list_add_tail(&oldbuf->queue, &vout->sync_queue);
+ wake_up_interruptible(&vout->sync_done);
+#endif
+
+ } else {
+ /* First time we've tried to lock this frame */
+ vout->locked_frm = vout->cur_frm;
+		/* Will be marked for dequeue next time */
+ }
+ *bufid = vout->locked_frm->i;
+unlock:
+ spin_unlock_irqrestore(&vout->vbq_lock, flags);
+ mutex_unlock(&vout->lock);
+
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_DEQUE
+/*
+ if (oldbufid != -1)
+ printk("sync_queue + %d\n", oldbufid);
+ */
+#endif
+ return rv;
+}
+
+static int v4gfx_frame_unlock(struct v4gfx_device *vout, int bufidx)
+{
+ struct videobuf_buffer *vbuf;
+ int rv = 0;
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_UNLOCK
+ int iteration = 0;
+#endif
+
+ mutex_lock(&vout->lock);
+ vbuf = vout->locked_frm;
+ if (!vbuf) {
+ GFXLOG(1, V4L2DEV(vout),
+ "%s: ERROR: trying to unlock a non-existent frame\n",
+ __func__);
+ rv = -EINVAL;
+ } else if (vbuf->i != bufidx) {
+ GFXLOG(1, V4L2DEV(vout),
+ "%s: ERROR: trying to unlock wrong frame %d %d\n",
+ __func__, vbuf->i, bufidx);
+ rv = -EINVAL;
+ }
+ mutex_unlock(&vout->lock);
+
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_UNLOCK
+ if (rv != 0)
+ goto end;
+
+ do {
+ /*
+ * Interrogate the buffer class synch data buffer to see if SGX
+ * is done with this buffer
+ */
+ rv = bc_sync_status(0, bufidx);
+ if (rv == 0) {
+ if (iteration++ < V4GFX_FRAME_UNLOCK_TIMEOUT)
+ msleep(1); /* milliseconds */
+ }
+ } while (rv == 0);
+
+ if (iteration >= V4GFX_FRAME_UNLOCK_TIMEOUT) {
+ printk("%s: INFO: timed out\n", __func__);
+ rv = -ETIMEDOUT;
+ } else
+ rv = 0;
+end:
+#endif /* V4GFX_WAIT_UNLOCK */
+ return rv;
+}
+
+/*
+ * Buffer setup function is called by the videobuf layer when the REQBUF
+ * ioctl is called. It is used to set up buffers and return the size and
+ * count of the buffers allocated. After this call, the videobuf layer will
+ * set up the buffer queue depending on the size and count of buffers.
+ */
+static int vbq_ops_buf_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ struct v4gfx_device *vout = q->priv_data;
+ int rv = 0;
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ if (!vout || (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)) {
+ rv = -EINVAL; goto end;
+ }
+
+ *size = vout->buffer_size = v4gfx_calc_buffer_size(
+ vout->bpp,
+ vout->pix.width,
+ vout->pix.height,
+ vout->pix.pixelformat);
+
+ GFXLOG(1, V4L2DEV(vout), "height=%d, size=%d\n",
+ vout->pix.height, *size);
+
+ if (v4gfx_tiler_buffer_setup(vout, count, 0, &vout->pix)) {
+ rv = -ENOMEM; goto end;
+ }
+
+end:
+ GFXLOG(1, V4L2DEV(vout), "Exiting %s\n", __func__);
+ return rv;
+}
+
+/*
+ * This function will be called when the VIDIOC_QBUF ioctl is called.
+ * It prepares buffers before they are given out for display. This function
+ * would convert a user-space virtual address into a physical address if the
+ * userptr memory exchange mechanism were used.
+ */
+static int vbq_ops_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct v4gfx_device *vout = q->priv_data;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+
+ }
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+/*
+ * Buffer queue function will be called from the videobuf layer when _QBUF
+ * ioctl is called. It is used to enqueue a buffer which is ready to be
+ * displayed.
+ */
+static void vbq_ops_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct v4gfx_device *vout = q->priv_data;
+
+ list_add_tail(&vb->queue, &vout->dma_queue);
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * Buffer release function is called from the videobuf layer to release
+ * buffers which are already allocated
+ */
+static void vbq_ops_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct v4gfx_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static void v4gfx_vm_open(struct vm_area_struct *vma)
+{
+ struct v4gfx_device *vout = vma->vm_private_data;
+
+ GFXLOG(1, V4L2DEV(vout),
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void v4gfx_vm_close(struct vm_area_struct *vma)
+{
+ struct v4gfx_device *vout = vma->vm_private_data;
+
+ GFXLOG(1, V4L2DEV(vout),
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct v4gfx_vm_ops = {
+ .open = v4gfx_vm_open,
+ .close = v4gfx_vm_close,
+};
+
+static int vidfop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4gfx_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+ int i;
+ void *pos;
+ int j = 0, k = 0, m = 0, p = 0, m_increment = 0;
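+	/*
+	 * j: line index, k: offset into the user VMA, m: offset into the
+	 * TILER address space, p: bytes mapped per line (cpu_pgwidth),
+	 * m_increment: TILER address stride per line.
+	 */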
+
+ GFXLOG(1, V4L2DEV(vout), "Entering %s\n", __func__);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ GFXLOG(1, V4L2DEV(vout),
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &v4gfx_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ pos = (void *)vout->buf_phy_addr[i];
+
+ /* get line width */
+ v4gfx_tiler_image_incr(vout, &p, &m_increment);
+
+ for (j = 0; j < vout->pix.height; j++) {
+ /* map each page of the line */
+ DUMPMMAP("Y buffer", k, vma, m, pos, p);
+
+ vma->vm_pgoff = ((unsigned long)pos + m) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ ((unsigned long)pos + m) >> PAGE_SHIFT,
+ p, vma->vm_page_prot))
+ return -EAGAIN;
+ k += p;
+ m += m_increment;
+ }
+ m = 0;
+
+ /* UV Buffer in case of NV12 format */
+ if (V4L2_PIX_FMT_NV12 == vout->pix.pixelformat) {
+ pos = (void *)vout->buf_phy_uv_addr[i];
+
+ v4gfx_tiler_image_incr_uv(vout, &m_increment);
+
+ /* UV buffer is height / 2 */
+ for (j = 0; j < vout->pix.height / 2; j++) {
+ /* map each page of the line */
+ DUMPMMAP("UV buffer", k, vma, m, pos, p);
+
+ vma->vm_pgoff = ((unsigned long)pos + m) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start + k,
+ ((unsigned long)pos + m) >> PAGE_SHIFT,
+ p, vma->vm_page_prot))
+ return -EAGAIN;
+ k += p;
+ m += m_increment;
+ }
+ }
+
+ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ vout->mmap_count++;
+ GFXLOG(1, V4L2DEV(vout), "Exiting %s\n", __func__);
+ return 0;
+}
+
+static int vidfop_release(struct file *file)
+{
+ struct v4gfx_device *vout = file->private_data;
+ struct videobuf_queue *q;
+ unsigned int r = 0;
+
+ GFXLOG(1, V4L2DEV(vout), "Entering %s\n", __func__);
+ GFXLOG(1, V4L2DEV(vout),
+ "current process id/pid is %d\n", current->pid);
+
+ if (!vout)
+ goto end;
+
+ vout->opened = vout->opened ? vout->opened - 1 : 0;
+ if (vout->opened) {
+ r = 0;
+ goto end;
+ }
+
+ clear_bit(1, &vout->producer_ready);
+
+ q = &vout->vbq;
+
+ if (vout->streaming) {
+ del_timer_sync(&vout->acquire_timer);
+ clear_bit(1, &vout->acquire_timedout);
+
+ vout->streaming = false;
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory))
+ videobuf_mmap_free(q);
+ vout->mmap_count = 0;
+
+ /* Free buffers */
+ if (vout->buffer_allocated) {
+ v4gfx_tiler_buffer_free(vout, vout->buffer_allocated, 0);
+ vout->buffer_allocated = 0;
+ }
+
+ memset(&vout->crop, 0, sizeof(vout->crop));
+ memset(&vout->pix, 0, sizeof(vout->pix));
+
+ file->private_data = NULL;
+
+end:
+ GFXLOG(1, V4L2DEV(vout), "Exiting %s\n", __func__);
+ return r;
+}
+
+static int vidfop_open(struct file *file)
+{
+ struct v4gfx_device *vout = NULL;
+ struct videobuf_queue *q;
+ int rv = 0;
+
+ vout = video_drvdata(file);
+	if (vout == NULL) {
+		/* Can't use the "end" label here: the exit log would
+		 * dereference a NULL vout */
+		return -ENODEV;
+	}
+
+ GFXLOG(1, V4L2DEV(vout), "Entering %s : %x\n", __func__, (int)vout);
+ GFXLOG(1, V4L2DEV(vout), "current pid is %d\n", current->pid);
+
+ vout->opened += 1;
+ file->private_data = vout;
+
+ if (vout->opened > 1) {
+ GFXLOG(1, V4L2DEV(vout), "Another opening....\n");
+ goto end;
+ }
+
+ clear_bit(1, &vout->producer_ready);
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = vbq_ops_buf_setup;
+ video_vbq_ops.buf_prepare = vbq_ops_buf_prepare;
+ video_vbq_ops.buf_release = vbq_ops_buf_release;
+ video_vbq_ops.buf_queue = vbq_ops_buf_queue;
+
+ videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+ &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout);
+
+end:
+ GFXLOG(1, V4L2DEV(vout), "Exiting %s :%d\n", __func__, rv);
+ return rv;
+}
+
+/* V4L2 ioctls */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "Entering %s\n", __func__);
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->version = VOUT_VERSION;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ return 0;
+}
+
+static int vidioc_log_status(struct file *file, void *fh)
+{
+ /* struct v4gfx_device *vout = fh; */
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "============== START LOG STATUS ================\n");
+ printk(KERN_INFO "=============== END LOG STATUS =================\n");
+ printk(KERN_INFO "\n");
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct v4gfx_device *vout = fh;
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+ int rv = 0;
+
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS) {
+ rv = -EINVAL;
+ goto end;
+ }
+
+ fmt->flags = gfx_bc_formats[index].flags;
+ strlcpy(fmt->description, gfx_bc_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = gfx_bc_formats[index].pixelformat;
+end:
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ f->fmt.pix = vout->pix;
+
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, 0);
+ return 0;
+
+}
+
+/*
+ * VIDIOC_TRY_FMT ioctl is equivalent to VIDIOC_S_FMT with one
+ * exception: it does not change driver state. It can also be called at any
+ * time, never returning EBUSY.
+ */
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int r;
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ r = v4gfx_try_format(&f->fmt.pix);
+
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, r);
+ return (r >= 0) ? 0 : r;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct v4gfx_device *vout = fh;
+ int rv = 0;
+ int bpp;
+
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ mutex_lock(&vout->lock);
+ if (vout->streaming) {
+ rv = -EBUSY;
+ goto end;
+ }
+
+ bpp = v4gfx_try_format(&f->fmt.pix);
+ if (bpp <= 0) {
+ rv = bpp;
+ goto end;
+ }
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+
+end:
+ mutex_unlock(&vout->lock);
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct bc_buf_params2 bc_params;
+ struct v4gfx_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ unsigned int i;
+ int rv = 0;
+
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) ||
+ (req->count < 0) ||
+ (req->memory != V4L2_MEMORY_MMAP)
+ ) {
+ rv = -EINVAL; goto end;
+ }
+
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ mutex_unlock(&vout->lock);
+ rv = -EBUSY; goto end;
+ }
+
+ /*
+ * TODO A count value of zero frees all buffers, after aborting or
+ * finishing any DMA in progress, an implicit VIDIOC_STREAMOFF.
+ */
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ mutex_unlock(&vout->lock);
+ rv = -EBUSY; goto end;
+ }
+
+ v4gfx_tiler_buffer_free(vout, vout->buffer_allocated, 0);
+ vout->buffer_allocated = 0;
+
+ videobuf_mmap_free(q);
+ }
+
+ bc_params.count = req->count;
+ bc_params.width = vout->pix.width;
+ bc_params.height = vout->pix.height;
+ bc_params.pixel_fmt = vout->pix.pixelformat;
+/* bc_params.stride = vout->pix.bytesperline; */
+ rv = bc_setup(0, &bc_params);
+	if (rv < 0) {
+		GFXLOG(1, V4L2DEV(vout),
+			"+%s bc_setup() failed %d\n", __func__, rv);
+		mutex_unlock(&vout->lock);
+		goto end;
+	}
+
+ /*
+ * Note that the actual buffer allocation is done in
+ * vbq_ops_buf_setup
+ */
+ rv = videobuf_reqbufs(q, req);
+ if (rv < 0) {
+ mutex_unlock(&vout->lock);
+ goto end;
+ }
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ INIT_LIST_HEAD(&vout->sync_queue);
+
+ /*
+ * The realloc will free the old array and allocate a new one
+ */
+ rv = v4gfx_buffer_array_realloc(vout, vout->buffer_allocated,
+ req->count);
+ if (rv < 0) {
+ mutex_unlock(&vout->lock);
+ goto end;
+ }
+
+ vout->memory = req->memory;
+ vout->buffer_allocated = req->count;
+
+ for (i = 0; i < req->count; i++) {
+
+ v4gfx_buffer_array_fill(vout, i,
+ vout->buf_phy_addr[i],
+ V4L2_PIX_FMT_NV12 == vout->pix.pixelformat ?
+ vout->buf_phy_uv_addr[i] : 0);
+
+ bc_setup_buffer(0, &bc_params, vout->buf_phys_addr_array[i]);
+ }
+ bc_setup_complete(0, &bc_params);
+
+ mutex_unlock(&vout->lock);
+end:
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct v4gfx_device *vout = fh;
+ int rv;
+
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ rv = videobuf_querybuf(&vout->vbq, b);
+
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct v4gfx_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ int rv = 0;
+
+ GFXLOG(1, V4L2DEV(vout), "qbuf buf: %d\n", buf->index);
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) ||
+ (buf->index >= vout->buffer_allocated) ||
+ (q->bufs[buf->index]->memory != buf->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buf->memory) {
+ if ((buf->length < vout->pix.sizeimage) ||
+ (0 == buf->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ rv = videobuf_qbuf(q, buf);
+
+ mutex_lock(&vout->lock);
+ if (vout->streaming && vout->acquire_timeout_ms) {
+ del_timer(&vout->acquire_timer);
+ mod_timer(&vout->acquire_timer,
+ jiffies + msecs_to_jiffies(vout->acquire_timeout_ms));
+ }
+ mutex_unlock(&vout->lock);
+
+ GFXLOG(2, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct v4gfx_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ int rv = 0;
+ int nonblocking = file->f_flags & O_NONBLOCK ? 1 : 0;
+
+ GFXLOG(2, V4L2DEV(vout), "dqbuf buf: %x (%d)\n",
+ (int)buf, nonblocking);
+
+ mutex_lock(&vout->lock);
+ if (!vout->streaming) {
+ mutex_unlock(&vout->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&vout->lock);
+
+#if V4GFX_WAITMETHOD == V4GFX_WAIT_DEQUE
+{
+ struct videobuf_buffer *sync_frame = NULL;
+
+ wait_event_interruptible(vout->sync_done,
+ !list_empty(&vout->sync_queue));
+
+ sync_frame = v4gfx_get_next_syncframe(vout);
+
+ if (sync_frame) {
+ (void)v4gfx_wait_on_pending(vout, sync_frame->i);
+ v4gfx_done_syncframe(vout, sync_frame);
+ } else {
+ /* Can be from an interrupted task */
+ printk(KERN_INFO "No sync frame\n");
+ }
+}
+#endif
+
+ rv = videobuf_dqbuf(q, buf, nonblocking);
+
+ GFXLOG(2, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type i)
+{
+ struct v4gfx_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ int rv = 0;
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ rv = -EBUSY;
+ goto end_unlock;
+ }
+
+ vout->cur_frm = NULL;
+ vout->locked_frm = NULL;
+
+ rv = videobuf_streamon(q);
+ if (rv < 0)
+ goto end_unlock;
+
+ if (list_empty(&vout->dma_queue)) {
+ rv = -EIO;
+ goto end_unlock;
+ }
+
+ vout->streaming = true;
+
+ /* Activate the next current buffer */
+ vout->cur_frm =
+ list_entry(vout->dma_queue.next, struct videobuf_buffer, queue);
+ list_del(&vout->cur_frm->queue);
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+
+ set_bit(1, &vout->producer_ready);
+ wake_up_interruptible(&vout->consumer_wait);
+
+end_unlock:
+ mutex_unlock(&vout->lock);
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+
+ return rv;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type i)
+{
+ struct v4gfx_device *vout = fh;
+	int rv = 0;
+
+ mutex_lock(&vout->lock);
+ if (!vout->streaming) {
+ rv = -EINVAL;
+ goto end;
+ }
+
+ del_timer_sync(&vout->acquire_timer);
+ clear_bit(1, &vout->acquire_timedout);
+
+ clear_bit(1, &vout->producer_ready);
+
+ vout->streaming = false;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ INIT_LIST_HEAD(&vout->sync_queue);
+
+ videobuf_streamoff(&vout->vbq);
+ videobuf_queue_cancel(&vout->vbq);
+end:
+ mutex_unlock(&vout->lock);
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct v4gfx_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct v4gfx_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ GFXLOG(1, V4L2DEV(vout), "g_crop w:%d,h:%d\n",
+ crop->c.width, crop->c.height);
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "Entering %s\n", __func__);
+ vout->crop = crop->c;
+ return 0;
+}
+
+static long vidioc_default(struct file *file, void *fh, int cmd, void *arg)
+{
+ int rv = 0;
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "Entering %s (c=0x%x)\n", __func__, cmd);
+
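+	/*
+	 * Driver-private ioctls implementing the producer/consumer handshake:
+	 * CONSUMER blocks until the producer starts streaming, PRODUCER
+	 * passes the producer-side flags, ACQ locks the current frame for the
+	 * consumer (returning its buffer id and crop), and REL releases it.
+	 */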
+ switch (cmd) {
+ case V4L2_GFX_IOC_CONSUMER:
+ {
+ struct v4l2_gfx_consumer_params *parms =
+ (struct v4l2_gfx_consumer_params *)arg;
+ if (parms->type != V4L2_GFX_CONSUMER_WAITSTREAM)
+ return -EINVAL;
+
+ clear_bit(1, &vout->acquire_timedout);
+
+ rv = wait_event_interruptible(vout->consumer_wait,
+ test_bit(1, &vout->producer_ready));
+ mutex_lock(&vout->lock);
+ if (rv == -ERESTARTSYS) {
+ /*
+ * This condition is hit when the user process
+ * generates a signal, when we return this value the
+ * process will continue to block on the ioctl
+ */
+ GFXLOG(1, V4L2DEV(vout), "Woke by signal: %d\n",
+ ERESTARTSYS);
+ } else {
+ vout->acquire_timeout_ms = parms->acquire_timeout_ms;
+ }
+ mutex_unlock(&vout->lock);
+ break;
+
+ }
+ case V4L2_GFX_IOC_INFO:
+ {
+ struct v4l2_gfx_info_params *parms =
+ (struct v4l2_gfx_info_params *)arg;
+ parms->opencnt = vout->opened;
+ break;
+ }
+ case V4L2_GFX_IOC_PRODUCER:
+ {
+ struct v4l2_gfx_producer_params *parms =
+ (struct v4l2_gfx_producer_params *)arg;
+ vout->producer_flags = parms->flags;
+ if (!(vout->producer_flags & V4L2_GFX_PRODUCER_MASK_OPEN)) {
+ /*
+ * We decrement the count here because the Android
+ * mediaserver threads won't close the V4L2 device
+ */
+ if (vout->opened)
+ vout->opened--;
+ }
+ break;
+ }
+ case V4L2_GFX_IOC_ACQ:
+ {
+ struct v4l2_gfx_buf_params *parms =
+ (struct v4l2_gfx_buf_params *)arg;
+ int bufid = -1;
+ int timedout;
+ rv = v4gfx_frame_lock(vout, &bufid);
+ if (!rv) {
+ parms->bufid = bufid;
+ parms->crop_top = vout->crop.top;
+ parms->crop_left = vout->crop.left;
+ parms->crop_width = vout->crop.width;
+ parms->crop_height = vout->crop.height;
+ GFXLOG(3, V4L2DEV(vout), "%d:%d:%d:%d:%d\n",
+				parms->bufid,
+				parms->crop_top,
+				parms->crop_left,
+				parms->crop_width,
+ parms->crop_height);
+ }
+ timedout = test_and_clear_bit(1, &vout->acquire_timedout);
+ if (timedout) {
+ GFXLOG(1, V4L2DEV(vout), "ACQ Timed out\n");
+ rv = -ETIMEDOUT;
+ }
+ mutex_lock(&vout->lock);
+ if (!vout->streaming) {
+ GFXLOG(1, V4L2DEV(vout), "ACQ stream off\n");
+ rv = -ENODEV;
+ }
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_GFX_IOC_REL:
+ {
+ struct v4l2_gfx_buf_params *parms =
+ (struct v4l2_gfx_buf_params *)arg;
+ int bufid = parms->bufid;
+ rv = v4gfx_frame_unlock(vout, bufid);
+ break;
+ }
+ default:
+ rv = -EINVAL;
+ }
+ GFXLOG(1, V4L2DEV(vout), "Leaving %s (%d)\n", __func__, rv);
+ return rv;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct v4gfx_device *vout = fh;
+ GFXLOG(1, V4L2DEV(vout), "%s: %d\n", __func__, a->id);
+ return 0;
+}
+
+struct v4l2_ioctl_ops v4gfx_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_default = vidioc_default,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+};
+
+const struct v4l2_file_operations v4gfx_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .mmap = vidfop_mmap,
+ .open = vidfop_open,
+ .release = vidfop_release,
+};
+
diff --git a/drivers/media/video/omapgfx/gfx_tiler.c b/drivers/media/video/omapgfx/gfx_tiler.c
new file mode 100644
index 0000000..1e77983
--- /dev/null
+++ b/drivers/media/video/omapgfx/gfx_tiler.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/media/video/omapgfx/gfx_tiler.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "v4gfx.h"
+#include "gfx_bc.h"
+
+#ifdef CONFIG_TILER_OMAP
+#include <mach/tiler.h>
+#define TILER_ALLOCATE_V4L2
+#endif
+
+void v4gfx_tiler_buffer_free(struct v4gfx_device *vout, unsigned int count,
+ unsigned int startindex)
+{
+ int i;
+
+ if (startindex < 0)
+ startindex = 0;
+ if (startindex + count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME - startindex;
+
+ for (i = startindex; i < startindex + count; i++) {
+ if (vout->buf_phy_addr_alloced[i])
+ tiler_free(vout->buf_phy_addr_alloced[i]);
+ if (vout->buf_phy_uv_addr_alloced[i])
+ tiler_free(vout->buf_phy_uv_addr_alloced[i]);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_phy_addr_alloced[i] = 0;
+ vout->buf_phy_uv_addr[i] = 0;
+ vout->buf_phy_uv_addr_alloced[i] = 0;
+ }
+}
+
+/* Allocate the buffers in TILER space. Ideally, the buffers will be ONLY
+   in TILER space, with different rotated views available simply by
+   converting the address.
+ */
+int v4gfx_tiler_buffer_setup(struct v4gfx_device *vout,
+ unsigned int *count, unsigned int startindex,
+ struct v4l2_pix_format *pix)
+{
+ /* startindex is always passed as 0, possibly tidy up? */
+ int i, aligned = 1, bpp;
+ enum tiler_fmt fmt;
+ int rv = 0;
+
+ /* normalize buffers to allocate so we stay within bounds */
+ int start = (startindex < 0) ? 0 : startindex;
+ int n_alloc = (start + *count > VIDEO_MAX_FRAME) ?
+ VIDEO_MAX_FRAME - start : *count;
+
+ GFXLOG(1, V4L2DEV(vout), "+%s\n", __func__);
+ bpp = v4gfx_try_format(pix);
+ if (bpp <= 0) {
+ rv = bpp; /* error condition */
+ goto end;
+ }
+
+ GFXLOG(1, V4L2DEV(vout), "tiler buffer alloc: "
+ "count = %d, start = %d :\n", *count, startindex);
+
+ /* special allocation scheme for NV12 format */
+ if (V4L2_PIX_FMT_NV12 == pix->pixelformat) {
+
+ tiler_alloc_packed_nv12(&n_alloc, ALIGN(pix->width, 128),
+ pix->height,
+ (void **) vout->buf_phy_addr + start,
+ (void **) vout->buf_phy_uv_addr + start,
+ (void **) vout->buf_phy_addr_alloced + start,
+ (void **) vout->buf_phy_uv_addr_alloced + start,
+ aligned);
+
+ } else {
+ /* Only bpp of 1, 2, and 4 is supported by tiler */
+ fmt = (bpp == 1 ? TILFMT_8BIT :
+ bpp == 2 ? TILFMT_16BIT :
+ bpp == 4 ? TILFMT_32BIT : TILFMT_INVALID);
+ if (fmt == TILFMT_INVALID) {
+ rv = -ENOMEM;
+ goto end;
+ }
+
+ tiler_alloc_packed(&n_alloc, fmt, ALIGN(pix->width, 128 / bpp),
+ pix->height,
+ (void **) vout->buf_phy_addr + start,
+ (void **) vout->buf_phy_addr_alloced + start,
+ aligned);
+ }
+
+ GFXLOG(1, V4L2DEV(vout),
+ "allocated %d buffers\n", n_alloc);
+
+ if (n_alloc < *count) {
+ if (n_alloc && (startindex == -1 ||
+ V4L2_MEMORY_MMAP != vout->memory)) {
+ /* TODO: check this condition's logic */
+ v4gfx_tiler_buffer_free(vout, n_alloc, start);
+ *count = 0;
+ rv = -ENOMEM;
+ goto end;
+ }
+ }
+
+ for (i = start; i < start + n_alloc; i++) {
+ GFXLOG(1, V4L2DEV(vout),
+ "y=%08lx (%d) uv=%08lx (%d)\n",
+ vout->buf_phy_addr[i],
+ vout->buf_phy_addr_alloced[i] ? 1 : 0,
+ vout->buf_phy_uv_addr[i],
+ vout->buf_phy_uv_addr_alloced[i] ? 1 : 0);
+ }
+
+ *count = n_alloc;
+end:
+ GFXLOG(1, V4L2DEV(vout), "-%s [%d]\n", __func__, rv);
+ return rv;
+}
+
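+/*
+ * Per-line increments: cpu_pgwidth is the page-aligned number of bytes the
+ * CPU sees per image line, while tiler_increment is how far the TILER
+ * system-space address advances per line; the 64 * TILER_WIDTH constants
+ * reflect the TILER container geometry.
+ */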
+void v4gfx_tiler_image_incr(struct v4gfx_device *vout, int *cpu_pgwidth,
+ int *tiler_increment)
+{
+	/* for NV12, the Y buffer is 1 byte per pixel */
+ if (V4L2_PIX_FMT_NV12 == vout->pix.pixelformat) {
+ *cpu_pgwidth =
+ (vout->pix.width + TILER_PAGE - 1) & ~(TILER_PAGE - 1);
+ *tiler_increment = 64 * TILER_WIDTH;
+ } else {
+ *cpu_pgwidth = (vout->pix.width * vout->bpp + TILER_PAGE - 1) &
+ ~(TILER_PAGE - 1);
+ if (vout->bpp > 1)
+ *tiler_increment = 2 * 64 * TILER_WIDTH;
+ else
+ *tiler_increment = 64 * TILER_WIDTH;
+ }
+}
+
+void v4gfx_tiler_image_incr_uv(struct v4gfx_device *vout, int *tiler_increment)
+{
+ if (vout->pix.pixelformat == V4L2_PIX_FMT_NV12)
+ *tiler_increment = 2 * 64 * TILER_WIDTH;
+ /* Otherwise do nothing */
+}
diff --git a/drivers/media/video/omapgfx/v4gfx.h b/drivers/media/video/omapgfx/v4gfx.h
new file mode 100644
index 0000000..b0b72dd
--- /dev/null
+++ b/drivers/media/video/omapgfx/v4gfx.h
@@ -0,0 +1,171 @@
+/*
+ * drivers/media/video/omapgfx/v4gfx.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __V4L2_GFX_H__
+#define __V4L2_GFX_H__
+
+#include <linux/version.h>
+#include <media/videobuf-core.h>
+#include <media/v4l2-device.h>
+#include <asm/atomic.h>
+
+#define MAX_VOUT_DEV 3
+
+struct gbl_v4gfx {
+ struct mutex mtx;
+ int state;
+ struct v4l2_device v4l2_dev;
+ struct v4gfx_device *vouts[MAX_VOUT_DEV];
+};
+
+/* per-device data structure */
+struct v4gfx_device {
+
+ struct video_device *vfd;
+
+ struct gbl_v4gfx *gbl_dev;
+
+ int bpp; /* bytes per pixel */
+
+ enum v4l2_buf_type type;
+
+ struct v4l2_pix_format pix;
+
+ struct v4l2_rect crop;
+
+ enum v4l2_memory memory; /* how memory is managed for the device */
+
+	/* we don't allow changing the image fmt/size once buffers have
+	 * been allocated
+ */
+ int buffer_allocated; /* count of buffers allocated */
+
+	/* allow reuse of a previously allocated buffer which is big enough */
+ int buffer_size;
+
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+
+ unsigned long buf_phy_uv_addr[VIDEO_MAX_FRAME]; /* NV12 support*/
+
+ /* keep which buffers we actually allocated (via tiler) */
+ unsigned long buf_phy_uv_addr_alloced[VIDEO_MAX_FRAME];
+
+ unsigned long buf_phy_addr_alloced[VIDEO_MAX_FRAME];
+
+	/*
+	 * For each V4L2 buffer requested we will have an array of page
+	 * addresses to give through the buffer class API
+	 */
+ unsigned long **buf_phys_addr_array;
+
+ int mmap_count;
+
+ int opened; /* inc/dec on open/close of the device */
+
+	bool streaming;		/* is streaming in progress? */
+
+ struct mutex lock; /* protect shared data structures in ioctl */
+
+ struct videobuf_buffer *cur_frm;
+
+ struct videobuf_buffer *locked_frm;
+
+ struct videobuf_queue vbq;
+
+ /*
+ * Buffers added by QBUF from the producer application
+ */
+ struct list_head dma_queue;
+
+ /*
+	 * Buffers the consumer application has marked as done but which may
+	 * still be in use by the GPU. DQBUF will examine this queue
+ * for available buffers.
+ */
+ struct list_head sync_queue;
+
+ wait_queue_head_t sync_done;
+
+ unsigned long producer_ready;
+
+ wait_queue_head_t consumer_wait;
+
+ /*
+ * If acquire_timeout_ms is non-zero the acquire_timer will be reset
+	 * when buffers are queued. If the timer expires, -ETIMEDOUT will be
+ * returned via the V4L2_GFX_IOC_ACQ ioctl.
+ */
+ struct timer_list acquire_timer;
+
+ unsigned int acquire_timeout_ms;
+
+ unsigned long acquire_timedout;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+
+ unsigned int producer_flags;
+};
+
+extern int debug;
+
+#define GFXLOG(level, dev, fmt, arg...) \
+do { \
+ if (debug >= level) \
+ printk(KERN_INFO "%s: " fmt, (dev)->name , ## arg); \
+} while (0)
+
+#define GFXLOGA(level, fmt, arg...) \
+do { \
+ if (debug >= level) \
+ printk(KERN_INFO "v4l2-gfx: " fmt, ## arg); \
+} while (0)
+
+/*
+ * Convert local handle to v4l2_dev, currently only a global dev is supported
+ */
+#define V4L2DEV(vout) (&vout->gbl_dev->v4l2_dev)
+
+/* tiler */
+void v4gfx_tiler_buffer_free(
+ struct v4gfx_device *vout, unsigned int count,
+ unsigned int startindex);
+
+int v4gfx_tiler_buffer_setup(struct v4gfx_device *vout,
+ unsigned int *count, unsigned int startindex,
+ struct v4l2_pix_format *pix);
+
+void v4gfx_tiler_image_incr(struct v4gfx_device *vout,
+ int *cpu_pgwidth, int *tiler_increment);
+
+void v4gfx_tiler_image_incr_uv(struct v4gfx_device *vout, int *tiler_increment);
+
+/* v4gfx */
+int v4gfx_try_format(struct v4l2_pix_format *pix);
+void v4gfx_buffer_array_free(struct v4gfx_device *vout, int cnt);
+extern struct v4l2_ioctl_ops v4gfx_ioctl_ops;
+extern const struct v4l2_file_operations v4gfx_fops;
+extern void v4gfx_acquire_timer(unsigned long arg);
+
+/* Other stuff */
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+
+#define VOUT_NAME "v4gfx"
+
+/* configuration macros */
+#define VOUT_MAJOR_VERSION 0
+#define VOUT_MINOR_VERSION 0
+#define VOUT_RELEASE 0
+#define VOUT_VERSION \
+ KERNEL_VERSION(VOUT_MAJOR_VERSION, VOUT_MINOR_VERSION, VOUT_RELEASE)
+
+#endif /* ifndef __V4L2_GFX_H__ */
diff --git a/drivers/media/video/tiler/Kconfig b/drivers/media/video/tiler/Kconfig
new file mode 100644
index 0000000..202f7f8
--- /dev/null
+++ b/drivers/media/video/tiler/Kconfig
@@ -0,0 +1,129 @@
+config HAVE_TI_TILER
+ bool
+ default y
+ depends on ARCH_OMAP4
+
+menuconfig TI_TILER
+ tristate "TI TILER support"
+ default y
+ depends on HAVE_TI_TILER
+ help
+ TILER and TILER-DMM driver for TI chips. The TI TILER device
+ enables video rotation on certain TI chips such as OMAP4 or
+ Netra. Video rotation will be limited without TILER support.
+
+config TILER_GRANULARITY
+ int "Allocation granularity (2^n)"
+ range 1 4096
+ default 128
+ depends on TI_TILER
+ help
+ This option sets the default TILER allocation granularity. It can
+	  be overridden by the tiler.grain boot argument.
+
+ The allocation granularity is the smallest TILER block size (in
+ bytes) managed distinctly by the TILER driver. TILER blocks of any
+ size are managed in chunks of at least this size.
+
+ Must be a 2^n in the range of 1 to 4096; however, the TILER driver
+ may use a larger supported granularity.
+
+ Supported values are: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
+ 2048, 4096.
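+
+	  For example, with the default granularity of 128 a 100-byte TILER
+	  block still occupies a full 128-byte chunk of the container.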
+
+config TILER_CACHE_LIMIT
+ int "Memory limit to cache free pages in MBytes"
+ range 0 128
+ default 40
+ depends on TI_TILER
+ help
+	  This option sets the minimum amount of memory that TILER retains
+	  even if less TILER-allocated memory is in use. The unused memory is
+	  instead kept in a cache to speed up allocation and freeing of
+	  physical pages.
+
+	  This option can be overridden by the tiler.cache boot argument.
+
+	  While TILER will initially use less memory than this limit (starting
+	  from 0), it will not release any memory used until it reaches this
+	  limit. Thereafter, TILER will release any unused memory immediately
+	  as long as usage stays above this threshold.
+
+config TILER_SECURITY
+ int "Process security"
+ range 0 1
+ default 1
+ depends on TI_TILER
+ help
+ This option sets the default TILER process security. It can be
+	  overridden by the tiler.secure boot argument.
+
+	  If process security is enabled (1), the TILER driver uses a separate
+	  TILER buffer address space (for mmap purposes) for each process.
+	  This means that one process cannot simply map another process's
+	  TILER buffer into its memory, even for sharing. However, it can
+	  recreate the buffer by knowing the IDs and secret keys for the
+ TILER blocks involved. This is the preferred configuration.
+
+ Disabling security (0) allows sharing buffers simply by sharing the
+ mmap offset and size. However, because buffers can potentially be
+ shared between processes, it delays resource cleanup while any
+ process has an open TILER device.
+
+config TILER_SSPTR_ID
+ int "Use SSPtr for id"
+ range 0 1
+ default 1
+ depends on TI_TILER
+ help
+ This option sets the default behavior for TILER block ids. It can
+	  be overridden by the tiler.ssptr_id boot argument.
+
+	  If true, the TILER driver uses the system-space (physical) address
+	  (SSPtr) of a TILER block as its unique ID. This may help sharing
+ TILER blocks between co-processors if using a constant key for each
+ block.
+
+ Note that the SSPtr is unique for each TILER block.
+
+config TILER_SECURE
+ bool "Secure TILER build"
+ default n
+ depends on TI_TILER
+ help
+	  This option forces the TILER security features on, bypassing the
+	  module parameters.
+
+	  If set, process security is hardwired on and the ssptr and offset
+	  lookup APIs are removed.
+
+config TILER_EXPOSE_SSPTR
+ bool "Expose SSPtr to userspace"
+ default y
+ depends on TI_TILER
+ help
+	  This option sets whether SSPtrs for blocks are exposed
+ during TILIOC_GBLK ioctls (MemMgr_Alloc APIs). In a secure
+ TILER build, this may be the only way for the userspace code
+ to learn the system-space addresses of TILER blocks.
+
+ You can use this flag to see if the userspace is relying on
+ having access to the SSPtr.
+
+config TILER_ENABLE_NV12
+ bool "Enable NV12 support"
+ default y
+ depends on TI_TILER
+ help
+ This option enables NV12 functionality in the TILER driver.
+
+	  If set, NV12 support will be compiled into the driver and its APIs
+	  will be enabled.
+
+config TILER_ENABLE_USERSPACE
+ bool "Enable userspace API"
+ default y
+ depends on TI_TILER
+ help
+	  This option enables the userspace API. If set, an ioctl interface
+ will be available to users.
diff --git a/drivers/media/video/tiler/Makefile b/drivers/media/video/tiler/Makefile
new file mode 100644
index 0000000..7d4b113
--- /dev/null
+++ b/drivers/media/video/tiler/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_TI_TILER) += tcm/
+
+obj-$(CONFIG_TI_TILER) += tiler_dmm.o
+tiler_dmm-objs = dmm.o
+
+obj-$(CONFIG_TI_TILER) += tiler.o
+tiler-objs = tiler-geom.o tiler-main.o tiler-iface.o tiler-reserve.o tmm-pat.o
+
+ifdef CONFIG_TILER_ENABLE_NV12
+tiler-objs += tiler-nv12.o
+endif
+
+ifdef CONFIG_TILER_ENABLE_USERSPACE
+tiler-objs += tiler-ioctl.o
+endif
diff --git a/drivers/media/video/tiler/_tiler.h b/drivers/media/video/tiler/_tiler.h
new file mode 100644
index 0000000..9da70d0
--- /dev/null
+++ b/drivers/media/video/tiler/_tiler.h
@@ -0,0 +1,184 @@
+/*
+ * _tiler.h
+ *
+ * TI TILER driver internal shared definitions.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TILER_H
+#define _TILER_H
+
+#include <linux/kernel.h>
+#include <mach/tiler.h>
+#include "tcm.h"
+
+#define TILER_FORMATS (TILFMT_MAX - TILFMT_MIN + 1)
+
+/* per process (thread group) info */
+struct process_info {
+ struct list_head list; /* other processes */
+ struct list_head groups; /* my groups */
+ struct list_head bufs; /* my registered buffers */
+ pid_t pid; /* really: thread group ID */
+ u32 refs; /* open tiler devices, 0 for processes
+ tracked via kernel APIs */
+ bool kernel; /* tracking kernel objects */
+};
+
+struct __buf_info {
+ struct list_head by_pid; /* list of buffers per pid */
+ struct tiler_buf_info buf_info;
+ struct mem_info *mi[TILER_MAX_NUM_BLOCKS]; /* blocks */
+};
+
+/* per group info (within a process) */
+struct gid_info {
+ struct list_head by_pid; /* other groups */
+ struct list_head areas; /* all areas in this pid/gid */
+ struct list_head reserved; /* areas pre-reserved */
+ struct list_head onedim; /* all 1D areas in this pid/gid */
+ u32 gid; /* group ID */
+ int refs; /* instances directly using this ptr */
+ struct process_info *pi; /* parent */
+};
+
+/* info for an area reserved from a container */
+struct area_info {
+ struct list_head by_gid; /* areas in this pid/gid */
+ struct list_head blocks; /* blocks in this area */
+ u32 nblocks; /* # of blocks in this area */
+
+ struct tcm_area area; /* area details */
+ struct gid_info *gi; /* link to parent, if still alive */
+};
+
+/* info for a block */
+struct mem_info {
+ struct list_head global; /* reserved / global blocks */
+ struct tiler_block_t blk; /* block info */
+ struct tiler_pa_info pa; /* pinned physical pages */
+ struct tcm_area area;
+ int refs; /* number of times referenced */
+ bool alloced; /* still alloced */
+
+ struct list_head by_area; /* blocks in the same area / 1D */
+ void *parent; /* area info for 2D, else group info */
+};
+
+/* tiler geometry information */
+struct tiler_geom {
+ u32 x_shft; /* unused X-bits (as part of bpp) */
+ u32 y_shft; /* unused Y-bits (as part of bpp) */
+ u32 bpp; /* bytes per pixel */
+ u32 slot_w; /* width of each slot (in pixels) */
+ u32 slot_h; /* height of each slot (in pixels) */
+ u32 bpp_m; /* modified bytes per pixel (=1 for page mode) */
+};
+
+/* methods and variables shared between source files */
+struct tiler_ops {
+ /* block operations */
+ s32 (*alloc) (enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key,
+ u32 gid, struct process_info *pi,
+ struct mem_info **info);
+ s32 (*pin) (enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, u32 usr_addr);
+ void (*reserve_nv12) (u32 n, u32 width, u32 height,
+ u32 gid, struct process_info *pi);
+ void (*reserve) (u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 gid, struct process_info *pi);
+ void (*unreserve) (u32 gid, struct process_info *pi);
+
+ /* block access operations */
+ struct mem_info * (*lock) (u32 key, u32 id, struct gid_info *gi);
+ struct mem_info * (*lock_by_ssptr) (u32 sys_addr);
+ void (*describe) (struct mem_info *i, struct tiler_block_info *blk);
+ void (*unlock_free) (struct mem_info *mi, bool free);
+
+ s32 (*lay_2d) (enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, struct gid_info *gi,
+ struct list_head *pos);
+#ifdef CONFIG_TILER_ENABLE_NV12
+ s32 (*lay_nv12) (int n, u16 w, u16 w1, u16 h, struct gid_info *gi,
+ u8 *p);
+#endif
+ /* group operations */
+ struct gid_info * (*get_gi) (struct process_info *pi, u32 gid);
+ void (*release_gi) (struct gid_info *gi);
+ void (*destroy_group) (struct gid_info *pi);
+
+ /* group access operations */
+ void (*add_reserved) (struct list_head *reserved, struct gid_info *gi);
+ void (*release) (struct list_head *reserved);
+
+ /* area operations */
+ s32 (*analize) (enum tiler_fmt fmt, u32 width, u32 height,
+ u16 *x_area, u16 *y_area, u16 *band, u16 *align);
+
+ /* process operations */
+ void (*cleanup) (void);
+
+ /* geometry operations */
+ void (*xy) (u32 ssptr, u32 *x, u32 *y);
+ u32 (*addr) (enum tiler_fmt fmt, u32 x, u32 y);
+ const struct tiler_geom * (*geom) (enum tiler_fmt fmt);
+
+ /* additional info */
+ const struct file_operations *fops;
+#ifdef CONFIG_TILER_ENABLE_NV12
+ bool nv12_packed; /* whether NV12 is packed into same container */
+#endif
+ u32 page; /* page size */
+ u32 width; /* container width */
+ u32 height; /* container height */
+
+ struct mutex mtx; /* mutex for interfaces and ioctls */
+};
+
+void tiler_iface_init(struct tiler_ops *tiler);
+void tiler_geom_init(struct tiler_ops *tiler);
+void tiler_reserve_init(struct tiler_ops *tiler);
+void tiler_nv12_init(struct tiler_ops *tiler);
+u32 tiler_best2pack(u16 o, u16 a, u16 b, u16 w, u16 *n, u16 *_area);
+void tiler_ioctl_init(struct tiler_ops *tiler);
+struct process_info *__get_pi(pid_t pid, bool kernel);
+void _m_unregister_buf(struct __buf_info *_b);
+s32 tiler_notify_event(int event, void *data);
+void _m_free_process_info(struct process_info *pi);
+
+struct process_info *__get_pi(pid_t pid, bool kernel);
+
+#endif
diff --git a/drivers/media/video/tiler/dmm.c b/drivers/media/video/tiler/dmm.c
new file mode 100644
index 0000000..ce0f07a
--- /dev/null
+++ b/drivers/media/video/tiler/dmm.c
@@ -0,0 +1,277 @@
+/*
+ * dmm.c
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Authors: David Sin <davidsin@ti.com>
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/io.h> /* ioremap() */
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <mach/dmm.h>
+
+#undef __DEBUG__
+
+#define MASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+#define SET_FLD(reg, msb, lsb, val) \
+(((reg) & ~MASK((msb), (lsb))) | (((val) << (lsb)) & MASK((msb), (lsb))))
+
+#ifdef __DEBUG__
+#define DEBUG(x, y) printk(KERN_NOTICE "%s()::%d:%s=(0x%08x)\n", \
+ __func__, __LINE__, x, (s32)y);
+#else
+#define DEBUG(x, y)
+#endif
+
+static struct mutex dmm_mtx;
+
+static struct omap_dmm_platform_data *device_data;
+
+static int dmm_probe(struct platform_device *pdev)
+{
+ if (!pdev || !pdev->dev.platform_data) {
+ printk(KERN_ERR "dmm: invalid platform data\n");
+ return -EINVAL;
+ }
+
+ device_data = pdev->dev.platform_data;
+
+ printk(KERN_INFO "dmm: probe base: %p, irq %d\n",
+ device_data->base, device_data->irq);
+ writel(0x88888888, device_data->base + DMM_TILER_OR__0);
+ writel(0x88888888, device_data->base + DMM_TILER_OR__1);
+
+ return 0;
+}
+
+static struct platform_driver dmm_driver_ldm = {
+ .probe = dmm_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dmm",
+ },
+};
+
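+/*
+ * Program one PAT (page address translator) refill descriptor and wait for
+ * the DMM to consume it: check/clear any pending status, write the target
+ * area, the 16-byte-aligned physical address of the page list and the
+ * control word, then poll DMM_PAT_IRQSTATUS_RAW until the refill completes.
+ */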
+s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
+{
+ s32 ret = -EFAULT;
+ void __iomem *r;
+ u32 v, i;
+
+ /* Only manual refill supported */
+ if (mode != MANUAL)
+ return ret;
+
+ mutex_lock(&dmm_mtx);
+
+ /* Check that the DMM_PAT_STATUS register has not reported an error */
+ r = dmm->base + DMM_PAT_STATUS__0;
+ v = __raw_readl(r);
+ if (WARN(v & 0xFC00, KERN_ERR "Abort dmm refill, bad status\n")) {
+ ret = -EIO;
+ goto refill_error;
+ }
+
+ /* Set "next" register to NULL */
+ r = dmm->base + DMM_PAT_DESCR__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 4, (u32) NULL);
+ __raw_writel(v, r);
+
+ /* Set area to be refilled */
+ r = dmm->base + DMM_PAT_AREA__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 30, 24, pd->area.y1);
+ v = SET_FLD(v, 23, 16, pd->area.x1);
+ v = SET_FLD(v, 14, 8, pd->area.y0);
+ v = SET_FLD(v, 7, 0, pd->area.x0);
+ __raw_writel(v, r);
+ wmb();
+
+#ifdef __DEBUG__
+ printk(KERN_NOTICE "\nx0=(%d),y0=(%d),x1=(%d),y1=(%d)\n",
+ (char)pd->area.x0,
+ (char)pd->area.y0,
+ (char)pd->area.x1,
+ (char)pd->area.y1);
+#endif
+
+ /* First, clear the DMM_PAT_IRQSTATUS register */
+ r = dmm->base + DMM_PAT_IRQSTATUS;
+ __raw_writel(0xFFFFFFFF, r);
+ wmb();
+
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ i = 1000;
+	while (__raw_readl(r) != 0) {
+ if (--i == 0) {
+ printk(KERN_ERR "Cannot clear status register\n");
+ goto refill_error;
+ }
+ udelay(1);
+ }
+
+ /* Fill data register */
+ r = dmm->base + DMM_PAT_DATA__0;
+ v = __raw_readl(r);
+
+	/* pd->data must be 16-byte aligned */
+ BUG_ON(pd->data & 15);
+ v = SET_FLD(v, 31, 4, pd->data >> 4);
+ __raw_writel(v, r);
+ wmb();
+
+ /* Read back PAT_DATA__0 to see if write was successful */
+ i = 1000;
+	while (__raw_readl(r) != pd->data) {
+ if (--i == 0) {
+ printk(KERN_ERR "Write failed to PAT_DATA__0\n");
+ goto refill_error;
+ }
+ udelay(1);
+ }
+
+ r = dmm->base + DMM_PAT_CTRL__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 28, pd->ctrl.ini);
+ v = SET_FLD(v, 16, 16, pd->ctrl.sync);
+ v = SET_FLD(v, 9, 8, pd->ctrl.lut_id);
+ v = SET_FLD(v, 6, 4, pd->ctrl.dir);
+ v = SET_FLD(v, 0, 0, pd->ctrl.start);
+ __raw_writel(v, r);
+ wmb();
+
+ /* Check if PAT_IRQSTATUS_RAW is set after the PAT has been refilled */
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ i = 1000;
+	while ((__raw_readl(r) & 0x3) != 0x3) {
+ if (--i == 0) {
+ printk(KERN_ERR "Status check failed after PAT refill\n");
+ goto refill_error;
+ }
+ udelay(1);
+ }
+
+ /* Again, clear the DMM_PAT_IRQSTATUS register */
+ r = dmm->base + DMM_PAT_IRQSTATUS;
+ __raw_writel(0xFFFFFFFF, r);
+ wmb();
+
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ i = 1000;
+ while (__raw_readl(r) != 0x0) {
+ if (--i == 0) {
+ printk(KERN_ERR "Failed to clear DMM PAT IRQSTATUS\n");
+ goto refill_error;
+ }
+ udelay(1);
+ }
+
+ /* Again, set "next" register to NULL to clear any PAT STATUS errors */
+ r = dmm->base + DMM_PAT_DESCR__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 4, (u32) NULL);
+ __raw_writel(v, r);
+
+ /*
+ * Now, check that the DMM_PAT_STATUS register
+ * has not reported an error before exiting.
+ */
+ r = dmm->base + DMM_PAT_STATUS__0;
+ v = __raw_readl(r);
+ if ((v & 0xFC00) != 0) {
+ printk(KERN_ERR "Abort dmm refill. Operation failed\n");
+ goto refill_error;
+ }
+
+ ret = 0;
+
+refill_error:
+ mutex_unlock(&dmm_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL(dmm_pat_refill);
+
+struct dmm *dmm_pat_init(u32 id)
+{
+ u32 base;
+ struct dmm *dmm;
+ switch (id) {
+ case 0:
+ /* only support id 0 for now */
+ base = DMM_BASE;
+ break;
+ default:
+ return NULL;
+ }
+
+ dmm = kmalloc(sizeof(*dmm), GFP_KERNEL);
+ if (!dmm)
+ return NULL;
+
+ dmm->base = ioremap(base, DMM_SIZE);
+ if (!dmm->base) {
+ kfree(dmm);
+ return NULL;
+ }
+
+ __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__0);
+ __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__1);
+ __raw_writel(0x80808080, dmm->base + DMM_PAT_VIEW_MAP__0);
+ __raw_writel(0x80000000, dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__0);
+ __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__1);
+
+ return dmm;
+}
+EXPORT_SYMBOL(dmm_pat_init);
+
+/**
+ * Clean up the physical address translator.
+ * @param dmm Device data
+ */
+void dmm_pat_release(struct dmm *dmm)
+{
+ if (dmm) {
+ iounmap(dmm->base);
+ kfree(dmm);
+ }
+}
+EXPORT_SYMBOL(dmm_pat_release);
+
+static s32 __init dmm_init(void)
+{
+ mutex_init(&dmm_mtx);
+ return platform_driver_register(&dmm_driver_ldm);
+}
+
+static void __exit dmm_exit(void)
+{
+ mutex_destroy(&dmm_mtx);
+ platform_driver_unregister(&dmm_driver_ldm);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("davidsin@ti.com");
+MODULE_AUTHOR("molnar@ti.com");
+module_init(dmm_init);
+module_exit(dmm_exit);
diff --git a/drivers/media/video/tiler/tcm.h b/drivers/media/video/tiler/tcm.h
new file mode 100644
index 0000000..3189607
--- /dev/null
+++ b/drivers/media/video/tiler/tcm.h
@@ -0,0 +1,341 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+ u16 x;
+ u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+ bool is2d; /* whether area is 1d or 2d */
+ struct tcm *tcm; /* parent */
+ struct tcm_pt p0;
+ struct tcm_pt p1;
+};
+
+struct tcm {
+ u16 width, height; /* container dimensions */
+
+ /* 'pvt' structure shall contain any tcm details (attr) along with
+ linked list of allocated areas and mutex for mutually exclusive access
+ to the list. It may also contain copies of width and height to notice
+ any changes to the publicly available width and height fields. */
+ void *pvt;
+
+ /* function table */
+ s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+ struct tcm_area *area);
+ s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+ s32 (*free) (struct tcm *tcm, struct tcm_area *area);
+ void (*deinit) (struct tcm *tcm);
+};
+
+/*=============================================================================
+ BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementations do NOT have to check the following:
+ *
+ *   - whether the area pointer is NULL
+ *   - whether width and height fit within the container
+ *   - whether the number of pages exceeds the size of the container
+ *
+ */
+
+/**
+ * Template for the <ALGO_NAME>_tcm_init method. Define as:
+ * TCM_INIT(<ALGO_NAME>_tcm_init, <ATTR_TYPE>)
+ *
+ * Allocates and initializes a tiler container manager.
+ *
+ * @param width Width of container
+ * @param height Height of container
+ * @param attr Container manager specific configuration
+ * arguments. Please describe these in
+ * your header file.
+ *
+ * @return Pointer to the allocated and initialized container
+ * manager. NULL on failure. DO NOT leak any memory on
+ * failure!
+ */
+#define TCM_INIT(name, attr_t) \
+struct tcm *name(u16 width, u16 height, typeof(attr_t) *attr);
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm Pointer to container manager.
+ *
+ *			The call frees as much memory as possible; a NULL
+ *			manager is silently ignored.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+ if (tcm)
+ tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param width	Width (in pages) of area to be reserved.
+ * @param height	Height (in pages) of area to be reserved.
+ * @param align	Alignment requirement for the top-left corner of the
+ *			area. Not all values may be supported by the container
+ *			manager, but it must support 0 (equivalent to 1),
+ *			32 and 64.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+ u16 align, struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+ /* align must be a power of 2 */
+ align & (align - 1)) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
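+
+/*
+ * Usage sketch (illustrative only; 'tcm' and the sizes are hypothetical):
+ *
+ *	struct tcm_area area;
+ *	s32 err = tcm_reserve_2d(tcm, 64, 32, 32, &area);
+ *
+ * On success err is 0 and area.p0/area.p1 bound the reserved slots; on
+ * failure area.tcm is left NULL, so a later tcm_free(&area) is a no-op.
+ */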
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param slots Number of (contiguous) slots to reserve.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+ struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area Pointer to area reserved by a prior call to
+ * tcm_reserve_1d or tcm_reserve_2d call, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
+ *
+ * @return 0 on success. Non-0 error code on failure. Also, the tcm
+ * field of the area is set to NULL on success to avoid subsequent
+ * freeing. This call will succeed even if supplying
+ * the area from a failed reserve call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+ s32 res = 0; /* free succeeds by default */
+
+ if (area && area->tcm) {
+ res = area->tcm->free(area->tcm, area);
+ if (res == 0)
+ area->tcm = NULL;
+ }
+
+ return res;
+}
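+
+/*
+ * Lifecycle sketch for 1D reservations (illustrative; 'tcm' and the slot
+ * count are hypothetical, use_slots() is a placeholder for real work):
+ *
+ *	struct tcm_area area;
+ *
+ *	if (!tcm_reserve_1d(tcm, 300, &area)) {
+ *		use_slots(&area);
+ *		tcm_free(&area);
+ *	}
+ *
+ * tcm_free() clears area.tcm on success, so repeated frees are harmless.
+ */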
+
+/*=============================================================================
+ HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter. The 'parent' parameter will get modified to
+ * contain the remaining portion of the area. If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent Pointer to a VALID parent area that will get modified
+ * @param slice Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+ *slice = *parent;
+
+ /* check if we need to slice */
+ if (slice->tcm && !slice->is2d &&
+ slice->p0.y != slice->p1.y &&
+ (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+ /* set end point of slice (start always remains) */
+ slice->p1.x = slice->tcm->width - 1;
+ slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+ /* adjust remaining area */
+ parent->p0.x = 0;
+ parent->p0.y = slice->p1.y + 1;
+ } else {
+ /* mark this as the last slice */
+ parent->tcm = NULL;
+ }
+}
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+ return area && area->tcm &&
+ /* coordinate bounds */
+ area->p1.x < area->tcm->width &&
+ area->p1.y < area->tcm->height &&
+ area->p0.y <= area->p1.y &&
+ /* 1D coordinate relationship + p0.x check */
+ ((!area->is2d &&
+ area->p0.x < area->tcm->width &&
+ area->p0.x + area->p0.y * area->tcm->width <=
+ area->p1.x + area->p1.y * area->tcm->width) ||
+ /* 2D coordinate relationship */
+ (area->is2d &&
+ area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+ u16 i;
+
+ if (a->is2d) {
+ return p->x >= a->p0.x && p->x <= a->p1.x &&
+ p->y >= a->p0.y && p->y <= a->p1.y;
+ } else {
+ i = p->x + p->y * a->tcm->width;
+ return i >= a->p0.x + a->p0.y * a->tcm->width &&
+ i <= a->p1.x + a->p1.y * a->tcm->width;
+ }
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+ return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+ return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+ return area->is2d ?
+ __tcm_area_width(area) * __tcm_area_height(area) :
+ (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+ area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+ if (__tcm_sizeof(a) < num_pg)
+ return -ENOMEM;
+ if (!num_pg)
+ return -EINVAL;
+
+ a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+ a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+ return 0;
+}
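+
+/*
+ * Worked example (hypothetical 256-slot-wide container): limiting an area
+ * whose p0 is (250, 3) to num_pg = 10 gives
+ * p1.x = (250 + 10 - 1) % 256 = 3 and p1.y = 3 + (250 + 10 - 1) / 256 = 4,
+ * i.e. the six slots ending row 3 plus the first four slots of row 4.
+ */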
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var	Name of a local variable of type 'struct
+ *		tcm_area' that will get modified to
+ *		contain each slice.
+ * @param area	The VALID parent area (passed by value). It
+ *		is not modified throughout the loop.
+ * @param safe	Name of a local 'struct tcm_area' scratch
+ *		variable that tracks the not-yet-sliced
+ *		remainder of the parent.
+ */
+#define tcm_for_each_slice(var, area, safe) \
+ for (safe = area, \
+ tcm_slice(&safe, &var); \
+ var.tcm; tcm_slice(&safe, &var))
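+
+/*
+ * Usage sketch (illustrative; 'blk' is a hypothetical, valid 1D area and
+ * process_rows() a placeholder for per-slice work):
+ *
+ *	struct tcm_area slice, scratch;
+ *
+ *	tcm_for_each_slice(slice, blk, scratch)
+ *		process_rows(slice.p0, slice.p1);
+ *
+ * Each 'slice' is a plain 2D rectangle; 'blk' itself is passed by value
+ * and left untouched.
+ */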
+
+#endif
diff --git a/drivers/media/video/tiler/tcm/Makefile b/drivers/media/video/tiler/tcm/Makefile
new file mode 100644
index 0000000..8434607
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TI_TILER) += tcm-sita.o
diff --git a/drivers/media/video/tiler/tcm/_tcm-sita.h b/drivers/media/video/tiler/tcm/_tcm-sita.h
new file mode 100644
index 0000000..20a7d86
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/_tcm-sita.h
@@ -0,0 +1,85 @@
+/*
+ * _tcm-sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "../tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments of the area that lie on the container boundary. Busy
+ * is the number of neighboring slots that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+ u16 neighs; /* number of busy neighbors */
+};
+
+struct sita_pvt {
+ struct mutex mtx;
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+#endif
diff --git a/drivers/media/video/tiler/tcm/tcm-sita.c b/drivers/media/video/tiler/tcm/tcm-sita.c
new file mode 100644
index 0000000..d0784c6
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm-sita.c
@@ -0,0 +1,936 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+
+#include "_tcm-sita.h"
+#include "tcm-sita.h"
+
+#define TCM_ALG_NAME "tcm_sita"
+#include "tcm-utils.h"
+
+#define X_SCAN_LIMITER 1
+#define Y_SCAN_LIMITER 1
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
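+
+/*
+ * For example, ALIGN_DOWN(37, 32) evaluates to 32, whereas the kernel's
+ * ALIGN(37, 32) rounds up to 64; both are only meaningful for power-of-2
+ * alignments.
+ */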
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+#ifdef SCAN_BOTTOM_UP
+static s32 CR_R2L_B2T = CR_FIRST_FOUND;
+static s32 CR_L2R_B2T = CR_DIAGONAL_BALANCE;
+#endif
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+#ifdef SCAN_BOTTOM_UP
+static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+#endif
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ mutex_init(&(pvt->mtx));
+
+ /* Create the per-slot map of parent areas */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ mutex_lock(&(pvt->mtx));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ mutex_unlock(&(pvt->mtx));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
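+
+/*
+ * Initialization sketch (the 256x128 geometry and division point are
+ * hypothetical values, not TILER defaults):
+ *
+ *	struct tcm_pt pt = { .x = 192, .y = 96 };
+ *	struct tcm *tcm = sita_init(256, 128, &pt);
+ *
+ * Passing NULL (or an out-of-range point) for the attribute falls back to
+ * the 3:1 split computed above.
+ */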
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ mutex_lock(&(pvt->mtx));
+ fill_area(tcm, &area, NULL);
+ mutex_unlock(&(pvt->mtx));
+
+ mutex_destroy(&(pvt->mtx));
+
+ for (i = 0; i < tcm->width; i++)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ mutex_lock(&(pvt->mtx));
+#ifdef RESTRICT_1D
+ /* scan within predefined 1D boundary */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, pvt->div_pt.y);
+#else
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+#endif
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ mutex_unlock(&(pvt->mtx));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ mutex_lock(&(pvt->mtx));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ mutex_unlock(&(pvt->mtx));
+ return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ mutex_lock(&(pvt->mtx));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ mutex_unlock(&(pvt->mtx));
+
+ return 0;
+}
+
+/**
+ * Note: In general the coordinates of the scan field are relative to the scan
+ * sweep direction. The scan origin (e.g. top-left corner) is always the p0
+ * member of the field. Therefore, for a scan from the top-left, p0.x <= p1.x
+ * and p0.y <= p1.y; whereas for a scan from the bottom-right, p1.x <= p0.x
+ * and p1.y <= p0.y.
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_r2l_t2b:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x + 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+#ifdef SCAN_BOTTOM_UP
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ /* TODO: Should I check scan area?
+ * Might have to take it as input during initialization
+ */
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_r2l_b2t:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(start_y, end_y))
+ return -ENOSPC;
+
+ /* adjust start_x and start_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field bottom-to-top, right-to-left */
+ for (y = start_y; y >= end_y; y--) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_B2T, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x + 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+#endif
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_l2r_t2b:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x - 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+#ifdef SCAN_BOTTOM_UP
+/**
+ * Raster scan horizontally left to right from bottom to top to find a
+ * place for a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_l2r_b2t:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(start_y, end_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and start_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field bottom-to-top, left-to-right */
+ for (y = start_y; y >= end_y; y--) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_B2T, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x - 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+#endif
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ PA(2, "scan_r2l_b2t_one_dim:", field);
+
+ /**
+ * Currently we only support full width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ if (found == num_slots)
+ break;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
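+
+/*
+ * Worked example (hypothetical 256x128 container with div_pt = (192, 96)):
+ * an aligned request is first scanned left-to-right, top-to-bottom within
+ * (0,0)-(191,95), while an align == 1 request is scanned right-to-left
+ * within (255,0)-(192,95). Only if the preferred region fails, and is
+ * smaller than the container, is the whole container scanned once more.
+ */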
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ PA(2, "fill 2d area", &a);
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h top, left, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+ * If first found is enabled then we stop looking
+ * NOTE: For horizontal bias we always give the first found, because our
+ * scan is horizontal-raster-based and the first candidate will always
+ * have the horizontal bias.
+ */
+ bool first = criteria & (CR_FIRST_FOUND | CR_BIAS_HORIZONTAL);
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+ /* see if this area is better than the best so far */
+
+ /* neighbor check */
+ if ((criteria & CR_MAX_NEIGHS) &&
+ me.neighs > best->neighs)
+ goto better;
+
+ /* vertical bias check */
+ if ((criteria & CR_BIAS_VERTICAL) &&
+ /*
+ * NOTE: not checking if lengths are same, because that does not
+ * find new shoulders on the same row after a fit
+ */
+ LEN(me.a.p0.y, field->p0.y) >
+ LEN(best->a.p0.y, field->p0.y))
+ goto better;
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+ /* this implies that neighs and occupied match */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+ /**
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
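+
+/*
+ * Worked example (hypothetical field spanning (0,0)-(63,31)): a candidate
+ * whose top-left corner is at (16, 8) gets nf->x = 16 * 1000 / 63 = 253
+ * and nf->y = 8 * 1000 / 31 = 258, so candidates nearer the scan origin
+ * score lower.
+ */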
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* clear any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
diff --git a/drivers/media/video/tiler/tcm/tcm-sita.h b/drivers/media/video/tiler/tcm/tcm-sita.h
new file mode 100644
index 0000000..6b604bf
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm-sita.h
@@ -0,0 +1,59 @@
+/*
+ * tcm-sita.h
+ *
+ * SImple Tiler Allocator (SiTA) interface.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_SITA_H
+#define TCM_SITA_H
+
+#include "../tcm.h"
+
+/**
+ * Create a SiTA tiler container manager.
+ *
+ * @param width Container width
+ * @param height Container height
+ * @param attr preferred division point between 64-aligned
+ * allocations (top left), 32-aligned allocations
+ * (top right), and page mode allocations (bottom)
+ *
+ * @return TCM instance
+ */
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+TCM_INIT(sita_init, struct tcm_pt);
+
+#endif /* TCM_SITA_H_ */
diff --git a/drivers/media/video/tiler/tcm/tcm-utils.h b/drivers/media/video/tiler/tcm/tcm-utils.h
new file mode 100644
index 0000000..3fe8f7d
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm-utils.h
@@ -0,0 +1,74 @@
+/*
+ * tcm-utils.h
+ *
+ * Utility functions for implementing TILER container managers.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_UTILS_H
+#define TCM_UTILS_H
+
+#include "../tcm.h"
+
+/* TCM_ALG_NAME must be defined to use the debug methods */
+
+#ifdef DEBUG
+#define IFDEBUG(x) x
+#else
+/* compile-check debug statements even if not DEBUG */
+#define IFDEBUG(x) do { if (0) x; } while (0)
+#endif
+
+#define P(level, fmt, ...) \
+ IFDEBUG(printk(level TCM_ALG_NAME ":%d:%s()" fmt "\n", \
+ __LINE__, __func__, ##__VA_ARGS__))
+
+#define P1(fmt, ...) P(KERN_NOTICE, fmt, ##__VA_ARGS__)
+#define P2(fmt, ...) P(KERN_INFO, fmt, ##__VA_ARGS__)
+#define P3(fmt, ...) P(KERN_DEBUG, fmt, ##__VA_ARGS__)
+
+#define PA(level, msg, p_area) P##level(msg " (%03d %03d)-(%03d %03d)\n", \
+ (p_area)->p0.x, (p_area)->p0.y, (p_area)->p1.x, (p_area)->p1.y)
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+ a->p0.x = x0;
+ a->p0.y = y0;
+ a->p1.x = x1;
+ a->p1.y = y1;
+}
+
+#endif
diff --git a/drivers/media/video/tiler/tiler-geom.c b/drivers/media/video/tiler/tiler-geom.c
new file mode 100644
index 0000000..f95ae5c
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-geom.c
@@ -0,0 +1,372 @@
+/*
+ * tiler-geom.c
+ *
+ * TILER geometry functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include "_tiler.h"
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
+static struct tiler_geom geom[TILER_FORMATS] = {
+ {
+ .x_shft = 0,
+ .y_shft = 0,
+ },
+ {
+ .x_shft = 0,
+ .y_shft = 1,
+ },
+ {
+ .x_shft = 1,
+ .y_shft = 1,
+ },
+ {
+ .x_shft = SLOT_WIDTH_BITS,
+ .y_shft = SLOT_HEIGHT_BITS,
+ },
+};
+
+/* tiler space addressing bitfields */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
+
+#define VIEW_SIZE (1u << (CONT_WIDTH_BITS + CONT_HEIGHT_BITS))
+#define VIEW_MASK (VIEW_SIZE - 1u)
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILER_FMT(x) ((enum tiler_fmt) \
+ ((x >> SHIFT_ACC_MODE) & MASK_ACC_MODE))
+
+#define MASK_VIEW (MASK_X_INVERT | MASK_Y_INVERT | MASK_XY_FLIP)
+
+/* location of the various tiler views in physical address space */
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
+
+bool is_tiler_addr(u32 phys)
+{
+ return phys >= TILVIEW_8BIT && phys < TILVIEW_END;
+}
+EXPORT_SYMBOL(is_tiler_addr);
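+
+/*
+ * With the bit widths above, VIEW_SIZE is 1 << 27 = 0x08000000, so the
+ * four views occupy 0x60000000 (8-bit), 0x68000000 (16-bit), 0x70000000
+ * (32-bit) and 0x78000000 (page mode), ending at 0x80000000. For example,
+ * is_tiler_addr(0x6a000000) is true, and the address falls in the 16-bit
+ * view.
+ */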
+
+u32 tiler_bpp(const struct tiler_block_t *b)
+{
+ enum tiler_fmt fmt = tiler_fmt(b->phys);
+ BUG_ON(fmt == TILFMT_INVALID);
+
+ return geom[fmt].bpp_m;
+}
+EXPORT_SYMBOL(tiler_bpp);
+
+/* return the stride of a tiler-block in tiler space */
+static inline s32 tiler_stride(u32 tsptr)
+{
+ enum tiler_fmt fmt = TILER_FMT(tsptr);
+
+ if (fmt == TILFMT_PAGE)
+ return 0;
+ else if (tsptr & MASK_XY_FLIP)
+ return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
+ else
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
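+
+/*
+ * For instance, with the 8-bit geometry (x_shft = y_shft = 0) an unflipped
+ * view strides 1 << CONT_WIDTH_BITS = 16384 bytes per row, while an
+ * XY-flipped view strides 1 << CONT_HEIGHT_BITS = 8192 bytes.
+ */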
+
+u32 tiler_pstride(const struct tiler_block_t *b)
+{
+ enum tiler_fmt fmt = tiler_fmt(b->phys);
+ BUG_ON(fmt == TILFMT_INVALID);
+
+ /* return the virtual stride for page mode */
+ if (fmt == TILFMT_PAGE)
+ return tiler_vstride(b);
+
+ return tiler_stride(b->phys & ~MASK_VIEW);
+}
+EXPORT_SYMBOL(tiler_pstride);
+
+enum tiler_fmt tiler_fmt(u32 phys)
+{
+ if (!is_tiler_addr(phys))
+ return TILFMT_INVALID;
+
+ return TILER_FMT(phys);
+}
+EXPORT_SYMBOL(tiler_fmt);
+
+/* returns the tiler geometry information for a format */
+static const struct tiler_geom *get_geom(enum tiler_fmt fmt)
+{
+ if (fmt >= TILFMT_MIN && fmt <= TILFMT_MAX)
+ return geom + fmt;
+ return NULL;
+}
+
+/**
+ * Returns the natural x and y coordinates for a pixel at a tiler space
+ * address, that is, the coordinates of the same pixel in the natural
+ * (non-rotated, non-mirrored) view. This allows a tiler pixel to be
+ * uniquely identified in any view orientation.
+ */
+static void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y)
+{
+ u32 x_bits, y_bits, offset;
+ enum tiler_fmt fmt;
+
+ fmt = TILER_FMT(tsptr);
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ offset = (tsptr & VIEW_MASK) >> (geom[fmt].x_shft + geom[fmt].y_shft);
+
+ /* separate coordinate bitfields based on view orientation */
+ if (tsptr & MASK_XY_FLIP) {
+ *x = offset >> y_bits;
+ *y = offset & MASK(y_bits);
+ } else {
+ *x = offset & MASK(x_bits);
+ *y = offset >> x_bits;
+ }
+
+ /* account for mirroring */
+ if (tsptr & MASK_X_INVERT)
+ *x ^= MASK(x_bits);
+ if (tsptr & MASK_Y_INVERT)
+ *y ^= MASK(y_bits);
+}
+
+/* calculate the tiler space address of a pixel in a view orientation */
+static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+ if (x < 0 || x > x_mask || y < 0 || y > y_mask)
+ return 0;
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
+
+void tilview_create(struct tiler_view_t *view, u32 phys, u32 width, u32 height)
+{
+ BUG_ON(!is_tiler_addr(phys));
+
+ view->tsptr = phys & ~MASK_VIEW;
+ view->bpp = geom[TILER_FMT(phys)].bpp_m;
+ view->width = width;
+ view->height = height;
+ view->h_inc = view->bpp;
+ view->v_inc = tiler_stride(view->tsptr);
+}
+EXPORT_SYMBOL(tilview_create);
+
+void tilview_get(struct tiler_view_t *view, struct tiler_block_t *blk)
+{
+ view->tsptr = blk->phys & ~MASK_VIEW;
+ view->bpp = tiler_bpp(blk);
+ view->width = blk->width;
+ view->height = blk->height;
+ view->h_inc = view->bpp;
+ view->v_inc = tiler_stride(view->tsptr);
+}
+EXPORT_SYMBOL(tilview_get);
+
+s32 tilview_crop(struct tiler_view_t *view, u32 left, u32 top, u32 width,
+ u32 height)
+{
+ /* check for valid crop */
+ if (left + width < left || left + width > view->width ||
+ top + height < top || top + height > view->height)
+ return -EINVAL;
+
+ view->tsptr += left * view->h_inc + top * view->v_inc;
+ view->width = width;
+ view->height = height;
+ return 0;
+}
+EXPORT_SYMBOL(tilview_crop);
+
+/* calculate tilerspace address and stride after view orientation change */
+static void reorient(struct tiler_view_t *view, u32 orient)
+{
+ u32 x, y;
+
+ tiler_get_natural_xy(view->tsptr, &x, &y);
+ view->tsptr = tiler_get_address(orient,
+ TILER_FMT(view->tsptr), x, y);
+ view->v_inc = tiler_stride(view->tsptr);
+}
+
+s32 tilview_rotate(struct tiler_view_t *view, s32 rotation)
+{
+ u32 orient;
+
+ if (rotation % 90)
+ return -EINVAL;
+
+ /* normalize rotation to quarters */
+ rotation = (rotation / 90) & 3;
+ if (!rotation)
+ return 0; /* nothing to do */
+
+ /* PAGE mode view cannot be rotated */
+ if (TILER_FMT(view->tsptr) == TILFMT_PAGE)
+ return -EPERM;
+
+ /*
+ * first adjust top-left corner. NOTE: it rotates counter-clockwise:
+ * 0 < 3
+ * v ^
+ * 1 > 2
+ */
+ if (rotation < 3)
+ view->tsptr += (view->height - 1) * view->v_inc;
+ if (rotation > 1)
+ view->tsptr += (view->width - 1) * view->h_inc;
+
+ /* then rotate view itself */
+ orient = view->tsptr & MASK_VIEW;
+
+ /* rotate first 2 quarters */
+ if (rotation & 2) {
+ orient ^= MASK_X_INVERT;
+ orient ^= MASK_Y_INVERT;
+ }
+
+ /* rotate last quarter */
+ if (rotation & 1) {
+ orient ^= (orient & MASK_XY_FLIP) ?
+ MASK_X_INVERT : MASK_Y_INVERT;
+
+ /* swap x & y */
+ orient ^= MASK_XY_FLIP;
+ swap(view->height, view->width);
+ }
+
+ /* finally reorient view */
+ reorient(view, orient);
+ return 0;
+}
+EXPORT_SYMBOL(tilview_rotate);
+
+s32 tilview_flip(struct tiler_view_t *view, bool flip_x, bool flip_y)
+{
+ u32 orient;
+ orient = view->tsptr & MASK_VIEW;
+
+ if (!flip_x && !flip_y)
+ return 0; /* nothing to do */
+
+ /* PAGE mode view cannot be flipped */
+ if (TILER_FMT(view->tsptr) == TILFMT_PAGE)
+ return -EPERM;
+
+ /* adjust top-left corner */
+ if (flip_x)
+ view->tsptr += (view->width - 1) * view->h_inc;
+ if (flip_y)
+ view->tsptr += (view->height - 1) * view->v_inc;
+
+ /* flip view orientation */
+ if (orient & MASK_XY_FLIP)
+ swap(flip_x, flip_y);
+
+ if (flip_x)
+ orient ^= MASK_X_INVERT;
+ if (flip_y)
+ orient ^= MASK_Y_INVERT;
+
+ /* finally reorient view */
+ reorient(view, orient);
+ return 0;
+}
+EXPORT_SYMBOL(tilview_flip);
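+
+/*
+ * View-manipulation sketch (illustrative; 'phys' stands for a hypothetical
+ * tiler alias address of an 8-bit block):
+ *
+ *	struct tiler_view_t view;
+ *
+ *	tilview_create(&view, phys, 640, 480);
+ *	tilview_crop(&view, 16, 16, 320, 240);
+ *	tilview_rotate(&view, 90);
+ *	tilview_flip(&view, true, false);
+ *
+ * Each call only adjusts the view descriptor (tsptr, size, strides); no
+ * pixel data is moved.
+ */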
+
+/* return the alias address for a coordinate */
+static inline u32 alias_address(enum tiler_fmt fmt, u32 x, u32 y)
+{
+ return tiler_get_address(0, fmt, x, y) + TILVIEW_8BIT;
+}
+
+/* get the coordinates for an alias address */
+static inline void alias_xy(u32 ssptr, u32 *x, u32 *y)
+{
+ tiler_get_natural_xy(ssptr & ~MASK_VIEW, x, y);
+}
+
+/* initialize shared geometric data */
+void tiler_geom_init(struct tiler_ops *tiler)
+{
+ struct tiler_geom *g;
+
+ tiler->xy = alias_xy;
+ tiler->addr = alias_address;
+ tiler->geom = get_geom;
+
+ tiler->page = TILER_PAGE;
+ tiler->width = TILER_WIDTH;
+ tiler->height = TILER_HEIGHT;
+
+ /* calculate geometry */
+ for (g = geom; g < geom + TILER_FORMATS; g++) {
+ g->bpp_m = g->bpp = 1 << (g->x_shft + g->y_shft);
+ g->slot_w = 1 << (SLOT_WIDTH_BITS - g->x_shft);
+ g->slot_h = 1 << (SLOT_HEIGHT_BITS - g->y_shft);
+ }
+
+ /* set bpp_m = 1 for page mode as most applications deal in byte data */
+ geom[TILFMT_PAGE].bpp_m = 1;
+}
diff --git a/drivers/media/video/tiler/tiler-iface.c b/drivers/media/video/tiler/tiler-iface.c
new file mode 100644
index 0000000..b7d84d5
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-iface.c
@@ -0,0 +1,340 @@
+/*
+ * tiler-iface.c
+ *
+ * TILER driver interface functions for the TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/fs.h> /* fops */
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kmalloc */
+#include <linux/sched.h> /* current */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <asm/mach/map.h> /* for ioremap_page */
+
+#include "_tiler.h"
+
+static bool security = CONFIG_TILER_SECURITY;
+
+module_param(security, bool, 0644);
+MODULE_PARM_DESC(security,
+ "Separate allocations by different processes into different pages");
+
+static struct list_head procs; /* list of process info structs */
+static struct tiler_ops *ops; /* shared methods and variables */
+
+/*
+ * process_info handling methods
+ * ==========================================================================
+ */
+
+/* get process info, and increment refs for device tracking */
+struct process_info *__get_pi(pid_t pid, bool kernel)
+{
+ struct process_info *pi;
+
+ /*
+ * treat all user processes as the same; kernel contexts are still kept
+ * separate so that kernel-allocated areas are not freed when a user
+ * process closes the tiler driver
+ */
+ if (!security)
+ pid = 0;
+
+ /* find process context */
+ mutex_lock(&ops->mtx);
+ list_for_each_entry(pi, &procs, list) {
+ if (pi->pid == pid && pi->kernel == kernel)
+ goto done;
+ }
+
+ /* create process context */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto done;
+ memset(pi, 0, sizeof(*pi));
+
+ pi->pid = pid;
+ pi->kernel = kernel;
+ INIT_LIST_HEAD(&pi->groups);
+ INIT_LIST_HEAD(&pi->bufs);
+ list_add(&pi->list, &procs);
+done:
+ /* increment reference count */
+ if (pi && !kernel)
+ pi->refs++;
+ mutex_unlock(&ops->mtx);
+ return pi;
+}
+
+/**
+ * Free all info kept by a process: all registered buffers, allocated blocks,
+ * and unreferenced blocks. Any blocks/areas still referenced will move to the
+ * orphaned lists to avoid issues if a new process is created with the same pid.
+ *
+ * caller MUST already have mtx
+ */
+void _m_free_process_info(struct process_info *pi)
+{
+ struct gid_info *gi, *gi_;
+#ifdef CONFIG_TILER_ENABLE_USERSPACE
+ struct __buf_info *_b = NULL, *_b_ = NULL;
+
+ if (!list_empty(&pi->bufs))
+ tiler_notify_event(TILER_DEVICE_CLOSE, NULL);
+
+ /* unregister all buffers */
+ list_for_each_entry_safe(_b, _b_, &pi->bufs, by_pid)
+ _m_unregister_buf(_b);
+#endif
+ BUG_ON(!list_empty(&pi->bufs));
+
+ /* free all allocated blocks, and remove unreferenced ones */
+ list_for_each_entry_safe(gi, gi_, &pi->groups, by_pid)
+ ops->destroy_group(gi);
+
+ BUG_ON(!list_empty(&pi->groups));
+ list_del(&pi->list);
+ kfree(pi);
+}
+
+static void destroy_processes(void)
+{
+ struct process_info *pi, *pi_;
+
+ mutex_lock(&ops->mtx);
+
+ list_for_each_entry_safe(pi, pi_, &procs, list)
+ _m_free_process_info(pi);
+ BUG_ON(!list_empty(&procs));
+
+ mutex_unlock(&ops->mtx);
+}
+
+
+/* initialize tiler interface */
+void tiler_iface_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+ ops->cleanup = destroy_processes;
+
+#ifdef CONFIG_TILER_SECURE
+ security = true;
+#endif
+ INIT_LIST_HEAD(&procs);
+}
+
+/*
+ * Kernel APIs
+ * ==========================================================================
+ */
+
+u32 tiler_virt2phys(u32 usr)
+{
+ pmd_t *pmd;
+ pte_t *ptep;
+ pgd_t *pgd = pgd_offset(current->mm, usr);
+
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ return 0;
+
+ pmd = pmd_offset(pgd, usr);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return 0;
+
+ ptep = pte_offset_map(pmd, usr);
+ if (ptep && pte_present(*ptep))
+ return (*ptep & PAGE_MASK) | (~PAGE_MASK & usr);
+
+ return 0;
+}
+EXPORT_SYMBOL(tiler_virt2phys);
+
+void tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ ops->reserve(n, fmt, width, height, gid, pi);
+}
+EXPORT_SYMBOL(tiler_reservex);
+
+void tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height)
+{
+ tiler_reservex(n, fmt, width, height, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve);
+
+#ifdef CONFIG_TILER_ENABLE_NV12
+void tiler_reservex_nv12(u32 n, u32 width, u32 height,
+ u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ ops->reserve_nv12(n, width, height, gid, pi);
+}
+EXPORT_SYMBOL(tiler_reservex_nv12);
+
+void tiler_reserve_nv12(u32 n, u32 width, u32 height)
+{
+ tiler_reservex_nv12(n, width, height, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve_nv12);
+#endif
+
+s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 gid, pid_t pid)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ BUG_ON(!blk || blk->phys);
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = ops->alloc(fmt, blk->width, blk->height, blk->key, gid, pi, &mi);
+ if (mi) {
+ blk->phys = mi->blk.phys;
+ blk->id = mi->blk.id;
+ }
+ return res;
+}
+EXPORT_SYMBOL(tiler_allocx);
+
+s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt)
+{
+ return tiler_allocx(blk, fmt, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_alloc);
+
+s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 gid,
+ pid_t pid, u32 usr_addr)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ BUG_ON(!blk || blk->phys);
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = ops->pin(fmt, blk->width, blk->height, blk->key, gid, pi, &mi,
+ usr_addr);
+ if (mi) {
+ blk->phys = mi->blk.phys;
+ blk->id = mi->blk.id;
+ }
+ return res;
+
+}
+EXPORT_SYMBOL(tiler_mapx);
+
+s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr)
+{
+ return tiler_mapx(blk, fmt, 0, current->tgid, usr_addr);
+}
+EXPORT_SYMBOL(tiler_map);
+
+s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ struct vm_area_struct *vma, u32 voffs)
+{
+ u32 v, p, len;
+
+ /* mapping must fit into vma */
+ BUG_ON(vma->vm_start > vma->vm_start + voffs ||
+ vma->vm_start + voffs > vma->vm_start + voffs + size ||
+ vma->vm_start + voffs + size > vma->vm_end);
+
+ /* mapping must fit into block */
+ BUG_ON(offs > offs + size || offs + size > tiler_size(blk));
+
+ v = tiler_vstride(blk);
+ p = tiler_pstride(blk);
+
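+ /*
+ * Illustrative note: for 2D blocks the virtual stride (v) can be smaller
+ * than the physical container stride (p), so each row of v bytes is
+ * remapped separately and the physical offset skips p - v bytes between
+ * rows.
+ */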
+ /* remap block portion */
+ len = v - (offs % v); /* initial area to map */
+ while (size) {
+ /* restrict to the size that still needs mapping */
+ if (len > size)
+ len = size;
+
+ vma->vm_pgoff = (blk->phys + offs) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, vma->vm_start + voffs, vma->vm_pgoff,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+ voffs += len;
+ offs += len + p - v;
+ size -= len;
+ len = v; /* subsequent area to map */
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tiler_mmap_blk);
+
+s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ u32 addr, u32 mtype)
+{
+ u32 v, p;
+ u32 len; /* area to map */
+ const struct mem_type *type = get_mem_type(mtype);
+
+ /* mapping must fit into address space */
+ BUG_ON(addr > addr + size);
+
+ /* mapping must fit into block */
+ BUG_ON(offs > offs + size || offs + size > tiler_size(blk));
+
+ v = tiler_vstride(blk);
+ p = tiler_pstride(blk);
+
+ /* move offset and address to end */
+ offs += blk->phys + size;
+ addr += size;
+
+ len = v - (offs % v); /* initial area to map */
+ while (size) {
+ while (len && size) {
+ if (ioremap_page(addr - size, offs - size, type))
+ return -EAGAIN;
+ len -= PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ offs += p - v;
+ len = v; /* subsequent area to map */
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tiler_ioremap_blk);
+
+void tiler_free(struct tiler_block_t *blk)
+{
+ /* find block */
+ struct mem_info *mi = ops->lock(blk->key, blk->id, NULL);
+ if (mi)
+ ops->unlock_free(mi, true);
+ blk->phys = blk->id = 0;
+}
+EXPORT_SYMBOL(tiler_free);
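+
+/*
+ * Illustrative sketch (not part of the driver): a kernel user of the API
+ * above might allocate and release a 2D block roughly as follows; the
+ * dimensions and the use_block() consumer are made up for the example.
+ *
+ *	struct tiler_block_t blk = { .width = 640, .height = 480 };
+ *
+ *	if (!tiler_alloc(&blk, TILFMT_8BIT)) {
+ *		use_block(blk.phys);
+ *		tiler_free(&blk);
+ *	}
+ */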
diff --git a/drivers/media/video/tiler/tiler-ioctl.c b/drivers/media/video/tiler/tiler-ioctl.c
new file mode 100644
index 0000000..b54c39f
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-ioctl.c
@@ -0,0 +1,529 @@
+/*
+ * tiler-ioctl.c
+ *
+ * TILER driver userspace interface functions for TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/fs.h> /* fops */
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kmalloc */
+#include <linux/sched.h> /* current */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <asm/mach/map.h> /* for ioremap_page */
+
+#include "_tiler.h"
+
+static bool ssptr_lookup = true;
+static bool offset_lookup = true;
+
+module_param(ssptr_lookup, bool, 0644);
+MODULE_PARM_DESC(ssptr_lookup,
+ "Allow looking up a block by ssptr - This is a security risk");
+module_param(offset_lookup, bool, 0644);
+MODULE_PARM_DESC(offset_lookup,
+ "Allow looking up a buffer by offset - This is a security risk");
+
+static struct tiler_ops *ops; /* shared methods and variables */
+static struct blocking_notifier_head notifier; /* notifier for events */
+
+/*
+ * Event notification methods
+ * ==========================================================================
+ */
+
+s32 tiler_notify_event(int event, void *data)
+{
+ return blocking_notifier_call_chain(&notifier, event, data);
+}
+
+/*
+ * Buffer handling methods
+ * ==========================================================================
+ */
+
+/* check if an offset is used */
+static bool _m_offs_in_use(u32 offs, u32 length, struct process_info *pi)
+{
+ struct __buf_info *_b;
+ /* have mutex */
+ list_for_each_entry(_b, &pi->bufs, by_pid)
+ if (_b->buf_info.offset < offs + length &&
+ _b->buf_info.offset + _b->buf_info.length > offs)
+ return 1;
+ return 0;
+}
+
+/* get an offset */
+static u32 _m_get_offs(struct process_info *pi, u32 length)
+{
+ static u32 offs = 0xda7a;
+
+ /* ensure no-one is using this offset */
+ while ((offs << PAGE_SHIFT) + length < length ||
+ _m_offs_in_use(offs << PAGE_SHIFT, length, pi)) {
+ /* use a pseudo-random generator to get a new offset to try */
+
+ /* Galois LFSR: 20, 17 */
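+ /* (taps 20 and 17 should give a maximal-length sequence, so
+ candidate offsets do not repeat early) */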
+ offs = (offs >> 1) ^ (u32)((0 - (offs & 1u)) & 0x90000);
+ }
+
+ return offs << PAGE_SHIFT;
+}
+
+/* find and lock a block. process_info is optional */
+static struct mem_info *
+_m_lock_block(u32 key, u32 id, struct process_info *pi) {
+ struct gid_info *gi;
+ struct mem_info *mi;
+
+ /* if process_info is given, look there first */
+ if (pi) {
+ /* have mutex */
+
+ /* find block in process list and free it */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ mi = ops->lock(key, id, gi);
+ if (mi)
+ return mi;
+ }
+ }
+
+ /* if not found or no process_info given, find block in global list */
+ return ops->lock(key, id, NULL);
+}
+
+/* register a buffer */
+static s32 _m_register_buf(struct __buf_info *_b, struct process_info *pi)
+{
+ struct mem_info *mi;
+ struct tiler_buf_info *b = &_b->buf_info;
+ u32 i, num = b->num_blocks, offs;
+
+ /* check validity */
+ if (num > TILER_MAX_NUM_BLOCKS || num == 0)
+ return -EINVAL;
+
+ /* find each block */
+ b->length = 0;
+ for (i = 0; i < num; i++) {
+ mi = _m_lock_block(b->blocks[i].key, b->blocks[i].id, pi);
+ if (!mi) {
+ /* unlock any blocks already found */
+ while (i--)
+ ops->unlock_free(_b->mi[i], false);
+ return -EACCES;
+ }
+ _b->mi[i] = mi;
+
+ /* we don't keep track of ptr and 1D stride so clear them */
+ b->blocks[i].ptr = NULL;
+ b->blocks[i].stride = 0;
+
+ ops->describe(mi, b->blocks + i);
+ b->length += tiler_size(&mi->blk);
+ }
+
+ /* if found all, register buffer */
+ offs = _b->mi[0]->blk.phys & ~PAGE_MASK;
+ b->offset = _m_get_offs(pi, b->length) + offs;
+ b->length -= offs;
+
+ /* have mutex */
+ list_add(&_b->by_pid, &pi->bufs);
+
+ return 0;
+}
+
+/* unregister a buffer */
+void _m_unregister_buf(struct __buf_info *_b)
+{
+ u32 i;
+
+ /* unregister */
+ list_del(&_b->by_pid);
+
+ /* no longer using the blocks */
+ for (i = 0; i < _b->buf_info.num_blocks; i++)
+ ops->unlock_free(_b->mi[i], false);
+
+ kfree(_b);
+}
+
+
+/*
+ * File operations (mmap, ioctl, open, close)
+ * ==========================================================================
+ */
+
+/* mmap tiler buffer into user's virtual space */
+static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct __buf_info *_b;
+ struct tiler_buf_info *b = NULL;
+ u32 i, map_offs, map_size, blk_offs, blk_size, mapped_size;
+ struct process_info *pi = filp->private_data;
+ u32 offs = vma->vm_pgoff << PAGE_SHIFT;
+ u32 size = vma->vm_end - vma->vm_start;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* find tiler buffer to mmap */
+ mutex_lock(&ops->mtx);
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ /* we support mmapping part of a whole tiler buffer */
+ if (offs >= (_b->buf_info.offset & PAGE_MASK) &&
+ offs + size <= PAGE_ALIGN(_b->buf_info.offset +
+ _b->buf_info.length)) {
+ b = &_b->buf_info;
+ break;
+ }
+ }
+ mutex_unlock(&ops->mtx);
+
+ /* we use b to detect whether we found the buffer */
+ if (!b)
+ return -ENXIO;
+
+ /* mmap relevant blocks */
+ blk_offs = _b->buf_info.offset;
+
+ /* start at the beginning of the region */
+ mapped_size = 0;
+ for (i = 0; i < b->num_blocks; i++, blk_offs += blk_size) {
+ blk_size = tiler_size(&_b->mi[i]->blk);
+ /* see if tiler block is inside the requested region */
+ if (offs >= blk_offs + blk_size || offs + size < blk_offs)
+ continue;
+ /* get the offset and map size for this particular block */
+ map_offs = max(offs, blk_offs) - blk_offs;
+ map_size = min(size - mapped_size, blk_size);
+
+ /* mmap block */
+ if (tiler_mmap_blk(&_b->mi[i]->blk, map_offs, map_size, vma,
+ mapped_size))
+ return -EAGAIN;
+
+ /* update mmap region pointer */
+ mapped_size += map_size;
+ }
+ return 0;
+}
+
+/* ioctl handler */
+static long tiler_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+{
+ s32 r;
+ void __user *data = (void __user *)arg;
+ struct process_info *pi = filp->private_data;
+ struct __buf_info *_b;
+ struct tiler_buf_info buf_info = {0};
+ struct tiler_block_info block_info = {0};
+ struct mem_info *mi;
+ u32 phys_addr;
+
+ switch (cmd) {
+ /* allocate block */
+ case TILIOC_GBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ switch (block_info.fmt) {
+ case TILFMT_PAGE:
+ r = ops->alloc(block_info.fmt, block_info.dim.len, 1,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ break;
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ r = ops->alloc(block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (r)
+ return r;
+
+ /* fill out block info */
+ if (mi) {
+ block_info.ptr = NULL;
+ ops->describe(mi, &block_info);
+ }
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+ /* free/unmap block */
+ case TILIOC_FBLK:
+ case TILIOC_UMBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ /* search current process first, then all processes */
+ mutex_lock(&ops->mtx);
+ mi = _m_lock_block(block_info.key, block_info.id, pi);
+ mutex_unlock(&ops->mtx);
+ if (mi)
+ ops->unlock_free(mi, true);
+
+ /* free always succeeds */
+ break;
+ /* get physical address */
+ case TILIOC_GSSP:
+ down_read(&current->mm->mmap_sem);
+ phys_addr = tiler_virt2phys(arg);
+ up_read(&current->mm->mmap_sem);
+ return phys_addr;
+ break;
+ /* map block */
+ case TILIOC_MBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (!block_info.ptr)
+ return -EFAULT;
+
+ r = ops->pin(block_info.fmt, block_info.dim.len, 1,
+ block_info.key, block_info.group_id, pi,
+ &mi, (u32)block_info.ptr);
+ if (r)
+ return r;
+
+ /* fill out block info */
+ if (mi)
+ ops->describe(mi, &block_info);
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+#ifndef CONFIG_TILER_SECURE
+ /* query buffer information by offset */
+ case TILIOC_QBUF:
+ if (!offset_lookup)
+ return -EPERM;
+
+ if (copy_from_user(&buf_info, data, sizeof(buf_info)))
+ return -EFAULT;
+
+ /* find buffer */
+ mutex_lock(&ops->mtx);
+ r = -ENOENT;
+ /* buffer registration is per process */
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+ memcpy(&buf_info, &_b->buf_info,
+ sizeof(buf_info));
+ r = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ops->mtx);
+
+ if (r)
+ return r;
+
+ if (copy_to_user(data, &_b->buf_info, sizeof(_b->buf_info)))
+ return -EFAULT;
+ break;
+#endif
+ /* register buffer */
+ case TILIOC_RBUF:
+ /* save buffer information */
+ _b = kmalloc(sizeof(*_b), GFP_KERNEL);
+ if (!_b)
+ return -ENOMEM;
+ memset(_b, 0, sizeof(*_b));
+
+ if (copy_from_user(&_b->buf_info, data, sizeof(_b->buf_info))) {
+ kfree(_b);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ops->mtx);
+ r = _m_register_buf(_b, pi);
+ mutex_unlock(&ops->mtx);
+
+ if (r) {
+ kfree(_b);
+ return r;
+ }
+
+ /* if we fail to copy the result back to user space, undo registration */
+ if (copy_to_user(data, &_b->buf_info, sizeof(_b->buf_info))) {
+ mutex_lock(&ops->mtx);
+ _m_unregister_buf(_b);
+ mutex_unlock(&ops->mtx);
+ return -EFAULT;
+ }
+ break;
+ /* unregister a buffer */
+ case TILIOC_URBUF:
+ if (copy_from_user(&buf_info, data, sizeof(buf_info)))
+ return -EFAULT;
+
+ /* find buffer */
+ r = -EFAULT;
+ mutex_lock(&ops->mtx);
+ /* buffer registration is per process */
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+ /* only retrieve buffer length */
+ buf_info.length = _b->buf_info.length;
+ _m_unregister_buf(_b);
+ r = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ops->mtx);
+
+ if (r)
+ return r;
+
+ if (copy_to_user(data, &buf_info, sizeof(buf_info)))
+ return -EFAULT;
+ break;
+ /* prereserve blocks */
+ case TILIOC_PRBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (block_info.fmt == TILFMT_8AND16)
+#ifdef CONFIG_TILER_ENABLE_NV12
+ ops->reserve_nv12(block_info.key,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.group_id, pi);
+#else
+ return -EINVAL;
+#endif
+ else
+ ops->reserve(block_info.key,
+ block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.group_id, pi);
+ break;
+ /* unreserve blocks */
+ case TILIOC_URBLK:
+ ops->unreserve(arg, pi);
+ break;
+ /* query a tiler block */
+ case TILIOC_QBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (block_info.id) {
+ /* look up by id if specified */
+ mutex_lock(&ops->mtx);
+ mi = _m_lock_block(block_info.key, block_info.id, pi);
+ mutex_unlock(&ops->mtx);
+ } else
+#ifndef CONFIG_TILER_SECURE
+ if (ssptr_lookup) {
+ /* otherwise, look up by ssptr if allowed */
+ mi = ops->lock_by_ssptr(block_info.ssptr);
+ } else
+#endif
+ return -EPERM;
+
+ if (!mi)
+ return -EFAULT;
+
+ /* we don't keep track of ptr and 1D stride so clear them */
+ block_info.ptr = NULL;
+ block_info.stride = 0;
+
+ ops->describe(mi, &block_info);
+ ops->unlock_free(mi, false);
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
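+
+/*
+ * Illustrative user-space sketch (not part of this file): allocating and
+ * freeing a block through the ioctl interface might look roughly like the
+ * following, assuming the device node shows up as /dev/tiler.
+ *
+ *	int fd = open("/dev/tiler", O_RDWR);
+ *	struct tiler_block_info blk = {
+ *		.fmt = TILFMT_8BIT,
+ *		.dim.area = { .width = 640, .height = 480 },
+ *	};
+ *
+ *	ioctl(fd, TILIOC_GBLK, &blk);	... blk.id is filled in by the driver
+ *	ioctl(fd, TILIOC_FBLK, &blk);
+ *	close(fd);
+ */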
+
+/* open tiler driver */
+static s32 tiler_open(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = __get_pi(current->tgid, false);
+ if (!pi)
+ return -ENOMEM;
+
+ filp->private_data = pi;
+ return 0;
+}
+
+/* close tiler driver */
+static s32 tiler_release(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = filp->private_data;
+
+ mutex_lock(&ops->mtx);
+ /* free resources if last device in this process */
+ if (0 == --pi->refs)
+ _m_free_process_info(pi);
+
+ mutex_unlock(&ops->mtx);
+
+ return 0;
+}
+
+/* tiler driver file operations */
+static const struct file_operations tiler_fops = {
+ .open = tiler_open,
+ .unlocked_ioctl = tiler_ioctl,
+ .release = tiler_release,
+ .mmap = tiler_mmap,
+};
+
+
+void tiler_ioctl_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+ ops->fops = &tiler_fops;
+
+#ifdef CONFIG_TILER_SECURE
+ offset_lookup = ssptr_lookup = false;
+#endif
+ BLOCKING_INIT_NOTIFIER_HEAD(&notifier);
+}
+
+
+s32 tiler_reg_notifier(struct notifier_block *nb)
+{
+ if (!nb)
+ return -EINVAL;
+ return blocking_notifier_chain_register(&notifier, nb);
+}
+EXPORT_SYMBOL(tiler_reg_notifier);
+
+s32 tiler_unreg_notifier(struct notifier_block *nb)
+{
+ if (!nb)
+ return -EINVAL;
+ return blocking_notifier_chain_unregister(&notifier, nb);
+}
+EXPORT_SYMBOL(tiler_unreg_notifier);
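+
+/*
+ * Illustrative sketch: another driver could subscribe to TILER events
+ * (e.g. TILER_DEVICE_CLOSE, raised from _m_free_process_info) roughly as
+ * follows; my_event_cb and my_nb are hypothetical names.
+ *
+ *	static int my_event_cb(struct notifier_block *nb, unsigned long event,
+ *			       void *data)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = { .notifier_call = my_event_cb };
+ *	...
+ *	tiler_reg_notifier(&my_nb);
+ */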
diff --git a/drivers/media/video/tiler/tiler-main.c b/drivers/media/video/tiler/tiler-main.c
new file mode 100644
index 0000000..34bb1e4
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-main.c
@@ -0,0 +1,1772 @@
+/*
+ * tiler-main.c
+ *
+ * TILER driver main support functions for TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h> /* struct cdev */
+#include <linux/kdev_t.h> /* MKDEV() */
+#include <linux/fs.h> /* register_chrdev_region() */
+#include <linux/device.h> /* struct class */
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/err.h> /* IS_ERR() */
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h> /* dma_alloc_coherent */
+#include <linux/pagemap.h> /* page_cache_release() */
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <mach/dmm.h>
+#include "tmm.h"
+#include "_tiler.h"
+#include "tcm/tcm-sita.h" /* TCM algorithm */
+
+static bool ssptr_id = CONFIG_TILER_SSPTR_ID;
+static uint granularity = CONFIG_TILER_GRANULARITY;
+static uint tiler_alloc_debug;
+
+/*
+ * We can only change ssptr_id if there are no blocks allocated, so that
+ * pseudo-random ids and ssptrs do not potentially clash. For now make it
+ * read-only.
+ */
+module_param(ssptr_id, bool, 0444);
+MODULE_PARM_DESC(ssptr_id, "Use ssptr as block ID");
+module_param_named(grain, granularity, uint, 0644);
+MODULE_PARM_DESC(grain, "Granularity (bytes)");
+module_param_named(alloc_debug, tiler_alloc_debug, uint, 0644);
+MODULE_PARM_DESC(alloc_debug, "Allocation debug flag");
+
+struct tiler_dev {
+ struct cdev cdev;
+};
+static struct dentry *dbgfs;
+static struct dentry *dbg_map;
+
+static struct tiler_ops tiler; /* shared methods and variables */
+
+static struct list_head blocks; /* all tiler blocks */
+static struct list_head orphan_areas; /* orphaned 2D areas */
+static struct list_head orphan_onedim; /* orphaned 1D areas */
+
+static s32 tiler_major;
+static s32 tiler_minor;
+static struct tiler_dev *tiler_device;
+static struct class *tilerdev_class;
+static struct mutex mtx;
+static struct tcm *tcm[TILER_FORMATS];
+static struct tmm *tmm[TILER_FORMATS];
+static u32 *dmac_va;
+static dma_addr_t dmac_pa;
+static DEFINE_MUTEX(dmac_mtx);
+
+/*
+ * TMM connectors
+ * ==========================================================================
+ */
+/* wrapper around tmm_pin */
+static s32 pin_mem_to_area(struct tmm *tmm, struct tcm_area *area, u32 *ptr)
+{
+ s32 res = 0;
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
+
+ /* Ensure the data reaches main memory before the PAT refill */
+ wmb();
+
+ mutex_lock(&dmac_mtx);
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ memcpy(dmac_va, ptr, sizeof(*ptr) * tcm_sizeof(slice));
+ ptr += tcm_sizeof(slice);
+
+ /* pin memory into DMM */
+ if (tmm_pin(tmm, p_area, dmac_pa)) {
+ res = -EFAULT;
+ break;
+ }
+ }
+ mutex_unlock(&dmac_mtx);
+
+ return res;
+}
+
+/* wrapper around tmm_unpin */
+static void unpin_mem_from_area(struct tmm *tmm, struct tcm_area *area)
+{
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
+
+ mutex_lock(&dmac_mtx);
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ tmm_unpin(tmm, p_area);
+ }
+ mutex_unlock(&dmac_mtx);
+}
+
+/*
+ * ID handling methods
+ * ==========================================================================
+ */
+
+/* check if an id is used */
+static bool _m_id_in_use(u32 id)
+{
+ struct mem_info *mi;
+ list_for_each_entry(mi, &blocks, global)
+ if (mi->blk.id == id)
+ return 1;
+ return 0;
+}
+
+/* get an id */
+static u32 _m_get_id(void)
+{
+ static u32 id = 0x2d7ae;
+
+ /* ensure no one is using this id */
+ while (_m_id_in_use(id)) {
+ /* generate a new pseudo-random ID */
+
+ /* Galois LFSR: 32, 22, 2, 1 */
+ id = (id >> 1) ^ (u32)((0 - (id & 1u)) & 0x80200003u);
+ }
+
+ return id;
+}
+
+/*
+ * Debugfs support
+ * ==========================================================================
+ */
+struct tiler_debugfs_data {
+ char name[17];
+ void (*func)(struct seq_file *, u32 arg);
+ u32 arg;
+};
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+ char c, bool ovw)
+{
+ int x, y;
+ for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+ for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+ if (map[y][x] == ' ' || ovw)
+ map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+ char c)
+{
+ map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+ return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+ return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+ char *p = map[yd] + (x0 / xdiv);
+ int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+ if (w >= 0) {
+ p += w;
+ while (*nice)
+ *p++ = *nice++;
+ }
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+ if (a->p0.y + 1 < a->p1.y) {
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+ tiler.width - 1);
+ } else if (a->p0.y < a->p1.y) {
+ if (strlen(nice) < map_width(xdiv, a->p0.x, tiler.width - 1))
+ text_map(map, xdiv, nice, a->p0.y / ydiv,
+ a->p0.x + xdiv, tiler.width - 1);
+ else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+ text_map(map, xdiv, nice, a->p1.y / ydiv,
+ 0, a->p1.x - xdiv);
+ } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+ text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+ }
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+ if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+ a->p0.x, a->p1.x);
+}
+
+static void debug_allocation_map(struct seq_file *s, u32 arg)
+{
+ int xdiv = (arg >> 8) & 0xFF;
+ int ydiv = arg & 0xFF;
+ int i;
+ char **map, *global_map;
+ struct area_info *ai;
+ struct mem_info *mi;
+ struct tcm_area a, p;
+ static char *m2d = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static char *a2d = ".,:;'\"`~!^-+";
+ char *m2dp = m2d, *a2dp = a2d;
+ char nice[128];
+
+ /* allocate map */
+ map = kzalloc(tiler.height / ydiv * sizeof(*map), GFP_KERNEL);
+ global_map = kzalloc((tiler.width / xdiv + 1) * tiler.height / ydiv,
+ GFP_KERNEL);
+ if (!map || !global_map) {
+ printk(KERN_ERR "could not allocate map for debug print\n");
+ goto error;
+ }
+ memset(global_map, ' ', (tiler.width / xdiv + 1) * tiler.height / ydiv);
+ for (i = 0; i < tiler.height / ydiv; i++) {
+ map[i] = global_map + i * (tiler.width / xdiv + 1);
+ map[i][tiler.width / xdiv] = 0;
+ }
+
+ /* get all allocations */
+ mutex_lock(&mtx);
+
+ list_for_each_entry(mi, &blocks, global) {
+ if (mi->area.is2d) {
+ ai = mi->parent;
+ fill_map(map, xdiv, ydiv, &ai->area, *a2dp, false);
+ fill_map(map, xdiv, ydiv, &mi->area, *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice, &mi->area);
+ } else {
+ bool start = read_map_pt(map, xdiv, ydiv, &mi->area.p0)
+ == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv, &mi->area.p1)
+ == ' ';
+ tcm_for_each_slice(a, mi->area, p)
+ fill_map(map, xdiv, ydiv, &a, '=', true);
+ fill_map_pt(map, xdiv, ydiv, &mi->area.p0,
+ start ? '<' : 'X');
+ fill_map_pt(map, xdiv, ydiv, &mi->area.p1,
+ end ? '>' : 'X');
+ map_1d_info(map, xdiv, ydiv, nice, &mi->area);
+ }
+ }
+
+ seq_printf(s, "BEGIN TILER MAP\n");
+ for (i = 0; i < tiler.height / ydiv; i++)
+ seq_printf(s, "%03d:%s\n", i * ydiv, map[i]);
+ seq_printf(s, "END TILER MAP\n");
+
+ mutex_unlock(&mtx);
+
+error:
+ kfree(map);
+ kfree(global_map);
+}
+
+const struct tiler_debugfs_data debugfs_maps[] = {
+ { "1x1", debug_allocation_map, 0x0101 },
+ { "2x1", debug_allocation_map, 0x0201 },
+ { "4x1", debug_allocation_map, 0x0401 },
+ { "2x2", debug_allocation_map, 0x0202 },
+ { "4x2", debug_allocation_map, 0x0402 },
+ { "4x4", debug_allocation_map, 0x0404 },
+};
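+
+/*
+ * Note: these map files are created under debugfs in tiler_init() below,
+ * typically as /sys/kernel/debug/tiler/map/<name>; reading one prints the
+ * "BEGIN/END TILER MAP" dump at the given x/y division.
+ */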
+
+static int tiler_debug_show(struct seq_file *s, void *unused)
+{
+ struct tiler_debugfs_data *fn = s->private;
+ fn->func(s, fn->arg);
+ return 0;
+}
+
+static int tiler_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tiler_debug_show, inode->i_private);
+}
+
+static const struct file_operations tiler_debug_fops = {
+ .open = tiler_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * gid_info handling methods
+ * ==========================================================================
+ */
+
+/* get or create new gid_info object */
+static struct gid_info *_m_get_gi(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+
+ /* have mutex */
+
+ /* see if the group already exists */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ if (gi->gid == gid)
+ goto done;
+ }
+
+ /* create new group */
+ gi = kmalloc(sizeof(*gi), GFP_KERNEL);
+ if (!gi)
+ return gi;
+
+ memset(gi, 0, sizeof(*gi));
+ INIT_LIST_HEAD(&gi->areas);
+ INIT_LIST_HEAD(&gi->onedim);
+ INIT_LIST_HEAD(&gi->reserved);
+ gi->pi = pi;
+ gi->gid = gid;
+ list_add(&gi->by_pid, &pi->groups);
+done:
+ /*
+ * Once area is allocated, the group info's ref count will be
+ * decremented as the reference is no longer needed.
+ */
+ gi->refs++;
+ return gi;
+}
+
+/* free gid_info object if empty */
+static void _m_try_free_group(struct gid_info *gi)
+{
+ /* have mutex */
+ if (gi && list_empty(&gi->areas) && list_empty(&gi->onedim) &&
+ /* also ensure no one is still using this group */
+ !gi->refs) {
+ BUG_ON(!list_empty(&gi->reserved));
+ list_del(&gi->by_pid);
+
+ /* if the group is tracking kernel objects, we may even free
+ the process info */
+ if (gi->pi->kernel && list_empty(&gi->pi->groups)) {
+ list_del(&gi->pi->list);
+ kfree(gi->pi);
+ }
+
+ kfree(gi);
+ }
+}
+
+/* --- external versions --- */
+
+static struct gid_info *get_gi(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+ return gi;
+}
+
+static void release_gi(struct gid_info *gi)
+{
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
+}
+
+/*
+ * Area handling methods
+ * ==========================================================================
+ */
+
+/* allocate a reserved area with the given size and alignment, and link it to gi */
+/* leaves mutex locked to be able to add block to area */
+static struct area_info *area_new_m(u16 width, u16 height, u16 align,
+ struct tcm *tcm, struct gid_info *gi)
+{
+ struct area_info *ai = kmalloc(sizeof(*ai), GFP_KERNEL);
+ if (!ai)
+ return NULL;
+
+ /* set up empty area info */
+ memset(ai, 0x0, sizeof(*ai));
+ INIT_LIST_HEAD(&ai->blocks);
+
+ /* reserve an allocation area */
+ if (tcm_reserve_2d(tcm, width, height, align, &ai->area)) {
+ kfree(ai);
+ return NULL;
+ }
+
+ ai->gi = gi;
+ mutex_lock(&mtx);
+ list_add_tail(&ai->by_gid, &gi->areas);
+ return ai;
+}
+
+/* (must have mutex) free an area */
+static inline void _m_area_free(struct area_info *ai)
+{
+ if (ai) {
+ list_del(&ai->by_gid);
+ kfree(ai);
+ }
+}
+
+static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
+ u16 *x_area, u16 *y_area, u16 *band,
+ u16 *align)
+{
+ /* input: width, height is in pixels */
+ /* output: x_area, y_area, band, align */
+
+ /* slot width, height, and row size */
+ u32 slot_row, min_align;
+ const struct tiler_geom *g;
+
+ /* set alignment to page size */
+ *align = PAGE_SIZE;
+
+ /* width and height must be positive */
+ if (!width || !height)
+ return -EINVAL;
+
+ if (fmt == TILFMT_PAGE) {
+ /* for 1D area keep the height (1), width is in tiler slots */
+ *x_area = DIV_ROUND_UP(width, tiler.page);
+ *y_area = *band = 1;
+
+ if (*x_area * *y_area > tiler.width * tiler.height)
+ return -ENOMEM;
+ return 0;
+ }
+
+ /* format must be valid */
+ g = tiler.geom(fmt);
+ if (!g)
+ return -EINVAL;
+
+ /* get the # of bytes per row in 1 slot */
+ slot_row = g->slot_w * g->bpp;
+
+ /* how many slots can be accessed via one physical page */
+ *band = PAGE_SIZE / slot_row;
+
+ /* minimum alignment is at least 1 slot */
+ min_align = max(slot_row, granularity);
+ *align = ALIGN(*align, min_align);
+
+ /* adjust to slots */
+ *x_area = DIV_ROUND_UP(width, g->slot_w);
+ *y_area = DIV_ROUND_UP(height, g->slot_h);
+ *align /= slot_row;
+
+ if (*x_area > tiler.width || *y_area > tiler.height)
+ return -ENOMEM;
+ return 0;
+}
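+
+/*
+ * Worked example (slot geometry assumed for illustration; real values come
+ * from tiler.geom()): for a 2D format with slot_w = 64 and bpp = 1, the
+ * slot row is 64 bytes, so band = PAGE_SIZE / 64 = 64 slot rows fit in one
+ * physical page (assuming 4 KiB pages).  Because granularity is restricted
+ * to a power of two no larger than PAGE_SIZE, *align stays at 4096 bytes,
+ * i.e. 4096 / 64 = 64 slots after the conversion above.
+ */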
+
+void fill_virt_array(struct tiler_block_t *blk, u32 *virt_array)
+{
+ u32 v, p, len, size;
+ u32 i = 0, offs = 0;
+
+ if (!virt_array)
+ return;
+
+ /* get page aligned stride */
+ v = tiler_vstride(blk);
+ p = tiler_pstride(blk);
+
+ /* get page aligned virtual size for the block */
+ size = tiler_size(blk);
+ offs = blk->phys;
+ while (size) {
+ /* set len to length of one row (2D), or full length if 1D */
+ len = v;
+
+ while (len && size) {
+ virt_array[i++] = PAGE_ALIGN(offs);
+ size -= PAGE_SIZE;
+ len -= PAGE_SIZE;
+ offs += PAGE_SIZE;
+ }
+
+ /* set offset to next row beginning */
+ offs += p - v;
+ }
+}
+
+/**
+ * Find a place where a 2D block would fit into a 2D area of the
+ * same height.
+ *
+ * @author a0194118 (3/19/2010)
+ *
+ * @param w Width of the block.
+ * @param align Alignment of the block.
+ * @param ai Pointer to area info
+ * @param before Pointer to the list head before which the
+ * block should be inserted.
+ *
+ * @return the end coordinate (x1 + 1) where a block would fit,
+ * or 0 if it does not fit.
+ *
+ * (must have mutex)
+ */
+static u16 _m_blk_find_fit(u16 w, u16 align,
+ struct area_info *ai, struct list_head **before)
+{
+ int x = ai->area.p0.x + w;
+ struct mem_info *mi;
+
+ /* area blocks are sorted by x */
+ list_for_each_entry(mi, &ai->blocks, by_area) {
+ /* check if buffer would fit before this area */
+ if (x <= mi->area.p0.x) {
+ *before = &mi->by_area;
+ return x;
+ }
+ x = ALIGN(mi->area.p1.x + 1, align) + w;
+ }
+ *before = &ai->blocks;
+
+ /* check if buffer would fit after last area */
+ return (x <= ai->area.p1.x + 1) ? x : 0;
+}
+
+/* (must have mutex) adds a block to an area with certain x coordinates */
+static inline
+struct mem_info *_m_add2area(struct mem_info *mi, struct area_info *ai,
+ u16 x0, u16 w, struct list_head *before)
+{
+ mi->parent = ai;
+ mi->area = ai->area;
+ mi->area.p0.x = x0;
+ mi->area.p1.x = x0 + w - 1;
+ list_add_tail(&mi->by_area, before);
+ ai->nblocks++;
+ return mi;
+}
+
+static struct mem_info *get_2d_area(u16 w, u16 h, u16 align, u16 band,
+ struct gid_info *gi, struct tcm *tcm)
+{
+ struct area_info *ai = NULL;
+ struct mem_info *mi = NULL;
+ struct list_head *before = NULL;
+ u16 x = 0; /* this holds the end of a potential area */
+
+ /* allocate map info */
+
+ /* see if there is available prereserved space */
+ mutex_lock(&mtx);
+ list_for_each_entry(mi, &gi->reserved, global) {
+ if (mi->area.tcm == tcm &&
+ tcm_aheight(mi->area) == h &&
+ tcm_awidth(mi->area) == w &&
+ (mi->area.p0.x & (align - 1)) == 0) {
+ /* this area is already set up */
+
+ /* remove from reserved list */
+ list_del(&mi->global);
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(=2d (%d-%d,%d-%d) in (%d-%d,%d-%d) prereserved)\n",
+ mi->area.p0.x, mi->area.p1.x,
+ mi->area.p0.y, mi->area.p1.y,
+ ((struct area_info *) mi->parent)->area.p0.x,
+ ((struct area_info *) mi->parent)->area.p1.x,
+ ((struct area_info *) mi->parent)->area.p0.y,
+ ((struct area_info *) mi->parent)->area.p1.y);
+
+ goto done;
+ }
+ }
+ mutex_unlock(&mtx);
+
+ /* if not, reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ return mi;
+ memset(mi, 0, sizeof(*mi));
+
+ /* see if allocation fits in one of the existing areas */
+ /* this sets x, ai and before */
+ mutex_lock(&mtx);
+ list_for_each_entry(ai, &gi->areas, by_gid) {
+ if (ai->area.tcm == tcm &&
+ tcm_aheight(ai->area) == h) {
+ x = _m_blk_find_fit(w, align, ai, &before);
+ if (x) {
+ _m_add2area(mi, ai, x - w, w, before);
+
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(+2d (%d-%d,%d-%d) in (%d-%d,%d-%d) existing)\n",
+ mi->area.p0.x, mi->area.p1.x,
+ mi->area.p0.y, mi->area.p1.y,
+ ((struct area_info *) mi->parent)->area.p0.x,
+ ((struct area_info *) mi->parent)->area.p1.x,
+ ((struct area_info *) mi->parent)->area.p0.y,
+ ((struct area_info *) mi->parent)->area.p1.y);
+
+ goto done;
+ }
+ }
+ }
+ mutex_unlock(&mtx);
+
+ /* if no area fit, reserve a new one */
+ ai = area_new_m(ALIGN(w, max(band, align)), h,
+ max(band, align), tcm, gi);
+ if (ai) {
+ _m_add2area(mi, ai, ai->area.p0.x, w, &ai->blocks);
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(+2d (%d-%d,%d-%d) in (%d-%d,%d-%d) new)\n",
+ mi->area.p0.x, mi->area.p1.x,
+ mi->area.p0.y, mi->area.p1.y,
+ ai->area.p0.x, ai->area.p1.x,
+ ai->area.p0.y, ai->area.p1.y);
+ } else {
+ /* clean up */
+ kfree(mi);
+ return NULL;
+ }
+
+done:
+ mutex_unlock(&mtx);
+ return mi;
+}
+
+/* layout reserved 2d blocks in a larger area */
+/* NOTE: band, w, h, and a(lign) are in slots */
+static s32 lay_2d(enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, struct gid_info *gi,
+ struct list_head *pos)
+{
+ u16 x, x0, e = ALIGN(w, align), w_res = (n - 1) * e + w;
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
+
+ printk(KERN_INFO "packing %u %u buffers into %u width\n",
+ n, w, w_res);
+
+ /* calculate dimensions, band, and alignment in slots */
+ /* reserve an area */
+ ai = area_new_m(ALIGN(w_res, max(band, align)), h,
+ max(band, align), tcm[fmt], gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (n = 0, x = 0; x < w_res; x += e, n++) {
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+ x0 = ai->area.p0.x + x;
+ _m_add2area(mi, ai, x0, w, &ai->blocks);
+ list_add(&mi->global, pos);
+ }
+
+ mutex_unlock(&mtx);
+ return n;
+}
+
+#ifdef CONFIG_TILER_ENABLE_NV12
+/* layout reserved nv12 blocks in a larger area */
+/* NOTE: area w(idth), w1 (8-bit block width), h(eight) are in slots */
+/* p is a pointer to a packing description, which is a list of offsets in
+ the area for consecutive 8-bit and 16-bit blocks */
+static s32 lay_nv12(int n, u16 w, u16 w1, u16 h, struct gid_info *gi, u8 *p)
+{
+ u16 wh = (w1 + 1) >> 1, width, x0;
+ int m;
+ int a = PAGE_SIZE / tiler.geom(TILFMT_8BIT)->slot_w;
+
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
+ struct list_head *pos;
+
+ /* reserve area */
+ ai = area_new_m(w, h, a, tcm[TILFMT_8BIT], gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (m = 0; m < 2 * n; m++) {
+ width = (m & 1) ? wh : w1;
+ x0 = ai->area.p0.x + *p++;
+
+ /* get insertion head */
+ list_for_each(pos, &ai->blocks) {
+ mi = list_entry(pos, struct mem_info, by_area);
+ if (mi->area.p0.x > x0)
+ break;
+ }
+
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+
+ _m_add2area(mi, ai, x0, width, pos);
+ list_add(&mi->global, &gi->reserved);
+ }
+
+ mutex_unlock(&mtx);
+ return n;
+}
+#endif
+
+static void _m_unpin(struct mem_info *mi)
+{
+ /* release memory */
+ if (mi->pa.memtype == TILER_MEM_GOT_PAGES) {
+ int i;
+ for (i = 0; i < mi->pa.num_pg; i++) {
+ struct page *page = phys_to_page(mi->pa.mem[i]);
+ if (page) {
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+ }
+ } else if (mi->pa.memtype == TILER_MEM_ALLOCED && mi->pa.mem) {
+ tmm_free(tmm[tiler_fmt(mi->blk.phys)], mi->pa.mem);
+ /*
+ * TRICKY: tmm module uses the same mi->pa.mem pointer which
+ * it just freed. We need to clear ours so we don't double free
+ */
+ mi->pa.mem = NULL;
+ }
+ kfree(mi->pa.mem);
+ mi->pa.mem = NULL;
+ mi->pa.num_pg = 0;
+ unpin_mem_from_area(tmm[tiler_fmt(mi->blk.phys)], &mi->area);
+}
+
+/* (must have mutex) free block and any freed areas */
+static s32 _m_free(struct mem_info *mi)
+{
+ struct area_info *ai = NULL;
+ s32 res = 0;
+
+ _m_unpin(mi);
+
+ /* safe deletion as list may not have been assigned */
+ if (mi->global.next)
+ list_del(&mi->global);
+ if (mi->by_area.next)
+ list_del(&mi->by_area);
+
+ /* remove block from area first if 2D */
+ if (mi->area.is2d) {
+ ai = mi->parent;
+
+ /* check to see if area needs removing also */
+ if (ai && !--ai->nblocks) {
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(-2d (%d-%d,%d-%d) in (%d-%d,%d-%d) last)\n",
+ mi->area.p0.x, mi->area.p1.x,
+ mi->area.p0.y, mi->area.p1.y,
+ ai->area.p0.x, ai->area.p1.x,
+ ai->area.p0.y, ai->area.p1.y);
+
+ res = tcm_free(&ai->area);
+ list_del(&ai->by_gid);
+ /* try to remove parent if it became empty */
+ _m_try_free_group(ai->gi);
+ kfree(ai);
+ ai = NULL;
+ } else if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(-2d (%d-%d,%d-%d) in (%d-%d,%d-%d) remaining)\n",
+ mi->area.p0.x, mi->area.p1.x,
+ mi->area.p0.y, mi->area.p1.y,
+ ai->area.p0.x, ai->area.p1.x,
+ ai->area.p0.y, ai->area.p1.y);
+
+ } else {
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(-1d: %d,%d..%d,%d)\n",
+ mi->area.p0.x, mi->area.p0.y,
+ mi->area.p1.x, mi->area.p1.y);
+ /* remove 1D area */
+ res = tcm_free(&mi->area);
+ /* try to remove parent if it became empty */
+ _m_try_free_group(mi->parent);
+ }
+
+ kfree(mi);
+ return res;
+}
+
+/* (must have mutex) returns true if block was freed */
+static bool _m_chk_ref(struct mem_info *mi)
+{
+ /* check references */
+ if (mi->refs)
+ return 0;
+
+ if (_m_free(mi))
+ printk(KERN_ERR "error while removing tiler block\n");
+
+ return 1;
+}
+
+/* (must have mutex) */
+static inline bool _m_dec_ref(struct mem_info *mi)
+{
+ if (mi->refs-- <= 1)
+ return _m_chk_ref(mi);
+
+ return 0;
+}
+
+/* (must have mutex) */
+static inline void _m_inc_ref(struct mem_info *mi)
+{
+ mi->refs++;
+}
+
+/* (must have mutex) returns true if block was freed */
+static inline bool _m_try_free(struct mem_info *mi)
+{
+ if (mi->alloced) {
+ mi->refs--;
+ mi->alloced = false;
+ }
+ return _m_chk_ref(mi);
+}
+
+/* --- external methods --- */
+
+/* find a block by key/id and lock it */
+static struct mem_info *
+find_n_lock(u32 key, u32 id, struct gid_info *gi) {
+ struct area_info *ai = NULL;
+ struct mem_info *mi = NULL;
+
+ mutex_lock(&mtx);
+
+ /* if group is not given, look globally */
+ if (!gi) {
+ list_for_each_entry(mi, &blocks, global) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ } else {
+ /* if the id is the ssptr, we know from the address whether the block
+ is 1D or 2D, so we optimize the lookup */
+ if (!ssptr_id ||
+ tiler_fmt(id) == TILFMT_PAGE) {
+ list_for_each_entry(mi, &gi->onedim, by_area) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ }
+
+ if (!ssptr_id ||
+ tiler_fmt(id) != TILFMT_PAGE) {
+ list_for_each_entry(ai, &gi->areas, by_gid) {
+ list_for_each_entry(mi, &ai->blocks, by_area) {
+ if (mi->blk.key == key &&
+ mi->blk.id == id)
+ goto done;
+ }
+ }
+ }
+ }
+
+ mi = NULL;
+done:
+ /* lock block by increasing its ref count */
+ if (mi)
+ mi->refs++;
+
+ mutex_unlock(&mtx);
+
+ return mi;
+}
+
+/* unlock a block, and optionally free it */
+static void unlock_n_free(struct mem_info *mi, bool free)
+{
+ mutex_lock(&mtx);
+
+ _m_dec_ref(mi);
+ if (free)
+ _m_try_free(mi);
+
+ mutex_unlock(&mtx);
+}
+
+/**
+ * Free all blocks in a group: allocated blocks and unreferenced blocks.
+ *
+ * Any blocks/areas still referenced
+ * will move to the orphaned lists to avoid issues if a new process is created
+ * with the same pid.
+ *
+ * (must have mutex)
+ */
+static void destroy_group(struct gid_info *gi)
+{
+ struct area_info *ai, *ai_;
+ struct mem_info *mi, *mi_;
+ bool ai_autofreed, need2free;
+
+ mutex_lock(&mtx);
+
+ /* free all allocated blocks, and remove unreferenced ones */
+
+ /*
+ * Group info structs are freed automatically when they become empty on
+ * an _m_try_free.  However, if the group info is already empty, we need
+ * to remove it manually
+ */
+ need2free = list_empty(&gi->areas) && list_empty(&gi->onedim);
+ list_for_each_entry_safe(ai, ai_, &gi->areas, by_gid) {
+ ai_autofreed = true;
+ list_for_each_entry_safe(mi, mi_, &ai->blocks, by_area)
+ ai_autofreed &= _m_try_free(mi);
+
+ /* save orphaned areas for later removal */
+ if (!ai_autofreed) {
+ need2free = true;
+ ai->gi = NULL;
+ list_move(&ai->by_gid, &orphan_areas);
+ }
+ }
+
+ list_for_each_entry_safe(mi, mi_, &gi->onedim, by_area) {
+ if (!_m_try_free(mi)) {
+ need2free = true;
+ /* save orphaned 1D blocks */
+ mi->parent = NULL;
+ list_move(&mi->by_area, &orphan_onedim);
+ }
+ }
+
+ /* if the group is still alive, its reserved list should have been
+ emptied, as there should be no references to those blocks */
+ if (need2free) {
+ BUG_ON(!list_empty(&gi->onedim));
+ BUG_ON(!list_empty(&gi->areas));
+ _m_try_free_group(gi);
+ }
+
+ mutex_unlock(&mtx);
+}
+
+/* release (reserved) blocks */
+static void release_blocks(struct list_head *reserved)
+{
+ struct mem_info *mi, *mi_;
+
+ mutex_lock(&mtx);
+
+ /* find block in global list and free it */
+ list_for_each_entry_safe(mi, mi_, reserved, global) {
+ BUG_ON(mi->refs || mi->alloced);
+ _m_free(mi);
+ }
+ mutex_unlock(&mtx);
+}
+
+/* add reserved blocks to a group */
+static void add_reserved_blocks(struct list_head *reserved, struct gid_info *gi)
+{
+ mutex_lock(&mtx);
+ list_splice_init(reserved, &gi->reserved);
+ mutex_unlock(&mtx);
+}
+
+/* find a block by ssptr */
+static struct mem_info *find_block_by_ssptr(u32 sys_addr)
+{
+ struct mem_info *i;
+ struct tcm_pt pt;
+ u32 x, y;
+ enum tiler_fmt fmt;
+ const struct tiler_geom *g;
+
+ fmt = tiler_fmt(sys_addr);
+ if (fmt == TILFMT_INVALID)
+ return NULL;
+
+ g = tiler.geom(fmt);
+
+ /* convert x & y pixel coordinates to slot coordinates */
+ tiler.xy(sys_addr, &x, &y);
+ pt.x = x / g->slot_w;
+ pt.y = y / g->slot_h;
+
+ mutex_lock(&mtx);
+ list_for_each_entry(i, &blocks, global) {
+ if (tiler_fmt(i->blk.phys) == tiler_fmt(sys_addr) &&
+ tcm_is_in(pt, i->area)) {
+ i->refs++;
+ goto found;
+ }
+ }
+ i = NULL;
+
+found:
+ mutex_unlock(&mtx);
+ return i;
+}
+
+/* fill in block info from a mem_info */
+static void fill_block_info(struct mem_info *i, struct tiler_block_info *blk)
+{
+ blk->fmt = tiler_fmt(i->blk.phys);
+#ifdef CONFIG_TILER_EXPOSE_SSPTR
+ blk->ssptr = i->blk.phys;
+#endif
+ if (blk->fmt == TILFMT_PAGE) {
+ blk->dim.len = i->blk.width;
+ blk->group_id = ((struct gid_info *) i->parent)->gid;
+ } else {
+ blk->stride = tiler_vstride(&i->blk);
+ blk->dim.area.width = i->blk.width;
+ blk->dim.area.height = i->blk.height;
+ blk->group_id = ((struct area_info *) i->parent)->gi->gid;
+ }
+ blk->id = i->blk.id;
+ blk->key = i->blk.key;
+}
+
+/*
+ * Block operations
+ * ==========================================================================
+ */
+static struct mem_info *alloc_area(enum tiler_fmt fmt, u32 width, u32 height,
+ struct gid_info *gi)
+{
+ u16 x, y, band, align;
+ struct mem_info *mi = NULL;
+ const struct tiler_geom *g = tiler.geom(fmt);
+
+ /* calculate dimensions, band, and alignment in slots */
+ if (__analize_area(fmt, width, height, &x, &y, &band, &align))
+ return NULL;
+
+ if (fmt == TILFMT_PAGE) {
+ /* 1D areas don't pack */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ return NULL;
+ memset(mi, 0x0, sizeof(*mi));
+
+ if (tcm_reserve_1d(tcm[fmt], x * y, &mi->area)) {
+ kfree(mi);
+ return NULL;
+ }
+
+ if (tiler_alloc_debug & 1)
+ printk(KERN_ERR "(+1d: %d,%d..%d,%d)\n",
+ mi->area.p0.x, mi->area.p0.y,
+ mi->area.p1.x, mi->area.p1.y);
+
+ mutex_lock(&mtx);
+ mi->parent = gi;
+ list_add(&mi->by_area, &gi->onedim);
+ } else {
+ mi = get_2d_area(x, y, align, band, gi, tcm[fmt]);
+ if (!mi)
+ return NULL;
+
+ mutex_lock(&mtx);
+ }
+
+ list_add(&mi->global, &blocks);
+ mi->alloced = true;
+ mi->refs++;
+ gi->refs--;
+ mutex_unlock(&mtx);
+
+ mi->blk.phys = tiler.addr(fmt,
+ mi->area.p0.x * g->slot_w, mi->area.p0.y * g->slot_h);
+ return mi;
+}
+
+static struct mem_info *alloc_block_area(enum tiler_fmt fmt, u32 width,
+ u32 height, u32 key, u32 gid,
+ struct process_info *pi)
+{
+ struct mem_info *mi = NULL;
+ struct gid_info *gi = NULL;
+
+ /* validate parameters */
+ if (!pi)
+ return ERR_PTR(-EINVAL);
+
+ /* get group context */
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+
+ if (!gi)
+ return ERR_PTR(-ENOMEM);
+
+ /* reserve area in tiler container */
+ mi = alloc_area(fmt, width, height, gi);
+ if (!mi) {
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mi->blk.width = width;
+ mi->blk.height = height;
+ mi->blk.key = key;
+ if (ssptr_id) {
+ mi->blk.id = mi->blk.phys;
+ } else {
+ mutex_lock(&mtx);
+ mi->blk.id = _m_get_id();
+ mutex_unlock(&mtx);
+ }
+
+ return mi;
+}
+
+static s32 pin_memory(struct mem_info *mi, struct tiler_pa_info *pa)
+{
+ enum tiler_fmt fmt = tiler_fmt(mi->blk.phys);
+ struct tcm_area area = mi->area;
+
+ /* ensure we can pin */
+ if (!tmm_can_pin(tmm[fmt]))
+ return -EINVAL;
+
+ /* ensure pages fit into area */
+ if (pa->num_pg > tcm_sizeof(mi->area))
+ return -ENOMEM;
+
+ /* for 2D area, pages must fit exactly */
+ if (fmt != TILFMT_PAGE &&
+ pa->num_pg != tcm_sizeof(mi->area))
+ return -EINVAL;
+
+ /* save pages used */
+ mi->pa = *pa;
+ pa->mem = NULL; /* transferred array */
+
+ /* only refill available pages for 1D */
+ if (fmt == TILFMT_PAGE)
+ tcm_1d_limit(&area, pa->num_pg);
+ if (mi->pa.num_pg)
+ return pin_mem_to_area(tmm[fmt], &area, mi->pa.mem);
+ return 0;
+}
+
+void tiler_pa_free(struct tiler_pa_info *pa)
+{
+ if (pa)
+ kfree(pa->mem);
+ kfree(pa);
+}
+EXPORT_SYMBOL(tiler_pa_free);
+
+/* allocate physical pages for a block */
+static struct tiler_pa_info *get_new_pa(struct tmm *tmm, u32 num_pg)
+{
+ struct tiler_pa_info *pa = NULL;
+ pa = kzalloc(sizeof(*pa), GFP_KERNEL);
+ if (!pa)
+ return NULL;
+
+ pa->mem = tmm_get(tmm, num_pg);
+ if (pa->mem) {
+ pa->num_pg = num_pg;
+ pa->memtype = TILER_MEM_ALLOCED;
+ return pa;
+ } else {
+ kfree(pa);
+ return NULL;
+ }
+}
+
+static s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info)
+{
+ struct mem_info *mi;
+ struct tiler_pa_info *pa = NULL;
+ int res;
+
+ *info = NULL;
+
+ /* allocate tiler container area */
+ mi = alloc_block_area(fmt, width, height, key, gid, pi);
+ if (IS_ERR_OR_NULL(mi))
+ return mi ? PTR_ERR(mi) : -ENOMEM;
+
+ /* allocate memory */
+ pa = get_new_pa(tmm[fmt], tcm_sizeof(mi->area));
+ if (IS_ERR_OR_NULL(pa)) {
+ res = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* pin memory */
+ res = pin_memory(mi, pa);
+ tiler_pa_free(pa);
+ if (res)
+ goto cleanup;
+
+ *info = mi;
+ return 0;
+
+cleanup:
+ mutex_lock(&mtx);
+ _m_free(mi);
+ mutex_unlock(&mtx);
+ return res;
+}
+
+
+/* get physical pages of a user block */
+struct tiler_pa_info *user_block_to_pa(u32 usr_addr, u32 num_pg)
+{
+ struct task_struct *curr_task = current;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = NULL;
+
+ struct tiler_pa_info *pa = NULL;
+ struct page **pages = NULL;
+ u32 *mem = NULL, write = 0, i;
+ int usr_count;
+
+ pa = kzalloc(sizeof(*pa), GFP_KERNEL);
+ if (!pa)
+ return NULL;
+
+ mem = kzalloc(num_pg * sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ kfree(pa);
+ return NULL;
+ }
+
+ pages = kmalloc(num_pg * sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ kfree(mem);
+ kfree(pa);
+ return NULL;
+ }
+
+ /*
+ * Important Note: usr_addr is mapped from user
+ * application process to current process - it must lie
+ * completely within the current virtual memory address
+ * space in order to be of use to us here.
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, usr_addr + (num_pg << PAGE_SHIFT));
+
+ if (!vma || (usr_addr < vma->vm_start)) {
+ kfree(mem);
+ kfree(pa);
+ kfree(pages);
+ up_read(&mm->mmap_sem);
+ printk(KERN_ERR "Address is outside VMA: address start = %08x, "
+ "user end = %08x\n",
+ usr_addr, (usr_addr + (num_pg << PAGE_SHIFT)));
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ usr_count = get_user_pages(curr_task, mm, usr_addr, num_pg, write, 1,
+ pages, NULL);
+
+ if (usr_count > 0) {
+ /* process user allocated buffer */
+ if (usr_count != num_pg) {
+ /* release the pages we did get */
+ for (i = 0; i < usr_count; i++)
+ page_cache_release(pages[i]);
+ } else {
+ /* fill in the physical address information */
+ for (i = 0; i < num_pg; i++) {
+ mem[i] = page_to_phys(pages[i]);
+ BUG_ON(pages[i] != phys_to_page(mem[i]));
+ }
+ }
+ } else {
+ /* fallback for kernel allocated buffers */
+ for (i = 0; i < num_pg; i++) {
+ mem[i] = tiler_virt2phys(usr_addr);
+
+ if (!mem[i]) {
+ printk(KERN_ERR "VMA not in page table\n");
+ break;
+ }
+
+ usr_addr += PAGE_SIZE;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
+ kfree(pages);
+
+ /* if failed to map all pages */
+ if (i < num_pg) {
+ kfree(mem);
+ kfree(pa);
+ return ERR_PTR(-EFAULT);
+ }
+
+ pa->mem = mem;
+ pa->memtype = usr_count > 0 ? TILER_MEM_GOT_PAGES : TILER_MEM_USING;
+ pa->num_pg = num_pg;
+ return pa;
+}
+EXPORT_SYMBOL(user_block_to_pa);
+
+/* allocate area from container and pin memory */
+static s32 pin_any_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, struct tiler_pa_info *pa)
+{
+ s32 res = -EPERM;
+ struct mem_info *mi = NULL;
+
+ *info = NULL;
+
+ /* check if mapping is supported by tmm */
+ if (!tmm_can_pin(tmm[fmt]))
+ goto done;
+
+ /* get allocation area */
+ mi = alloc_block_area(fmt, width, height, key, gid, pi);
+ if (IS_ERR_OR_NULL(mi)) {
+ res = mi ? PTR_ERR(mi) : -ENOMEM;
+ goto done;
+ }
+
+ /* pin pages to tiler container */
+ res = pin_memory(mi, pa);
+
+ /* success */
+ if (!res) {
+ *info = mi;
+ } else {
+ mutex_lock(&mtx);
+ _m_free(mi);
+ mutex_unlock(&mtx);
+ }
+done:
+ tiler_pa_free(pa);
+ return res;
+}
+
+static s32 pin_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, u32 usr_addr)
+{
+ struct tiler_pa_info *pa = NULL;
+
+ /* we only support mapping a user buffer in page mode */
+ if (fmt != TILFMT_PAGE)
+ return -ENOMEM;
+
+ /* get user pages */
+ pa = user_block_to_pa(usr_addr, DIV_ROUND_UP(width, PAGE_SIZE));
+ if (IS_ERR_OR_NULL(pa))
+ return pa ? PTR_ERR(pa) : -ENOMEM;
+
+ return pin_any_block(fmt, width, height, key, gid, pi, info, pa);
+}
+
+s32 tiler_pin_block(tiler_blk_handle block, u32 *addr_array, u32 nents)
+{
+ struct tiler_pa_info *pa = NULL;
+ u32 *mem = NULL;
+ int res;
+
+ pa = kzalloc(sizeof(*pa), GFP_KERNEL);
+ if (!pa)
+ return -ENOMEM;
+
+ mem = kmemdup(addr_array, sizeof(*addr_array)*nents, GFP_KERNEL);
+ if (!mem) {
+ kfree(pa);
+ return -ENOMEM;
+ }
+
+ pa->mem = mem;
+ pa->memtype = TILER_MEM_USING;
+ pa->num_pg = nents;
+
+ res = pin_memory(block, pa);
+ tiler_pa_free(pa);
+
+ return res;
+}
+EXPORT_SYMBOL(tiler_pin_block);
+
+/*
+ * Driver code
+ * ==========================================================================
+ */
+
+#ifdef CONFIG_PM
+static int tiler_resume(struct device *pdev)
+{
+ struct mem_info *mi;
+ struct pat_area area = {0};
+
+ /* clear out PAT entries and set dummy page */
+ area.x1 = tiler.width - 1;
+ area.y1 = tiler.height - 1;
+ mutex_lock(&dmac_mtx);
+ tmm_unpin(tmm[TILFMT_8BIT], area);
+ mutex_unlock(&dmac_mtx);
+
+ /* iterate over all the blocks and refresh the PAT entries */
+ list_for_each_entry(mi, &blocks, global) {
+ if (mi->pa.mem)
+ if (pin_mem_to_area(tmm[tiler_fmt(mi->blk.phys)],
+ &mi->area, mi->pa.mem))
+ printk(KERN_ERR "Failed PAT restore - %08x\n",
+ mi->blk.phys);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tiler_pm_ops = {
+ .resume = tiler_resume,
+};
+#endif
+
+static struct platform_driver tiler_driver_ldm = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tiler",
+#ifdef CONFIG_PM
+ .pm = &tiler_pm_ops,
+#endif
+ },
+};
+
+static s32 __init tiler_init(void)
+{
+ dev_t dev = 0;
+ s32 r = -1;
+ struct device *device = NULL;
+ struct tcm_pt div_pt;
+ struct tcm *sita = NULL;
+ struct tmm *tmm_pat = NULL;
+ struct pat_area area = {0};
+
+ tiler.alloc = alloc_block;
+ tiler.pin = pin_block;
+ tiler.lock = find_n_lock;
+ tiler.unlock_free = unlock_n_free;
+ tiler.lay_2d = lay_2d;
+#ifdef CONFIG_TILER_ENABLE_NV12
+ tiler.lay_nv12 = lay_nv12;
+#endif
+ tiler.destroy_group = destroy_group;
+ tiler.lock_by_ssptr = find_block_by_ssptr;
+ tiler.describe = fill_block_info;
+ tiler.get_gi = get_gi;
+ tiler.release_gi = release_gi;
+ tiler.release = release_blocks;
+ tiler.add_reserved = add_reserved_blocks;
+ tiler.analize = __analize_area;
+ tiler_geom_init(&tiler);
+ tiler_reserve_init(&tiler);
+
+ mutex_init(&tiler.mtx);
+ tiler_iface_init(&tiler);
+#ifdef CONFIG_TILER_ENABLE_USERSPACE
+ tiler_ioctl_init(&tiler);
+#endif
+#ifdef CONFIG_TILER_ENABLE_NV12
+ tiler_nv12_init(&tiler);
+#endif
+
+ /* check module parameters for correctness */
+ if (granularity < 1 || granularity > PAGE_SIZE ||
+ granularity & (granularity - 1))
+ return -EINVAL;
+
+ /*
+ * Array of physical page addresses used for PAT programming; the DMA
+ * buffer must be at a 16-byte-aligned physical address.
+ */
+ dmac_va = dma_alloc_coherent(NULL, tiler.width * tiler.height *
+ sizeof(*dmac_va), &dmac_pa, GFP_ATOMIC);
+ if (!dmac_va)
+ return -ENOMEM;
+
+ /* Allocate tiler container manager (we share 1 on OMAP4) */
+ div_pt.x = tiler.width; /* hardcoded default */
+ div_pt.y = (3 * tiler.height) / 4;
+ sita = sita_init(tiler.width, tiler.height, (void *)&div_pt);
+
+ tcm[TILFMT_8BIT] = sita;
+ tcm[TILFMT_16BIT] = sita;
+ tcm[TILFMT_32BIT] = sita;
+ tcm[TILFMT_PAGE] = sita;
+
+ /* Allocate tiler memory manager (must have 1 unique TMM per TCM ) */
+ tmm_pat = tmm_pat_init(0, dmac_va, dmac_pa);
+ tmm[TILFMT_8BIT] = tmm_pat;
+ tmm[TILFMT_16BIT] = tmm_pat;
+ tmm[TILFMT_32BIT] = tmm_pat;
+ tmm[TILFMT_PAGE] = tmm_pat;
+
+ /* Clear out all PAT entries */
+ area.x1 = tiler.width - 1;
+ area.y1 = tiler.height - 1;
+ tmm_unpin(tmm_pat, area);
+
+#ifdef CONFIG_TILER_ENABLE_NV12
+ tiler.nv12_packed = tcm[TILFMT_8BIT] == tcm[TILFMT_16BIT];
+#endif
+
+ tiler_device = kmalloc(sizeof(*tiler_device), GFP_KERNEL);
+ if (!tiler_device || !sita || !tmm_pat) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ memset(tiler_device, 0x0, sizeof(*tiler_device));
+ if (tiler_major) {
+ dev = MKDEV(tiler_major, tiler_minor);
+ r = register_chrdev_region(dev, 1, "tiler");
+ } else {
+ r = alloc_chrdev_region(&dev, tiler_minor, 1, "tiler");
+ tiler_major = MAJOR(dev);
+ }
+
+ cdev_init(&tiler_device->cdev, tiler.fops);
+ tiler_device->cdev.owner = THIS_MODULE;
+ tiler_device->cdev.ops = tiler.fops;
+
+ r = cdev_add(&tiler_device->cdev, dev, 1);
+ if (r)
+ printk(KERN_ERR "cdev_add():failed\n");
+
+ tilerdev_class = class_create(THIS_MODULE, "tiler");
+
+ if (IS_ERR(tilerdev_class)) {
+ printk(KERN_ERR "class_create():failed\n");
+ goto error;
+ }
+
+	device = device_create(tilerdev_class, NULL, dev, NULL, "tiler");
+	if (IS_ERR(device))
+		printk(KERN_ERR "device_create() failed\n");
+
+ r = platform_driver_register(&tiler_driver_ldm);
+
+ mutex_init(&mtx);
+ INIT_LIST_HEAD(&blocks);
+ INIT_LIST_HEAD(&orphan_areas);
+ INIT_LIST_HEAD(&orphan_onedim);
+
+ dbgfs = debugfs_create_dir("tiler", NULL);
+ if (IS_ERR_OR_NULL(dbgfs))
+ dev_warn(device, "failed to create debug files.\n");
+ else
+ dbg_map = debugfs_create_dir("map", dbgfs);
+ if (!IS_ERR_OR_NULL(dbg_map)) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(debugfs_maps); i++)
+ debugfs_create_file(debugfs_maps[i].name, S_IRUGO,
+ dbg_map, (void *) (debugfs_maps + i),
+ &tiler_debug_fops);
+ }
+
+error:
+ /* TODO: error handling for device registration */
+ if (r) {
+ kfree(tiler_device);
+ tcm_deinit(sita);
+ tmm_deinit(tmm_pat);
+ dma_free_coherent(NULL, tiler.width * tiler.height *
+ sizeof(*dmac_va), dmac_va, dmac_pa);
+ }
+
+ return r;
+}
+
+static void __exit tiler_exit(void)
+{
+ int i, j;
+
+ mutex_lock(&mtx);
+
+ debugfs_remove_recursive(dbgfs);
+
+ /* free all process data */
+ tiler.cleanup();
+
+ /* all lists should have cleared */
+ BUG_ON(!list_empty(&blocks));
+ BUG_ON(!list_empty(&orphan_onedim));
+ BUG_ON(!list_empty(&orphan_areas));
+
+ mutex_unlock(&mtx);
+
+ dma_free_coherent(NULL, tiler.width * tiler.height * sizeof(*dmac_va),
+ dmac_va, dmac_pa);
+
+ /* close containers only once */
+ for (i = TILFMT_MIN; i <= TILFMT_MAX; i++) {
+ /* remove identical containers (tmm is unique per tcm) */
+ for (j = i + 1; j <= TILFMT_MAX; j++)
+ if (tcm[i] == tcm[j]) {
+ tcm[j] = NULL;
+ tmm[j] = NULL;
+ }
+
+ tcm_deinit(tcm[i]);
+ tmm_deinit(tmm[i]);
+ }
+
+ mutex_destroy(&mtx);
+ platform_driver_unregister(&tiler_driver_ldm);
+ cdev_del(&tiler_device->cdev);
+ kfree(tiler_device);
+ device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
+ class_destroy(tilerdev_class);
+}
+
+tiler_blk_handle tiler_map_1d_block(struct tiler_pa_info *pa)
+{
+ struct mem_info *mi = NULL;
+ struct tiler_pa_info *pa_tmp = kmemdup(pa, sizeof(*pa), GFP_KERNEL);
+ s32 res = pin_any_block(TILFMT_PAGE, pa->num_pg << PAGE_SHIFT, 1, 0, 0,
+ __get_pi(0, true), &mi, pa_tmp);
+ return res ? ERR_PTR(res) : mi;
+}
+EXPORT_SYMBOL(tiler_map_1d_block);
+
+void tiler_free_block_area(tiler_blk_handle block)
+{
+ mutex_lock(&mtx);
+ _m_try_free(block);
+ mutex_unlock(&mtx);
+}
+EXPORT_SYMBOL(tiler_free_block_area);
+
+tiler_blk_handle tiler_alloc_block_area(enum tiler_fmt fmt, u32 width,
+ u32 height, u32 *ssptr, u32 *virt_array)
+{
+ struct mem_info *mi;
+ *ssptr = 0;
+
+ /* if tiler is not initialized fail gracefully */
+ if (!tilerdev_class)
+ return NULL;
+
+ mi = alloc_block_area(fmt, width, height, 0, 0, __get_pi(0, true));
+
+ if (IS_ERR_OR_NULL(mi))
+ goto done;
+
+ fill_virt_array(&mi->blk, virt_array);
+ *ssptr = mi->blk.phys;
+
+done:
+ return mi;
+}
+EXPORT_SYMBOL(tiler_alloc_block_area);
+
+void tiler_unpin_block(tiler_blk_handle block)
+{
+ mutex_lock(&mtx);
+ _m_unpin(block);
+ mutex_unlock(&mtx);
+}
+EXPORT_SYMBOL(tiler_unpin_block);
+
+s32 tiler_memsize(enum tiler_fmt fmt, u32 width, u32 height, u32 *alloc_pages,
+ u32 *virt_pages)
+{
+ u16 x, y, band, align;
+ int res;
+ struct tiler_block_t blk;
+
+ *alloc_pages = *virt_pages = 0;
+
+ res = tiler.analize(fmt, width, height, &x, &y, &align, &band);
+
+ if (!res) {
+ blk.height = height;
+ blk.width = width;
+ blk.phys = tiler.addr(fmt, 0, 0);
+ *alloc_pages = x*y;
+ *virt_pages = tiler_size(&blk) / PAGE_SIZE;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(tiler_memsize);
+
+u32 tiler_block_vstride(tiler_blk_handle block)
+{
+ return tiler_vstride(&block->blk);
+}
+EXPORT_SYMBOL(tiler_block_vstride);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lajos Molnar <molnar@ti.com>");
+MODULE_AUTHOR("David Sin <davidsin@ti.com>");
+module_init(tiler_init);
+module_exit(tiler_exit);
diff --git a/drivers/media/video/tiler/tiler-nv12.c b/drivers/media/video/tiler/tiler-nv12.c
new file mode 100644
index 0000000..e166122
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-nv12.c
@@ -0,0 +1,417 @@
+/*
+ * tiler-nv12.c
+ *
+ * TILER driver NV12 area reservation functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "_tiler.h"
+
+static struct tiler_ops *ops; /* shared methods and variables */
+static int band_8;
+static int band_16;
+
+/*
+ * NV12 Reservation Functions
+ *
+ * TILER is designed so that a (w * h) * 8bit area is twice as wide as a
+ * (w/2 * h/2) * 16bit area. Since having pairs of such 8-bit and 16-bit
+ * blocks is a common usecase for TILER, we optimize packing these into a
+ * TILER area.
+ *
+ * During reservation we want to find the most effective packing (most used area
+ * in the smallest overall area)
+ *
+ * We have two algorithms for packing nv12 blocks: either pack 8- and 16-bit
+ * blocks into separate container areas, or pack them together into same area.
+ */
+
+/**
+ * Calculate effectiveness of packing. We weight total area much higher than
+ * packing efficiency to get the smallest overall container use.
+ *
+ * @param w width of one (8-bit) block
+ * @param n buffers in a packing
+ * @param area width of packing area
+ * @param n_total total number of buffers to be packed
+ * @return effectiveness, the higher the better
+ */
+static inline u32 nv12_eff(u16 w, u16 n, u16 area, u16 n_total)
+{
+ return 0x10000000 -
+ /* weigh against total area needed (for all buffers) */
+ /* 64-slots = -2048 */
+ DIV_ROUND_UP(n_total, n) * area * 32 +
+ /* packing efficiency (0 - 1024) */
+ 1024 * n * ((w * 3 + 1) >> 1) / area;
+}
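+
+/*
+ * Illustrative arithmetic (hypothetical values): for w = 4, n = 9, area = 64
+ * and n_total = 9 a single area holds every buffer, so
+ *	DIV_ROUND_UP(9, 9) * 64 * 32       = 2048  (total-area penalty)
+ *	1024 * 9 * ((4 * 3 + 1) >> 1) / 64 =  864  (packing efficiency)
+ * giving nv12_eff() = 0x10000000 - 2048 + 864.  A packing that needs twice
+ * as many areas of the same size doubles the penalty term, so it scores
+ * lower even if its per-area efficiency is higher.
+ */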
+
+/**
+ * Fallback nv12 packing algorithm: pack 8 and 16 bit block into separate
+ * areas.
+ *
+ * @author a0194118 (7/16/2010)
+ *
+ * @param o desired offset (<a)
+ * @param a desired alignment (>=2)
+ * @param w block width (>0)
+ * @param n number of blocks desired
+ * @param area pointer to store total area needed
+ *
+ * @return number of blocks that can be allocated
+ */
+static u16 nv12_separate(u16 o, u16 a, u16 w, u16 n, u16 *area)
+{
+ tiler_best2pack(o, a, band_8, w, &n, area);
+ tiler_best2pack(o >> 1, a >> 1, band_16, (w + 1) >> 1, &n, area);
+ *area *= 3;
+ return n;
+}
+
+/*
+ * Specialized NV12 Reservation Algorithms
+ *
+ * We use 4 packing methods that pack nv12 blocks into the same area. Together
+ * these 4 methods give the optimal result for most possible input parameters.
+ *
+ * For now we pack into a 64-slot area, so that we don't have to worry about
+ * stride issues (all blocks get 4K stride). For some of the algorithms this
+ * could be true even if the area was 128.
+ */
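+
+/*
+ * Illustrative arithmetic (assuming the usual OMAP4 geometry of a 64-pixel
+ * wide, 1 byte-per-pixel 8-bit slot): a 64-slot band covers
+ * 64 * 64 * 1 = 4096 bytes = PAGE_SIZE, i.e. exactly one 4K stride, which is
+ * why band_8 in tiler_nv12_init() below works out to 64 for that geometry.
+ */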
+
+/**
+ * Packing types are marked using a letter sequence, capital letters denoting
+ * 8-bit blocks, lower case letters denoting corresponding 16-bit blocks.
+ *
+ * All methods have the following parameters. They also define the maximum
+ * number of coordinates that could potentially be packed.
+ *
+ * @param o, a, w, n offset, alignment, width, # of blocks as usual
+ * @param area pointer to store area needed for packing
+ * @param p pointer to store packing coordinates
+ * @return number of blocks that can be packed
+ */
+
+/* Method A: progressive packing: AAAAaaaaBBbbCc into 64-slot area */
+#define MAX_A 21
+static int nv12_A(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 x = o, u, l, m = 0;
+ *area = band_8;
+
+ while (x + w < *area && m < n) {
+ /* current 8bit upper bound (a) is next 8bit lower bound (B) */
+ l = u = (*area + x) >> 1;
+
+ /* pack until upper bound */
+ while (x + w <= u && m < n) {
+ /* save packing */
+ BUG_ON(m + 1 >= MAX_A);
+ *p++ = x;
+ *p++ = l;
+ l = (*area + x + w + 1) >> 1;
+ x = ALIGN(x + w - o, a) + o;
+ m++;
+ }
+ x = ALIGN(l - o, a) + o; /* set new lower bound */
+ }
+ return m;
+}
+
+/* Method -A: regressive packing: cCbbBBaaaaAAAA into 64-slot area */
+static int nv12_revA(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 m;
+
+ /* this is a mirrored packing of method A */
+ n = nv12_A((a - (o + w) % a) % a, a, w, n, area, p);
+
+ /* reverse packing */
+ for (m = 0; m < n; m++) {
+ *p = *area - *p - w;
+ p++;
+ *p = *area - *p - ((w + 1) >> 1);
+ p++;
+ }
+ return n;
+}
+
+/* Method B: simple layout: aAbcBdeCfgDhEFGH */
+#define MAX_B 8
+static int nv12_B(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 e = (o + w) % a; /* end offset */
+ u16 o1 = (o >> 1) % a; /* half offset */
+ u16 e1 = ((o + w + 1) >> 1) % a; /* half end offset */
+ u16 o2 = o1 + (a >> 2); /* 2nd half offset */
+ u16 e2 = e1 + (a >> 2); /* 2nd half end offset */
+ u16 m = 0;
+ *area = band_8;
+
+ /* ensure 16-bit blocks don't overlap 8-bit blocks */
+
+ /* width cannot wrap around alignment, half block must be before block,
+ 2nd half can be before or after */
+ if (w < a && o < e && e1 <= o && (e2 <= o || o2 >= e))
+ while (o + w <= *area && m < n) {
+ BUG_ON(m + 1 >= MAX_B);
+ *p++ = o;
+ *p++ = o >> 1;
+ m++;
+ o += a;
+ }
+ return m;
+}
+
+/* Method C: butterfly layout: AAbbaaBB */
+#define MAX_C 20
+static int nv12_C(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ int m = 0;
+ u16 o2, e = ALIGN(w, a), i = 0, j = 0;
+ *area = band_8;
+ o2 = *area - (a - (o + w) % a) % a; /* end of last possible block */
+
+ m = (min(o2 - 2 * o, 2 * o2 - o - *area) / 3 - w) / e + 1;
+ for (i = j = 0; i < m && j < n; i++, j++) {
+ BUG_ON(j + 1 >= MAX_C);
+ *p++ = o + i * e;
+ *p++ = (o + i * e + *area) >> 1;
+ if (++j < n) {
+ *p++ = o2 - i * e - w;
+ *p++ = (o2 - i * e - w) >> 1;
+ }
+ }
+ return j;
+}
+
+/* Method D: for large allocation: aA or Aa */
+#define MAX_D 1
+static int nv12_D(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 o1, w1 = (w + 1) >> 1, d;
+ *area = ALIGN(o + w, band_8);
+
+ for (d = 0; n > 0 && d + o + w <= *area; d += a) {
+ /* try to fit 16-bit before 8-bit */
+ o1 = ((o + d) % band_8) >> 1;
+ if (o1 + w1 <= o + d) {
+ *p++ = o + d;
+ *p++ = o1;
+ return 1;
+ }
+
+ /* try to fit 16-bit after 8-bit */
+ o1 += ALIGN(d + o + w - o1, band_16);
+ if (o1 + w1 <= *area) {
+ *p++ = o;
+ *p++ = o1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Umbrella nv12 packing method. This selects the best packings from the above
+ * methods. It also contains hardcoded packings for parameter combinations
+ * that have more efficient packings.  This method is guaranteed to provide
+ * the optimal packing if 2 <= a <= 64, w <= 64, and n is large.
+ */
+#define MAX_ANY 21 /* must be MAX(method-MAX-s, hardcoded n-s) */
+static u16 nv12_together(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *packing)
+{
+ u16 n_best, a_best, n2, a_, o_, w_;
+
+ /* algo results (packings) */
+ u8 pack_A[MAX_A * 2], pack_rA[MAX_A * 2];
+ u8 pack_B[MAX_B * 2], pack_C[MAX_C * 2];
+ u8 pack_D[MAX_D * 2];
+
+ /*
+ * Hardcoded packings. They are sorted by increasing area, and then by
+ * decreasing n. We may not get the best efficiency if less than n
+ * blocks are needed as packings are not necessarily sorted in
+ * increasing order. However, for those n-s one of the other 4 methods
+ * may return the optimal packing.
+ */
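+	/*
+	 * Array layout: each entry is { n, o, w, a, area } followed by n
+	 * pairs of (8-bit x, 16-bit x) slot coordinates; a single 0 ends
+	 * the list.  E.g. the first entry below packs n = 9 blocks of
+	 * width 4 at offset 2 with alignment 4 into a 64-slot area, using
+	 * the 9 coordinate pairs that follow it.
+	 */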
+ u8 packings[] = {
+ /* n=9, o=2, w=4, a=4, area=64 */
+ 9, 2, 4, 4, 64,
+ /* 8-bit, 16-bit block coordinate pairs */
+ 2, 33, 6, 35, 10, 37, 14, 39, 18, 41,
+ 46, 23, 50, 25, 54, 27, 58, 29,
+ /* o=0, w=12, a=4, n=3 */
+ 3, 0, 12, 4, 64,
+ 0, 32, 12, 38, 48, 24,
+ /* end */
+ 0
+ }, *p = packings, *p_best = NULL, *p_end;
+ p_end = packings + sizeof(packings) - 1;
+
+ /* see which method gives the best packing */
+
+ /* start with smallest area algorithms A, B & C, stop if we can
+ pack all buffers */
+ n_best = nv12_A(o, a, w, n, area, pack_A);
+ p_best = pack_A;
+ if (n_best < n) {
+ n2 = nv12_revA(o, a, w, n, &a_best, pack_rA);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_rA;
+ *area = a_best;
+ }
+ }
+ if (n_best < n) {
+ n2 = nv12_B(o, a, w, n, &a_best, pack_B);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_B;
+ *area = a_best;
+ }
+ }
+ if (n_best < n) {
+ n2 = nv12_C(o, a, w, n, &a_best, pack_C);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_C;
+ *area = a_best;
+ }
+ }
+
+ /* traverse any special packings */
+ while (*p) {
+ n2 = *p++;
+ o_ = *p++;
+ w_ = *p++;
+ a_ = *p++;
+ /* stop if we already have a better packing */
+ if (n2 < n_best)
+ break;
+
+ /* check if this packing is satisfactory */
+ if (a_ >= a && o + w + ALIGN(o_ - o, a) <= o_ + w_) {
+ *area = *p++;
+ n_best = min(n2, n);
+ p_best = p;
+ break;
+ }
+
+ /* skip to next packing */
+ p += 1 + n2 * 2;
+ }
+
+ /*
+ * If so far unsuccessful, check whether 8 and 16 bit blocks can be
+ * co-packed. This will actually be done in the end by the normal
+ * allocation, but we need to reserve a big-enough area.
+ */
+ if (!n_best) {
+ n_best = nv12_D(o, a, w, n, area, pack_D);
+ p_best = NULL;
+ }
+
+ /* store best packing */
+ if (p_best && n_best) {
+ BUG_ON(n_best > MAX_ANY);
+ memcpy(packing, p_best, n_best * 2 * sizeof(*pack_A));
+ }
+
+ return n_best;
+}
+
+/* reserve nv12 blocks */
+static void reserve_nv12(u32 n, u32 width, u32 height,
+ u32 gid, struct process_info *pi)
+{
+ u16 w, h, band, a, o = 0;
+ struct gid_info *gi;
+ int res = 0, res2, i;
+ u16 n_t, n_s, area_t, area_s;
+ u8 packing[2 * MAX_ANY];
+ struct list_head reserved = LIST_HEAD_INIT(reserved);
+
+ /* Check input parameters for correctness, and support */
+ if (!width || !height || !n ||
+ n > ops->width * ops->height / 2)
+ return;
+
+ /* calculate dimensions, band, and alignment in slots */
+ if (ops->analize(TILFMT_8BIT, width, height, &w, &h, &band, &a))
+ return;
+
+ /* get group context */
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ /* reserve in groups until failed or all is reserved */
+ for (i = 0; i < n && res >= 0; i += res) {
+ /* check packing separately vs together */
+ n_s = nv12_separate(o, a, w, n - i, &area_s);
+ if (ops->nv12_packed)
+ n_t = nv12_together(o, a, w, n - i, &area_t, packing);
+ else
+ n_t = 0;
+
+ /* pack based on better efficiency */
+ res = -1;
+ if (!ops->nv12_packed ||
+ nv12_eff(w, n_s, area_s, n - i) >
+ nv12_eff(w, n_t, area_t, n - i)) {
+
+ /*
+ * Reserve blocks separately into a temporary list, so
+ * that we can free them if unsuccessful. We need to be
+ * able to reserve both 8- and 16-bit blocks as the
+ * offsets of them must match.
+ */
+ res = ops->lay_2d(TILFMT_8BIT, n_s, w, h, band_8, a,
+ gi, &reserved);
+ res2 = ops->lay_2d(TILFMT_16BIT, n_s, (w + 1) >> 1, h,
+ band_16, a >> 1, gi, &reserved);
+
+ if (res2 < 0 || res < 0 || res != res2) {
+ /* clean up */
+ ops->release(&reserved);
+ res = -1;
+ } else {
+ /* add list to reserved */
+ ops->add_reserved(&reserved, gi);
+ }
+ }
+
+ /* if separate packing failed, still try to pack together */
+ if (res < 0 && ops->nv12_packed && n_t) {
+ /* pack together */
+ res = ops->lay_nv12(n_t, area_t, w, h, gi, packing);
+ }
+ }
+
+ ops->release_gi(gi);
+}
+
+/* initialize shared method pointers and global static variables */
+void tiler_nv12_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+
+ ops->reserve_nv12 = reserve_nv12;
+
+ band_8 = PAGE_SIZE / ops->geom(TILFMT_8BIT)->slot_w
+ / ops->geom(TILFMT_8BIT)->bpp;
+ band_16 = PAGE_SIZE / ops->geom(TILFMT_16BIT)->slot_w
+ / ops->geom(TILFMT_16BIT)->bpp;
+}
diff --git a/drivers/media/video/tiler/tiler-reserve.c b/drivers/media/video/tiler/tiler-reserve.c
new file mode 100644
index 0000000..fbabc6d
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-reserve.c
@@ -0,0 +1,154 @@
+/*
+ * tiler-reserve.c
+ *
+ * TILER driver area reservation functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "_tiler.h"
+
+static struct tiler_ops *ops; /* shared methods and variables */
+
+/**
+ * Calculate the maximum number of buffers that can be packed next to each
+ * other, and the area they occupy. This method is used for both 2D and NV12
+ * packing.
+ *
+ * @author a0194118 (7/16/2010)
+ *
+ * @param o	desired offset
+ * @param a	desired alignment
+ * @param b	band width (each block must occupy the same number of bands)
+ * @param w	width of one block (>0)
+ * @param n pointer to the desired number of blocks to pack. It will be
+ * updated with the maximum number of blocks that can be packed.
+ * @param _area pointer to store total area needed
+ *
+ * @return packing efficiency (0-1024)
+ */
+u32 tiler_best2pack(u16 o, u16 a, u16 b, u16 w, u16 *n, u16 *_area)
+{
+ u16 m = 0, max_n = *n; /* m is mostly n - 1 */
+ u16 e = ALIGN(w, a); /* effective width of one block */
+ u32 eff, best_eff = 0; /* best values */
+ u16 stride = ALIGN(o + w, b); /* block stride */
+ u16 area = stride; /* area needed (for m + 1 blocks) */
+
+ /* NOTE: block #m+1 occupies the range (o + m * e, o + m * e + w) */
+
+ /* see how many blocks we can pack */
+ while (m < max_n &&
+ /* blocks must fit in tiler container */
+ o + m * e + w <= ops->width &&
+ /* block stride must be correct */
+ stride == ALIGN(area - o - m * e, b)) {
+
+ m++;
+ eff = m * w * 1024 / area;
+ if (eff > best_eff) {
+ /* store packing for best efficiency & smallest area */
+ best_eff = eff;
+ *n = m;
+ if (_area)
+ *_area = area;
+ }
+ /* update area */
+ area = ALIGN(o + m * e + w, b);
+ }
+
+ return best_eff;
+}
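+
+/*
+ * Illustrative call (hypothetical values, assuming the container is at least
+ * 64 slots wide): tiler_best2pack(0, 4, 64, 4, &n, &area) with n = 9 on
+ * entry packs all nine 4-slot-wide blocks into a single 64-slot band (block
+ * m starts at slot 4 * m), leaving n = 9 and area = 64 and returning the
+ * efficiency 9 * 4 * 1024 / 64 = 576.
+ */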
+
+/**
+ * We also optimize packing regular 2D areas as the auto-packing may result in
+ * sub-optimal efficiency. This is most pronounced if the area is wider than
+ * half a PAGE_SIZE (e.g. 2048 in 8-bit mode, or 1024 in 16-bit mode).
+ */
+
+/* reserve 2d blocks */
+static void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 gid,
+ struct process_info *pi)
+{
+ u32 bpt, res = 0, i;
+ u16 a, band, w, h, n_try;
+ struct gid_info *gi;
+ const struct tiler_geom *g;
+
+ /* Check input parameters for correctness, and support */
+ if (!width || !height || !n ||
+ fmt < TILFMT_8BIT || fmt > TILFMT_32BIT)
+ return;
+
+ /* tiler slot in bytes */
+ g = ops->geom(fmt);
+ bpt = g->slot_w * g->bpp;
+
+ /*
+ * For blocks narrower than half PAGE_SIZE the default allocation is
+ * sufficient. Also check for basic area info.
+ */
+ if (width * g->bpp * 2 <= PAGE_SIZE ||
+ ops->analize(fmt, width, height, &w, &h, &band, &a))
+ return;
+
+ /* get group id */
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ /* reserve in groups until failed or all is reserved */
+ for (i = 0; i < n && res >= 0; i += res + 1) {
+ /* blocks to allocate in one area */
+ n_try = min(n - i, ops->width);
+ tiler_best2pack(0, a, band, w, &n_try, NULL);
+
+ res = -1;
+ while (n_try > 1) {
+ /* adjust res so we fail on 0 return value */
+ res = ops->lay_2d(fmt, n_try, w, h, band, a,
+ gi, &gi->reserved) - 1;
+ if (res >= 0)
+ break;
+
+ /* reduce n if failed to allocate area */
+ n_try--;
+ }
+ }
+ /* keep reserved blocks even if failed to reserve all */
+
+ ops->release_gi(gi);
+}
+
+/* unreserve blocks for a group id */
+static void unreserve_blocks(u32 gid, struct process_info *pi)
+{
+ struct gid_info *gi;
+
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ ops->release(&gi->reserved);
+
+ ops->release_gi(gi);
+}
+
+/* initialize shared method pointers and global static variables */
+void tiler_reserve_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+
+ ops->reserve = reserve_blocks;
+ ops->unreserve = unreserve_blocks;
+}
diff --git a/drivers/media/video/tiler/tmm-pat.c b/drivers/media/video/tiler/tmm-pat.c
new file mode 100644
index 0000000..2d902f9
--- /dev/null
+++ b/drivers/media/video/tiler/tmm-pat.c
@@ -0,0 +1,326 @@
+/*
+ * tmm-pat.c
+ *
+ * DMM driver support functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>, David Sin <dsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <asm/cacheflush.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "tmm.h"
+
+static int param_set_mem(const char *val, struct kernel_param *kp);
+
+/*
+ * Free pages are cached while total allocated memory is under this limit,
+ * so TILER may eventually hold this much memory.
+ */
+static u32 cache_limit = CONFIG_TILER_CACHE_LIMIT << 20;
+
+param_check_uint(cache, &cache_limit);
+module_param_call(cache, param_set_mem, param_get_uint, &cache_limit, 0644);
+__MODULE_PARM_TYPE(cache, "uint");
+MODULE_PARM_DESC(cache, "Cache free pages if total memory is under this limit");
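+/*
+ * Values are parsed with memparse(), so e.g. "16M" sets the limit to 16 MiB
+ * and a bare number is taken as bytes; see param_set_mem() below.
+ */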
+
+/* global state - statically initialized */
+static LIST_HEAD(free_list); /* page cache: list of free pages */
+static u32 total_mem; /* total memory allocated (free & used) */
+static u32 refs; /* number of tmm_pat instances */
+static DEFINE_MUTEX(mtx); /* global mutex */
+
+/* The page struct pointer and physical address of each page. */
+struct mem {
+ struct list_head list;
+ struct page *pg; /* page struct */
+ u32 pa; /* physical address */
+};
+
+/* Used to keep track of mem per tmm_pat_get_pages call */
+struct fast {
+ struct list_head list;
+ struct mem **mem; /* array of page info */
+ u32 *pa; /* array of physical addresses */
+ u32 num; /* number of pages */
+};
+
+/* TMM PAT private structure */
+struct dmm_mem {
+ struct list_head fast_list;
+ struct dmm *dmm;
+ u32 *dmac_va; /* coherent memory */
+ u32 dmac_pa; /* phys.addr of coherent memory */
+ struct page *dummy_pg; /* dummy page */
+ u32 dummy_pa; /* phys.addr of dummy page */
+};
+
+/* read mem values for a param */
+static int param_set_mem(const char *val, struct kernel_param *kp)
+{
+ u32 a;
+ char *p;
+
+ /* must specify memory */
+ if (!val)
+ return -EINVAL;
+
+ /* parse value */
+ a = memparse(val, &p);
+ if (p == val || *p)
+ return -EINVAL;
+
+ /* store parsed value */
+ *(uint *)kp->arg = a;
+ return 0;
+}
+
+/**
+ * Frees the pages tracked by a fast structure.  Pages are moved to the free
+ * list while total allocated memory is under cache_limit; otherwise they are
+ * released back to the system.
+ */
+static void free_fast(struct fast *f)
+{
+ s32 i = 0;
+
+ /* mutex is locked */
+ for (i = 0; i < f->num; i++) {
+ if (total_mem < cache_limit) {
+ /* cache free page if under the limit */
+ list_add(&f->mem[i]->list, &free_list);
+ } else {
+ /* otherwise, free */
+ total_mem -= PAGE_SIZE;
+ __free_page(f->mem[i]->pg);
+ kfree(f->mem[i]);
+ }
+ }
+ kfree(f->pa);
+ kfree(f->mem);
+ /* remove only if element was added */
+ if (f->list.next)
+ list_del(&f->list);
+ kfree(f);
+}
+
+/* allocate and flush a page */
+static struct mem *alloc_mem(void)
+{
+ struct mem *m = kmalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return NULL;
+ memset(m, 0, sizeof(*m));
+
+ m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
+ if (!m->pg) {
+ kfree(m);
+ return NULL;
+ }
+
+ m->pa = page_to_phys(m->pg);
+
+ /* flush the cache entry for each page we allocate. */
+ dmac_flush_range(page_address(m->pg),
+ page_address(m->pg) + PAGE_SIZE);
+ outer_flush_range(m->pa, m->pa + PAGE_SIZE);
+
+ return m;
+}
+
+static void free_page_cache(void)
+{
+ struct mem *m, *m_;
+
+ /* mutex is locked */
+ list_for_each_entry_safe(m, m_, &free_list, list) {
+ __free_page(m->pg);
+ total_mem -= PAGE_SIZE;
+ list_del(&m->list);
+ kfree(m);
+ }
+}
+
+static void tmm_pat_deinit(struct tmm *tmm)
+{
+ struct fast *f, *f_;
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+ mutex_lock(&mtx);
+
+ /* free all outstanding used memory */
+ list_for_each_entry_safe(f, f_, &pvt->fast_list, list)
+ free_fast(f);
+
+ /* if this is the last tmm_pat, free all memory */
+ if (--refs == 0)
+ free_page_cache();
+
+ __free_page(pvt->dummy_pg);
+
+ mutex_unlock(&mtx);
+}
+
+static u32 *tmm_pat_get_pages(struct tmm *tmm, u32 n)
+{
+ struct mem *m;
+ struct fast *f;
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+ memset(f, 0, sizeof(*f));
+
+ /* array of mem struct pointers */
+ f->mem = kmalloc(n * sizeof(*f->mem), GFP_KERNEL);
+
+ /* array of physical addresses */
+ f->pa = kmalloc(n * sizeof(*f->pa), GFP_KERNEL);
+
+ /* no pages have been allocated yet (needed for cleanup) */
+ f->num = 0;
+
+ if (!f->mem || !f->pa)
+ goto cleanup;
+
+ memset(f->mem, 0, n * sizeof(*f->mem));
+ memset(f->pa, 0, n * sizeof(*f->pa));
+
+ /* fill out fast struct mem array with free pages */
+ mutex_lock(&mtx);
+ while (f->num < n) {
+ /* if there is a free cached page use it */
+ if (!list_empty(&free_list)) {
+ /* unbind first element from list */
+ m = list_first_entry(&free_list, typeof(*m), list);
+ list_del(&m->list);
+ } else {
+ mutex_unlock(&mtx);
+
+			/*
+			 * Drop the mutex while allocating and flushing a
+			 * page: the allocation may sleep and the cache
+			 * flush can take a while.
+			 */
+ m = alloc_mem();
+ if (!m)
+ goto cleanup;
+
+ mutex_lock(&mtx);
+ total_mem += PAGE_SIZE;
+ }
+
+ f->mem[f->num] = m;
+ f->pa[f->num++] = m->pa;
+ }
+
+ list_add(&f->list, &pvt->fast_list);
+ mutex_unlock(&mtx);
+ return f->pa;
+
+cleanup:
+ free_fast(f);
+ return NULL;
+}
+
+static void tmm_pat_free_pages(struct tmm *tmm, u32 *page_list)
+{
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+ struct fast *f, *f_;
+
+ mutex_lock(&mtx);
+ /* find fast struct based on 1st page */
+ list_for_each_entry_safe(f, f_, &pvt->fast_list, list) {
+ if (f->pa[0] == page_list[0]) {
+ free_fast(f);
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+}
+
+static s32 tmm_pat_pin(struct tmm *tmm, struct pat_area area, u32 page_pa)
+{
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+ struct pat pat_desc = {0};
+
+ /* send pat descriptor to dmm driver */
+ pat_desc.ctrl.dir = 0;
+ pat_desc.ctrl.ini = 0;
+ pat_desc.ctrl.lut_id = 0;
+ pat_desc.ctrl.start = 1;
+ pat_desc.ctrl.sync = 0;
+ pat_desc.area = area;
+ pat_desc.next = NULL;
+
+ /* must be a 16-byte aligned physical address */
+ pat_desc.data = page_pa;
+ return dmm_pat_refill(pvt->dmm, &pat_desc, MANUAL);
+}
+
+static void tmm_pat_unpin(struct tmm *tmm, struct pat_area area)
+{
+ u16 w = (u8) area.x1 - (u8) area.x0;
+ u16 h = (u8) area.y1 - (u8) area.y0;
+ u16 i = (w + 1) * (h + 1);
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+ while (i--)
+ pvt->dmac_va[i] = pvt->dummy_pa;
+
+ tmm_pat_pin(tmm, area, pvt->dmac_pa);
+}
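+
+/*
+ * Illustrative unpin (hypothetical area): for { .x0 = 0, .y0 = 0, .x1 = 3,
+ * .y1 = 1 } tmm_pat_unpin() above fills (3 + 1) * (1 + 1) = 8 dmac_va
+ * entries with the dummy page before reprogramming the PAT for that
+ * rectangle.
+ */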
+
+struct tmm *tmm_pat_init(u32 pat_id, u32 *dmac_va, u32 dmac_pa)
+{
+ struct tmm *tmm = NULL;
+ struct dmm_mem *pvt = NULL;
+
+ struct dmm *dmm = dmm_pat_init(pat_id);
+ if (dmm)
+ tmm = kmalloc(sizeof(*tmm), GFP_KERNEL);
+ if (tmm)
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (pvt)
+ pvt->dummy_pg = alloc_page(GFP_KERNEL | GFP_DMA);
+	if (pvt && pvt->dummy_pg) {
+ /* private data */
+ pvt->dmm = dmm;
+ pvt->dmac_pa = dmac_pa;
+ pvt->dmac_va = dmac_va;
+ pvt->dummy_pa = page_to_phys(pvt->dummy_pg);
+
+ INIT_LIST_HEAD(&pvt->fast_list);
+
+		/* increase the tmm_pat reference count */
+ mutex_lock(&mtx);
+ refs++;
+ mutex_unlock(&mtx);
+
+ /* public data */
+ tmm->pvt = pvt;
+ tmm->deinit = tmm_pat_deinit;
+ tmm->get = tmm_pat_get_pages;
+ tmm->free = tmm_pat_free_pages;
+ tmm->pin = tmm_pat_pin;
+ tmm->unpin = tmm_pat_unpin;
+
+ return tmm;
+ }
+
+ kfree(pvt);
+ kfree(tmm);
+ dmm_pat_release(dmm);
+ return NULL;
+}
+EXPORT_SYMBOL(tmm_pat_init);
diff --git a/drivers/media/video/tiler/tmm.h b/drivers/media/video/tiler/tmm.h
new file mode 100644
index 0000000..dc1b5b3
--- /dev/null
+++ b/drivers/media/video/tiler/tmm.h
@@ -0,0 +1,130 @@
+/*
+ * tmm.h
+ *
+ * TMM interface definition for TI TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TMM_H
+#define TMM_H
+
+#include <mach/dmm.h>
+/**
+ * TMM interface
+ */
+struct tmm {
+ void *pvt;
+
+ /* function table */
+ u32 *(*get) (struct tmm *tmm, u32 num_pages);
+ void (*free) (struct tmm *tmm, u32 *pages);
+ s32 (*pin) (struct tmm *tmm, struct pat_area area, u32 page_pa);
+ void (*unpin) (struct tmm *tmm, struct pat_area area);
+ void (*deinit) (struct tmm *tmm);
+};
+
+/**
+ * Request a set of pages from the DMM free page stack.
+ * @return a pointer to a list of physical page addresses.
+ */
+static inline
+u32 *tmm_get(struct tmm *tmm, u32 num_pages)
+{
+ if (tmm && tmm->pvt)
+ return tmm->get(tmm, num_pages);
+ return NULL;
+}
+
+/**
+ * Return a set of used pages to the DMM free page stack.
+ * @param pages	pointer to the list of physical page addresses.
+ */
+static inline
+void tmm_free(struct tmm *tmm, u32 *pages)
+{
+ if (tmm && tmm->pvt)
+ tmm->free(tmm, pages);
+}
+
+/**
+ * Program the physical address translator.
+ * @param area PAT area
+ * @param page_pa	physical address of the array of page addresses
+ */
+static inline
+s32 tmm_pin(struct tmm *tmm, struct pat_area area, u32 page_pa)
+{
+ if (tmm && tmm->pin && tmm->pvt)
+ return tmm->pin(tmm, area, page_pa);
+ return -ENODEV;
+}
+
+/**
+ * Clears the physical address translator.
+ * @param area PAT area
+ */
+static inline
+void tmm_unpin(struct tmm *tmm, struct pat_area area)
+{
+ if (tmm && tmm->unpin && tmm->pvt)
+ tmm->unpin(tmm, area);
+}
+
+/**
+ * Checks whether the tiler memory manager supports pinning (PAT mapping)
+ */
+static inline
+bool tmm_can_pin(struct tmm *tmm)
+{
+ return tmm && tmm->pin;
+}
+
+/**
+ * Deinitialize tiler memory manager
+ */
+static inline
+void tmm_deinit(struct tmm *tmm)
+{
+ if (tmm && tmm->pvt)
+ tmm->deinit(tmm);
+}
+
+/**
+ * TMM implementation for PAT support.
+ *
+ * Initialize TMM for PAT with given id.
+ */
+struct tmm *tmm_pat_init(u32 pat_id, u32 *dmac_va, u32 dmac_pa);
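+
+/*
+ * Typical usage sketch (illustrative only; error handling and the DMA
+ * buffer setup for dmac_va/dmac_pa are the caller's responsibility, as in
+ * tiler_init()):
+ *
+ *	struct tmm *tmm = tmm_pat_init(0, dmac_va, dmac_pa);
+ *	u32 *pages = tmm_get(tmm, num_pages);
+ *
+ *	if (tmm_can_pin(tmm))
+ *		tmm_pin(tmm, area, page_pa);
+ *	...
+ *	tmm_unpin(tmm, area);
+ *	tmm_free(tmm, pages);
+ *	tmm_deinit(tmm);
+ */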
+
+#endif
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6ca938a..8f83bfc 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -218,6 +218,18 @@
and load scripts controlling which resources are switched off/on
or reset when a sleep, wakeup or warm reset event occurs.
+config TWL6030_POWER
+ bool "Support power resources on TWL6030 family chips"
+ depends on TWL4030_CORE
+ help
+ Say yes here if you want to use the power resources on the
+ TWL6030 family chips. Most of these resources are regulators,
+ which have a separate driver; some are control signals, such
+ as clock request handshaking.
+
+	  This driver defaults to assuming that only the APPS processor
+	  uses the resources; this can, however, be overridden by the
+	  board file.
+
config TWL4030_CODEC
bool
depends on TWL4030_CORE
@@ -233,6 +245,26 @@
Say yes here if you want support for TWL6030 PWM.
This is used to control charging LED brightness.
+config TWL6030_POWEROFF
+ bool "TWL6030 device poweroff"
+ depends on TWL4030_CORE
+
+config TWL6030_MADC
+ tristate "Texas Instruments TWL6030 MADC"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for TWL6030-MADC. The
+ driver supports both RT and SW conversion methods.
+
+	  This driver can be built as a module. If so, the module
+	  will be called twl6030-madc.
+
+config TWL6040_CODEC
+ bool
+ depends on TWL4030_CORE
+ select MFD_CORE
+ default n
+
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
depends on I2C=y && GENERIC_HARDIRQS
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d7d47d2..60f9021 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -42,6 +42,10 @@
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
+obj-$(CONFIG_TWL6030_MADC) += twl6030-madc.o
+obj-$(CONFIG_TWL6040_CODEC) += twl6040-codec.o twl6040-irq.o
+obj-$(CONFIG_TWL6030_POWEROFF) += twl6030-poweroff.o
+obj-$(CONFIG_TWL6030_POWER) += twl6030-power.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e67c3d3..212a338 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,8 +26,9 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
-#define USBHS_DRIVER_NAME "usbhs-omap"
+#define USBHS_DRIVER_NAME "usbhs_omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
#define OMAP_OHCI_DEVICE "ohci-omap3"
@@ -146,9 +147,6 @@
struct usbhs_hcd_omap {
- struct clk *usbhost_ick;
- struct clk *usbhost_hs_fck;
- struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -158,8 +156,6 @@
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -168,7 +164,6 @@
u32 usbhs_rev;
spinlock_t lock;
- int count;
};
/*-------------------------------------------------------------------------*/
@@ -318,6 +313,7 @@
return ret;
}
+static void omap_usbhs_init(struct device *dev);
/**
* usbhs_omap_probe - initialize TI-based HCDs
*
@@ -353,46 +349,13 @@
omap->platdata.ehci_data = pdata->ehci_data;
omap->platdata.ohci_data = pdata->ohci_data;
- omap->usbhost_ick = clk_get(dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- dev_err(dev, "usbhost_ick failed error:%d\n", ret);
- goto err_end;
- }
-
- omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
- if (IS_ERR(omap->usbhost_hs_fck)) {
- ret = PTR_ERR(omap->usbhost_hs_fck);
- dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
- goto err_usbhost_ick;
- }
-
- omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
- if (IS_ERR(omap->usbhost_fs_fck)) {
- ret = PTR_ERR(omap->usbhost_fs_fck);
- dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
- goto err_usbhost_hs_fck;
- }
-
- omap->usbtll_fck = clk_get(dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- dev_err(dev, "usbtll_fck failed error:%d\n", ret);
- goto err_usbhost_fs_fck;
- }
-
- omap->usbtll_ick = clk_get(dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- dev_err(dev, "usbtll_ick failed error:%d\n", ret);
- goto err_usbtll_fck;
- }
+ pm_runtime_enable(dev);
omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
if (IS_ERR(omap->utmi_p1_fck)) {
ret = PTR_ERR(omap->utmi_p1_fck);
dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_usbtll_ick;
+ goto err_end;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -451,6 +414,35 @@
goto err_usbtll_p2_fck;
}
+ if (is_ehci_phy_mode(pdata->port_mode[0])) {
+		/* on OMAP3, clk_set_parent fails for this clock */
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->xclk60mhsp1_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
+
+ if (is_ehci_phy_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->xclk60mhsp2_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
if (!res) {
dev_err(dev, "UHH EHCI get resource failed\n");
@@ -487,6 +479,8 @@
goto err_alloc;
}
+ omap_usbhs_init(dev);
+
goto end_probe;
err_alloc:
@@ -522,28 +516,15 @@
err_utmi_p1_fck:
clk_put(omap->utmi_p1_fck);
-err_usbtll_ick:
- clk_put(omap->usbtll_ick);
-
-err_usbtll_fck:
- clk_put(omap->usbtll_fck);
-
-err_usbhost_fs_fck:
- clk_put(omap->usbhost_fs_fck);
-
-err_usbhost_hs_fck:
- clk_put(omap->usbhost_hs_fck);
-
-err_usbhost_ick:
- clk_put(omap->usbhost_ick);
-
err_end:
+ pm_runtime_disable(dev);
kfree(omap);
end_probe:
return ret;
}
+static void omap_usbhs_deinit(struct device *dev);
/**
* usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
* @pdev: USB Host Controller being removed
@@ -554,12 +535,7 @@
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
- if (omap->count != 0) {
- dev_err(&pdev->dev,
- "Either EHCI or OHCI is still using usbhs core\n");
- return -EBUSY;
- }
-
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
clk_put(omap->init_60m_fclk);
@@ -571,11 +547,7 @@
clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
clk_put(omap->utmi_p1_fck);
- clk_put(omap->usbtll_ick);
- clk_put(omap->usbtll_fck);
- clk_put(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_ick);
+ pm_runtime_disable(&pdev->dev);
kfree(omap);
return 0;
@@ -688,30 +660,72 @@
}
}
-static int usbhs_enable(struct device *dev)
+static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- int ret = 0;
- unsigned long timeout;
- unsigned reg;
- dev_dbg(dev, "starting TI HSUSB Controller\n");
+ dev_dbg(dev, "usbhs_runtime_resume\n");
+
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
return -ENODEV;
}
- spin_lock_irqsave(&omap->lock, flags);
- if (omap->count > 0)
- goto end_count;
+ if (is_omap_usbhs_rev2(omap)) {
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_enable(omap->usbhost_p1_fck);
+ clk_enable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_enable(omap->usbhost_p2_fck);
+ clk_enable(omap->usbtll_p2_fck);
+ }
+ clk_enable(omap->utmi_p1_fck);
+ clk_enable(omap->utmi_p2_fck);
+ }
+ return 0;
+}
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost_hs_fck);
- clk_enable(omap->usbhost_fs_fck);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbtll_ick);
+static int usbhs_runtime_suspend(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+ dev_dbg(dev, "usbhs_runtime_suspend\n");
+
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (is_omap_usbhs_rev2(omap)) {
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_disable(omap->usbhost_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_disable(omap->usbhost_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
+ }
+ clk_disable(omap->utmi_p2_fck);
+ clk_disable(omap->utmi_p1_fck);
+ }
+ return 0;
+}
+
+static void omap_usbhs_init(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags = 0;
+ unsigned reg;
+
+ dev_dbg(dev, "starting TI HSUSB Controller\n");
+
+ pm_runtime_get_sync(dev);
+
+ spin_lock_irqsave(&omap->lock, flags);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -735,49 +749,13 @@
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
- /* perform TLL soft reset, and wait until reset is complete */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
- /* Wait for TLL reset to complete */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_tll;
- }
- }
-
- dev_dbg(dev, "TLL RESET DONE\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- if (is_omap_usbhs_rev1(omap)) {
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
-
-
- } else if (is_omap_usbhs_rev2(omap)) {
- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
- }
-
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+	/*
+	 * Really enable the port clocks: the first call to
+	 * pm_runtime_get_sync() could not enable them because
+	 * omap->usbhs_rev was not yet available. It is available now,
+	 * so usbhs_runtime_resume() can do the per-port enables.
+	 */
+ usbhs_runtime_resume(dev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
@@ -824,49 +802,6 @@
reg &= ~OMAP4_P1_MODE_CLEAR;
reg &= ~OMAP4_P2_MODE_CLEAR;
- if (is_ehci_phy_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->xclk60mhsp1_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p1_fck);
- clk_enable(omap->usbtll_p1_fck);
- }
-
- if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->xclk60mhsp2_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p2_fck);
- clk_enable(omap->usbtll_p2_fck);
- }
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
-
if (is_ehci_tll_mode(pdata->port_mode[0]) ||
(is_ohci_port(pdata->port_mode[0])))
reg |= OMAP4_P1_MODE_TLL;
@@ -912,107 +847,17 @@
(pdata->ehci_data->reset_gpio_port[1], 1);
}
-end_count:
- omap->count++;
spin_unlock_irqrestore(&omap->lock, flags);
- return 0;
-
-err_tll:
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
-
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
- }
-
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
- spin_unlock_irqrestore(&omap->lock, flags);
- return ret;
+ pm_runtime_put_sync(dev);
}
-static void usbhs_disable(struct device *dev)
+static void omap_usbhs_deinit(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- unsigned long timeout;
dev_dbg(dev, "stopping TI HSUSB Controller\n");
- spin_lock_irqsave(&omap->lock, flags);
-
- if (omap->count == 0)
- goto end_disble;
-
- omap->count--;
-
- if (omap->count != 0)
- goto end_disble;
-
- /* Reset OMAP modules for insmod/rmmod to work */
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- is_omap_usbhs_rev2(omap) ?
- OMAP4_UHH_SYSCONFIG_SOFTRESET :
- OMAP_UHH_SYSCONFIG_SOFTRESET);
-
- timeout = jiffies + msecs_to_jiffies(100);
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
- }
-
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
- }
-
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
- }
-
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
-
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
- }
-
- if (is_omap_usbhs_rev2(omap)) {
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbtll_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbtll_p2_fck);
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
- }
-
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
-
- /* The gpio_free migh sleep; so unlock the spinlock */
- spin_unlock_irqrestore(&omap->lock, flags);
-
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -1020,28 +865,18 @@
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
- return;
-
-end_disble:
- spin_unlock_irqrestore(&omap->lock, flags);
}
-int omap_usbhs_enable(struct device *dev)
-{
- return usbhs_enable(dev->parent);
-}
-EXPORT_SYMBOL_GPL(omap_usbhs_enable);
-
-void omap_usbhs_disable(struct device *dev)
-{
- usbhs_disable(dev->parent);
-}
-EXPORT_SYMBOL_GPL(omap_usbhs_disable);
+static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+ .runtime_suspend = usbhs_runtime_suspend,
+ .runtime_resume = usbhs_runtime_resume,
+};
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
+ .pm = &usbhsomap_dev_pm_ops,
},
.remove = __exit_p(usbhs_omap_remove),
};
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index f82413a..953189c 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -83,7 +83,7 @@
#define twl_has_madc() false
#endif
-#ifdef CONFIG_TWL4030_POWER
+#if defined(CONFIG_TWL4030_POWER) || defined(CONFIG_TWL6030_POWER)
#define twl_has_power() true
#else
#define twl_has_power() false
@@ -126,6 +126,7 @@
/* Last - for index max*/
#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
+#define TWL6030_MODULE_LAST TWL6030_MODULE_SLAVE_RES
#define TWL_NUM_SLAVES 4
@@ -141,7 +142,7 @@
#define SUB_CHIP_ID2 2
#define SUB_CHIP_ID3 3
-#define TWL_MODULE_LAST TWL4030_MODULE_LAST
+#define TWL_MODULE_LAST TWL6030_MODULE_LAST
/* Base Address defns for twl4030_map[] */
@@ -187,6 +188,7 @@
#define TWL6030_BASEADD_MEM 0x0017
#define TWL6030_BASEADD_PM_MASTER 0x001F
#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */
+#define TWL6030_BASEADD_PM_SLAVE_RES 0x00AD
#define TWL6030_BASEADD_PM_MISC 0x00E2
#define TWL6030_BASEADD_PM_PUPD 0x00F0
@@ -333,6 +335,7 @@
{ SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
{ SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
{ SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_RES },
};
/*----------------------------------------------------------------------*/
@@ -386,8 +389,9 @@
/* i2c_transfer returns number of messages transferred */
if (ret != 1) {
- pr_err("%s: i2c_write failed to transfer all messages\n",
- DRIVER_NAME);
+ pr_err("%s: i2c_write failed to transfer all messages "
+ "(addr 0x%04x, reg %d, len %d)\n",
+ DRIVER_NAME, twl->address, reg, msg->len);
if (ret < 0)
return ret;
else
@@ -445,8 +449,9 @@
/* i2c_transfer returns number of messages transferred */
if (ret != 2) {
- pr_err("%s: i2c_read failed to transfer all messages\n",
- DRIVER_NAME);
+ pr_err("%s: i2c_read failed to transfer all messages "
+ "(addr 0x%04x, reg %d, len %d)\n",
+ DRIVER_NAME, twl->address, reg, msg->len);
if (ret < 0)
return ret;
else
@@ -827,7 +832,7 @@
/* Phoenix codec driver is probed directly atm */
if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6040-codec",
+ child = add_child(sub_chip_id, "twl6040-audio",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
@@ -970,6 +975,26 @@
features);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_CLK32KAUDIO,
+ pdata->clk32kaudio, features);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VDD3, pdata->vdd3,
+ features);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VMEM, pdata->vmem,
+ features);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_V2V1, pdata->v2v1,
+ features);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
/* 6030 and 6025 share this regulator */
@@ -1238,8 +1263,12 @@
}
/* load power event scripts */
- if (twl_has_power() && pdata->power)
- twl4030_power_init(pdata->power);
+ if (twl_has_power()) {
+ if (twl_class_is_4030() && pdata->power)
+ twl4030_power_init(pdata->power);
+ if (twl_class_is_6030())
+ twl6030_power_init(pdata->power);
+ }
/* Maybe init the T2 Interrupt subsystem */
if (client->irq
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index b0563b6..fa18b02 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -37,6 +37,8 @@
#include <linux/kthread.h>
#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
#include "twl-core.h"
@@ -55,7 +57,7 @@
static int twl6030_interrupt_mapping[24] = {
PWR_INTR_OFFSET, /* Bit 0 PWRON */
PWR_INTR_OFFSET, /* Bit 1 RPWRON */
- PWR_INTR_OFFSET, /* Bit 2 BAT_VLOW */
+ TWL_VLOW_INTR_OFFSET, /* Bit 2 BAT_VLOW */
RTC_INTR_OFFSET, /* Bit 3 RTC_ALARM */
RTC_INTR_OFFSET, /* Bit 4 RTC_PERIOD */
HOTDIE_INTR_OFFSET, /* Bit 5 HOT_DIE */
@@ -82,9 +84,50 @@
};
/*----------------------------------------------------------------------*/
-static unsigned twl6030_irq_base;
+static unsigned twl6030_irq_base, twl6030_irq_end;
+static int twl_irq;
+static bool twl_irq_wake_enabled;
+static struct task_struct *task;
static struct completion irq_event;
+static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
+
+static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ int chained_wakeups;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ chained_wakeups = atomic_read(&twl6030_wakeirqs);
+
+ if (chained_wakeups && !twl_irq_wake_enabled) {
+ if (enable_irq_wake(twl_irq))
+ pr_err("twl6030 IRQ wake enable failed\n");
+ else
+ twl_irq_wake_enabled = true;
+ } else if (!chained_wakeups && twl_irq_wake_enabled) {
+ disable_irq_wake(twl_irq);
+ twl_irq_wake_enabled = false;
+ }
+
+ disable_irq(twl_irq);
+ break;
+
+ case PM_POST_SUSPEND:
+ enable_irq(twl_irq);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block twl6030_irq_pm_notifier_block = {
+ .notifier_call = twl6030_irq_pm_notifier,
+};
/*
* This thread processes interrupts reported by the Primary Interrupt Handler.
@@ -104,6 +147,7 @@
u8 bytes[4];
u32 int_sts;
} sts;
+ u32 int_sts; /* sts.int_sts converted to CPU endianness */
/* Wait for IRQ, then read PIH irq status (also blocking) */
wait_for_completion_interruptible(&irq_event);
@@ -135,9 +179,10 @@
if (sts.bytes[2] & 0x10)
sts.bytes[2] |= 0x08;
- for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
+ int_sts = le32_to_cpu(sts.int_sts);
+ for (i = 0; int_sts; int_sts >>= 1, i++) {
local_irq_disable();
- if (sts.int_sts & 0x1) {
+ if (int_sts & 0x1) {
int module_irq = twl6030_irq_base +
twl6030_interrupt_mapping[i];
generic_handle_irq(module_irq);
@@ -181,6 +226,17 @@
return IRQ_HANDLED;
}
+/*
+ * handle_twl6030_vlow() is a threaded BAT_VLOW interrupt handler. BAT_VLOW
+ * is a secondary interrupt generated in twl6030_irq_thread().
+ */
+static irqreturn_t handle_twl6030_vlow(int irq, void *unused)
+{
+ pr_info("handle_twl6030_vlow: kernel_power_off()\n");
+ kernel_power_off();
+ return IRQ_HANDLED;
+}
+
/*----------------------------------------------------------------------*/
static inline void activate_irq(int irq)
@@ -196,6 +252,16 @@
#endif
}
+int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ atomic_inc(&twl6030_wakeirqs);
+ else
+ atomic_dec(&twl6030_wakeirqs);
+
+ return 0;
+}
+
/*----------------------------------------------------------------------*/
static unsigned twl6030_irq_next;
@@ -299,12 +365,75 @@
}
EXPORT_SYMBOL(twl6030_mmc_card_detect);
+int twl6030_vlow_init(int vlow_irq)
+{
+ int status;
+ u8 val;
+
+ status = twl_i2c_read_u8(TWL_MODULE_PM_SLAVE_RES, &val,
+ REG_VBATMIN_HI_CFG_STATE);
+ if (status < 0) {
+ pr_err("twl6030: I2C err reading REG_VBATMIN_HI_CFG_STATE: %d\n",
+ status);
+ return status;
+ }
+
+ status = twl_i2c_write_u8(TWL_MODULE_PM_SLAVE_RES,
+ val | VBATMIN_VLOW_EN, REG_VBATMIN_HI_CFG_STATE);
+ if (status < 0) {
+ pr_err("twl6030: I2C err writing REG_VBATMIN_HI_CFG_STATE: %d\n",
+ status);
+ return status;
+ }
+
+ status = twl_i2c_read_u8(TWL_MODULE_PIH, &val, REG_INT_MSK_LINE_A);
+ if (status < 0) {
+ pr_err("twl6030: I2C err reading REG_INT_MSK_LINE_A: %d\n",
+ status);
+ return status;
+ }
+
+ status = twl_i2c_write_u8(TWL_MODULE_PIH, val & ~VLOW_INT_MASK,
+ REG_INT_MSK_LINE_A);
+ if (status < 0) {
+ pr_err("twl6030: I2C err writing REG_INT_MSK_LINE_A: %d\n",
+ status);
+ return status;
+ }
+
+ status = twl_i2c_read_u8(TWL_MODULE_PIH, &val, REG_INT_MSK_STS_A);
+ if (status < 0) {
+ pr_err("twl6030: I2C err reading REG_INT_MSK_STS_A: %d\n",
+ status);
+ return status;
+ }
+
+ status = twl_i2c_write_u8(TWL_MODULE_PIH, val & ~VLOW_INT_MASK,
+ REG_INT_MSK_STS_A);
+ if (status < 0) {
+ pr_err("twl6030: I2C err writing REG_INT_MSK_STS_A: %d\n",
+ status);
+ return status;
+ }
+
+ /* install an irq handler for vlow */
+ status = request_threaded_irq(vlow_irq, NULL, handle_twl6030_vlow,
+ IRQF_ONESHOT,
+ "TWL6030-VLOW", handle_twl6030_vlow);
+ if (status < 0) {
+ pr_err("twl6030: could not claim vlow irq %d: %d\n", vlow_irq,
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
int status = 0;
int i;
- struct task_struct *task;
int ret;
u8 mask[4];
@@ -320,6 +449,7 @@
REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */
twl6030_irq_base = irq_base;
+ twl6030_irq_end = irq_end;
/* install an irq handler for each of the modules;
* clone dummy irq_chip since PIH can't *do* anything
@@ -327,10 +457,12 @@
twl6030_irq_chip = dummy_irq_chip;
twl6030_irq_chip.name = "twl6030";
twl6030_irq_chip.irq_set_type = NULL;
+ twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
for (i = irq_base; i < irq_end; i++) {
irq_set_chip_and_handler(i, &twl6030_irq_chip,
handle_simple_irq);
+ irq_set_chip_data(i, (void *)irq_num);
activate_irq(i);
}
@@ -353,10 +485,22 @@
pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
goto fail_irq;
}
+
+ twl_irq = irq_num;
+ register_pm_notifier(&twl6030_irq_pm_notifier_block);
+
+ status = twl6030_vlow_init(twl6030_irq_base + TWL_VLOW_INTR_OFFSET);
+ if (status < 0)
+ goto fail_vlow;
+
return status;
-fail_irq:
+
+fail_vlow:
free_irq(irq_num, &irq_event);
+fail_irq:
+ kthread_stop(task);
+
fail_kthread:
for (i = irq_base; i < irq_end; i++)
irq_set_chip_and_handler(i, NULL, NULL);
@@ -365,11 +509,25 @@
int twl6030_exit_irq(void)
{
+ int i;
+ unregister_pm_notifier(&twl6030_irq_pm_notifier_block);
- if (twl6030_irq_base) {
+ if (task)
+ kthread_stop(task);
+
+ if (!twl6030_irq_base || !twl6030_irq_end) {
pr_err("twl6030: can't yet clean up IRQs?\n");
return -ENOSYS;
}
+
+ free_irq(twl6030_irq_base + TWL_VLOW_INTR_OFFSET,
+ handle_twl6030_vlow);
+
+ free_irq(twl_irq, &irq_event);
+
+ for (i = twl6030_irq_base; i < twl6030_irq_end; i++)
+ irq_set_chip_and_handler(i, NULL, NULL);
+
return 0;
}
diff --git a/drivers/mfd/twl6030-madc.c b/drivers/mfd/twl6030-madc.c
new file mode 100644
index 0000000..f537ba5
--- /dev/null
+++ b/drivers/mfd/twl6030-madc.c
@@ -0,0 +1,354 @@
+/*
+ *
+ * TWL6030 MADC module driver. This driver only implements the ADC read
+ * functions.
+ *
+ * Copyright (C) 2011 Samsung Telecommunications of America
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * Amit Kucheria <amit.kucheria@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+#include <linux/i2c/twl6030-madc.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/wakelock.h>
+
+#define GPADCS (1 << 1)
+#define GPADCR (1 << 0)
+#define REG_TOGGLE1 0x90
+
+#define DRIVER_NAME (twl6030_madc_driver.driver.name)
+static struct platform_driver twl6030_madc_driver;
+
+/*
+ * struct twl6030_madc_data - a container for madc info
+ * @dev - pointer to device structure for madc
+ * @lock - mutex protecting this data structure
+ * @file - debugfs entry used to dump all channel readings
+ * @wakelock - wakelock held while waiting for a conversion to complete
+ */
+struct twl6030_madc_data {
+ struct device *dev;
+ struct mutex lock;
+ struct dentry *file;
+ struct wake_lock wakelock;
+};
+
+static struct twl6030_madc_data *twl6030_madc;
+static u8 gpadc_ctrl_reg;
+
+static inline int twl6030_madc_start_conversion(struct twl6030_madc_data *madc)
+{
+ int ret;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, GPADCS, REG_TOGGLE1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write register 0x%X\n",
+ REG_TOGGLE1);
+ return ret;
+ }
+
+ udelay(100);
+ ret = twl_i2c_write_u8(TWL_MODULE_MADC, TWL6030_MADC_SP1,
+ TWL6030_MADC_CTRL_P1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write register 0x%X\n",
+ TWL6030_MADC_CTRL_P1);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Wait for an ADC conversion to complete
+ * @madc - pointer to the twl6030_madc_data struct
+ * @timeout_ms - timeout value in milliseconds
+ * @status_reg - control/status register to poll for completion
+ * Returns 0 on success, else a negative error value.
+ */
+static int twl6030_madc_wait_conversion_ready(struct twl6030_madc_data *madc,
+ unsigned int timeout_ms,
+ u8 status_reg)
+{
+ unsigned long timeout;
+ unsigned long delta;
+ u8 reg;
+ int ret;
+
+ delta = msecs_to_jiffies(timeout_ms);
+
+ if (delta < 2)
+ delta = 2;
+
+ wake_lock(&madc->wakelock);
+ timeout = jiffies + delta;
+ do {
+ ret = twl_i2c_read_u8(TWL6030_MODULE_MADC, &reg, status_reg);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to read status register 0x%X\n",
+ status_reg);
+ goto unlock;
+ }
+ if (!(reg & TWL6030_MADC_BUSY) && (reg & TWL6030_MADC_EOCP1)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (time_after(jiffies, timeout))
+ break;
+
+ usleep_range(500, 2000);
+ } while (1);
+
+ dev_err(madc->dev, "conversion timeout, ctrl_px=0x%08x\n", reg);
+ ret = -EAGAIN;
+
+unlock:
+ wake_unlock(&madc->wakelock);
+ return ret;
+}
+
+/*
+ * Function to read a particular channel value.
+ * @madc - pointer to struct twl6030_madc_data
+ * @reg - address of the channel's LSB register
+ * Returns the raw conversion result, or a negative error code if an
+ * I2C access fails.
+ */
+static int twl6030_madc_channel_raw_read(struct twl6030_madc_data *madc,
+ u8 reg)
+{
+ u8 msb, lsb;
+ int ret;
+
+ mutex_lock(&madc->lock);
+ ret = twl6030_madc_start_conversion(twl6030_madc);
+ if (ret)
+ goto unlock;
+
+ ret = twl6030_madc_wait_conversion_ready(twl6030_madc, 5,
+ TWL6030_MADC_CTRL_P1);
+ if (ret)
+ goto unlock;
+
+ /*
+ * For each ADC channel, we have MSB and LSB register
+ * pair. MSB address is always LSB address+1. reg parameter is
+ * the address of LSB register
+ */
+ ret = twl_i2c_read_u8(TWL6030_MODULE_MADC, &msb, reg + 1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read MSB register 0x%X\n",
+ reg + 1);
+ goto unlock;
+ }
+ ret = twl_i2c_read_u8(TWL6030_MODULE_MADC, &lsb, reg);
+ if (ret) {
+ dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg);
+ goto unlock;
+ }
+ ret = (int)((msb << 8) | lsb);
+unlock:
+ /* Disable GPADC for power savings. */
+ twl_i2c_write_u8(TWL6030_MODULE_ID1, GPADCR, REG_TOGGLE1);
+ mutex_unlock(&madc->lock);
+ return ret;
+}
+
+/*
+ * Return the raw ADC value for the given channel,
+ * or < 0 on failure.
+ */
+int twl6030_get_madc_conversion(int channel_no)
+{
+ u8 reg = TWL6030_MADC_GPCH0_LSB + (2 * channel_no);
+ if (!twl6030_madc) {
+ pr_err("%s: No ADC device\n", __func__);
+ return -EINVAL;
+ }
+ if (channel_no >= TWL6030_MADC_MAX_CHANNELS) {
+ dev_err(twl6030_madc->dev,
+ "%s: Channel number (%d) exceeds max (%d)\n",
+ __func__, channel_no, TWL6030_MADC_MAX_CHANNELS);
+ return -EINVAL;
+ }
+
+ return twl6030_madc_channel_raw_read(twl6030_madc, reg);
+}
+EXPORT_SYMBOL_GPL(twl6030_get_madc_conversion);
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_twl6030_madc_show(struct seq_file *s, void *_)
+{
+ int i, result;
+ for (i = 0; i < TWL6030_MADC_MAX_CHANNELS; i++) {
+ result = twl6030_get_madc_conversion(i);
+ seq_printf(s, "channel %3d returns result %d\n",
+ i, result);
+ }
+ return 0;
+}
+
+static int debug_twl6030_madc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_twl6030_madc_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = debug_twl6030_madc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define DEBUG_FOPS (&debug_fops)
+
+#else
+#define DEBUG_FOPS NULL
+#endif
+
+/*
+ * Initialize MADC
+ */
+static int __devinit twl6030_madc_probe(struct platform_device *pdev)
+{
+ struct twl6030_madc_data *madc;
+ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+ madc = kzalloc(sizeof(*madc), GFP_KERNEL);
+ if (!madc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, madc);
+ madc->dev = &pdev->dev;
+ mutex_init(&madc->lock);
+ madc->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
+ madc, DEBUG_FOPS);
+ wake_lock_init(&madc->wakelock, WAKE_LOCK_SUSPEND, "twl6030 adc");
+ twl6030_madc = madc;
+ return 0;
+}
+
+static int __devexit twl6030_madc_remove(struct platform_device *pdev)
+{
+ struct twl6030_madc_data *madc = platform_get_drvdata(pdev);
+
+ wake_lock_destroy(&madc->wakelock);
+ mutex_destroy(&madc->lock);
+ free_irq(platform_get_irq(pdev, 0), madc);
+ platform_set_drvdata(pdev, NULL);
+ twl6030_madc = NULL;
+ debugfs_remove(madc->file);
+ kfree(madc);
+
+ return 0;
+}
+
+static int twl6030_madc_suspend(struct device *pdev)
+{
+ int ret;
+ u8 reg_val;
+
+ ret = twl_i2c_read_u8(TWL_MODULE_MADC, &reg_val, TWL6030_MADC_CTRL);
+ if (!ret) {
+ reg_val &= ~(TWL6030_MADC_TEMP1_EN);
+ ret = twl_i2c_write_u8(TWL_MODULE_MADC, reg_val,
+ TWL6030_MADC_CTRL);
+ }
+
+ if (ret) {
+ dev_err(twl6030_madc->dev, "unable to disable madc temp1!\n");
+ gpadc_ctrl_reg = TWL6030_MADC_TEMP1_EN;
+ } else
+ gpadc_ctrl_reg = reg_val;
+
+ return 0;
+};
+
+static int twl6030_madc_resume(struct device *pdev)
+{
+ int ret;
+
+ if (!(gpadc_ctrl_reg & TWL6030_MADC_TEMP1_EN)) {
+ gpadc_ctrl_reg |= TWL6030_MADC_TEMP1_EN;
+ ret = twl_i2c_write_u8(TWL_MODULE_MADC, gpadc_ctrl_reg,
+ TWL6030_MADC_CTRL);
+ if (ret)
+ dev_err(twl6030_madc->dev,
+ "unable to enable madc temp1!\n");
+ }
+
+ return 0;
+};
+
+static const struct dev_pm_ops twl6030_madc_pm_ops = {
+ .suspend = twl6030_madc_suspend,
+ .resume = twl6030_madc_resume,
+};
+
+static struct platform_driver twl6030_madc_driver = {
+ .probe = twl6030_madc_probe,
+ .remove = __devexit_p(twl6030_madc_remove),
+ .driver = {
+ .name = "twl6030_madc",
+ .owner = THIS_MODULE,
+ .pm = &twl6030_madc_pm_ops,
+ },
+};
+
+static int __init twl6030_madc_init(void)
+{
+ return platform_driver_register(&twl6030_madc_driver);
+}
+
+module_init(twl6030_madc_init);
+
+static void __exit twl6030_madc_exit(void)
+{
+ platform_driver_unregister(&twl6030_madc_driver);
+}
+
+module_exit(twl6030_madc_exit);
+
+MODULE_DESCRIPTION("TWL6030 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("platform:twl6030_madc");
diff --git a/drivers/mfd/twl6030-power.c b/drivers/mfd/twl6030-power.c
new file mode 100644
index 0000000..2bbea1a
--- /dev/null
+++ b/drivers/mfd/twl6030-power.c
@@ -0,0 +1,251 @@
+/*
+ * Handling for Resource Mapping for TWL6030 Family of chips
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/i2c/twl.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+
+#include <asm/mach-types.h>
+
+#define VREG_GRP 0
+
+static u8 dev_on_group;
+
+/**
+ * struct twl6030_resource_map - describe the resource mapping for TWL6030
+ * @name: name of the resource
+ * @res_id: resource ID
+ * @base_addr: base address
+ * @group: which device group can control this resource?
+ */
+struct twl6030_resource_map {
+ char *name;
+ u8 res_id;
+ u8 base_addr;
+ u8 group;
+};
+
+/* list of all s/w modifiable resources in TWL6030 */
+static __initdata struct twl6030_resource_map twl6030_res_map[] = {
+ {.res_id = RES_V1V29,.name = "V1V29",.base_addr = 0x40,.group = DEV_GRP_P1,},
+ {.res_id = RES_V1V8,.name = "V1V8",.base_addr = 0x46,.group = DEV_GRP_P1,},
+ {.res_id = RES_V2V1,.name = "V2V1",.base_addr = 0x4c,.group = DEV_GRP_P1,},
+ {.res_id = RES_VDD1,.name = "CORE1",.base_addr = 0x52,.group = DEV_GRP_P1,},
+ {.res_id = RES_VDD2,.name = "CORE2",.base_addr = 0x58,.group = DEV_GRP_P1,},
+ {.res_id = RES_VDD3,.name = "CORE3",.base_addr = 0x5e,.group = DEV_GRP_P1,},
+ {.res_id = RES_VMEM,.name = "VMEM",.base_addr = 0x64,.group = DEV_GRP_P1,},
+ /* VANA cannot be modified */
+ {.res_id = RES_VUAX1,.name = "VUAX1",.base_addr = 0x84,.group = DEV_GRP_P1,},
+ {.res_id = RES_VAUX2,.name = "VAUX2",.base_addr = 0x88,.group = DEV_GRP_P1,},
+ {.res_id = RES_VAUX3,.name = "VAUX3",.base_addr = 0x8c,.group = DEV_GRP_P1,},
+ {.res_id = RES_VCXIO,.name = "VCXIO",.base_addr = 0x90,.group = DEV_GRP_P1,},
+ {.res_id = RES_VDAC,.name = "VDAC",.base_addr = 0x94,.group = DEV_GRP_P1,},
+ {.res_id = RES_VMMC1,.name = "VMMC",.base_addr = 0x98,.group = DEV_GRP_P1,},
+ {.res_id = RES_VPP,.name = "VPP",.base_addr = 0x9c,.group = DEV_GRP_P1,},
+ /* VRTC cannot be modified */
+ {.res_id = RES_VUSBCP,.name = "VUSB",.base_addr = 0xa0,.group = DEV_GRP_P1,},
+ {.res_id = RES_VSIM,.name = "VSIM",.base_addr = 0xa4,.group = DEV_GRP_P1,},
+ {.res_id = RES_REGEN,.name = "REGEN1",.base_addr = 0xad,.group = DEV_GRP_P1,},
+ {.res_id = RES_REGEN2,.name = "REGEN2",.base_addr = 0xb0,.group = DEV_GRP_P1,},
+ {.res_id = RES_SYSEN,.name = "SYSEN",.base_addr = 0xb3,.group = DEV_GRP_P1,},
+ /* NRES_PWRON cannot be modified */
+ /* 32KCLKAO cannot be modified */
+ {.res_id = RES_32KCLKG,.name = "32KCLKG",.base_addr = 0xbc,.group = DEV_GRP_P1,},
+ {.res_id = RES_32KCLKAUDIO,.name = "32KCLKAUDIO",.base_addr = 0xbf,.group = DEV_GRP_P1,},
+ /* BIAS cannot be modified */
+ /* VBATMIN_HI cannot be modified */
+ /* RC6MHZ cannot be modified */
+ /* TEMP cannot be modified */
+};
+
+static struct twl4030_system_config twl6030_sys_config[] = {
+ {.name = "DEV_ON", .group = DEV_GRP_P1,},
+};
+
+/* Actual power groups that TWL understands */
+#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */
+#define P2_GRP_6030 BIT(1) /* "peripherals" */
+#define P1_GRP_6030 BIT(0) /* CPU/Linux */
+
+static __init void twl6030_process_system_config(void)
+{
+ u8 grp;
+ int r;
+ bool i = false;
+
+ struct twl4030_system_config *sys_config;
+ sys_config = twl6030_sys_config;
+
+ while (sys_config && sys_config->name) {
+ if (!strcmp(sys_config->name, "DEV_ON")) {
+ dev_on_group = sys_config->group;
+ i = true;
+ break;
+ }
+ sys_config++;
+ }
+ if (!i)
+ pr_err("%s: Couldn't find DEV_ON resource configuration!"
+ " MOD & CON groups will be kept active.\n", __func__);
+
+ if (dev_on_group) {
+ r = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &grp,
+ TWL6030_PHOENIX_DEV_ON);
+ if (r) {
+ pr_err("%s: Error(%d) reading {addr=0x%02x}",
+ __func__, r, TWL6030_PHOENIX_DEV_ON);
+ /*
+ * On error, reset to 0 so that all the processor
+ * groups are kept active.
+ */
+ dev_on_group = 0;
+ } else {
+ /*
+ * Unmapped processor groups are disabled by writing
+ * 1 to corresponding group in DEV_ON.
+ */
+ grp |= (dev_on_group & DEV_GRP_P1) ? 0 : P1_GRP_6030;
+ grp |= (dev_on_group & DEV_GRP_P2) ? 0 : P2_GRP_6030;
+ grp |= (dev_on_group & DEV_GRP_P3) ? 0 : P3_GRP_6030;
+ dev_on_group = grp;
+ }
+ }
+}
+
+static __init void twl6030_program_map(void)
+{
+ struct twl6030_resource_map *res = twl6030_res_map;
+ int r, i;
+
+ for (i = 0; i < ARRAY_SIZE(twl6030_res_map); i++) {
+ u8 grp = 0;
+
+ /* map back from generic device id to TWL6030 ID */
+ grp |= (res->group & DEV_GRP_P1) ? P1_GRP_6030 : 0;
+ grp |= (res->group & DEV_GRP_P2) ? P2_GRP_6030 : 0;
+ grp |= (res->group & DEV_GRP_P3) ? P3_GRP_6030 : 0;
+
+ r = twl_i2c_write_u8(TWL6030_MODULE_ID0, grp,
+ res->base_addr);
+ if (r)
+ pr_err("%s: Error(%d) programming map %s {addr=0x%02x},"
+ " grp=0x%02X\n", __func__, r, res->name,
+ res->base_addr, grp);
+ res++;
+ }
+}
+
+static __init void twl6030_update_system_map
+ (struct twl4030_system_config *sys_list)
+{
+ int i;
+ struct twl4030_system_config *sys_res;
+
+ while (sys_list && sys_list->name) {
+ sys_res = twl6030_sys_config;
+ for (i = 0; i < ARRAY_SIZE(twl6030_sys_config); i++) {
+ if (!strcmp(sys_res->name, sys_list->name))
+ sys_res->group = sys_list->group &
+ (DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3);
+ sys_res++;
+ }
+ sys_list++;
+ }
+}
+
+static __init void twl6030_update_map(struct twl4030_resconfig *res_list)
+{
+ int i, res_idx = 0;
+ struct twl6030_resource_map *res;
+
+ while (res_list->resource != TWL4030_RESCONFIG_UNDEF) {
+ res = twl6030_res_map;
+ for (i = 0; i < ARRAY_SIZE(twl6030_res_map); i++) {
+ if (res->res_id == res_list->resource) {
+ res->group = res_list->devgroup &
+ (DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3);
+ break;
+ }
+ res++;
+ }
+
+ if (i == ARRAY_SIZE(twl6030_res_map)) {
+ pr_err("%s: in platform_data resource index %d, cannot"
+ " find match for resource 0x%02x. NO Update!\n",
+ __func__, res_idx, res_list->resource);
+ }
+ res_list++;
+ res_idx++;
+ }
+}
+
+
+static int twl6030_power_notifier_cb(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ int r = 0;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ r = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, dev_on_group,
+ TWL6030_PHOENIX_DEV_ON);
+ if (r)
+ pr_err("%s: Error(%d) programming {addr=0x%02x}",
+ __func__, r, TWL6030_PHOENIX_DEV_ON);
+ break;
+ }
+
+ return notifier_from_errno(r);
+}
+
+static struct notifier_block twl6030_power_pm_notifier = {
+ .notifier_call = twl6030_power_notifier_cb,
+};
+
+/**
+ * twl6030_power_init() - Update the power map to reflect connectivity of board
+ * @power_data: power resource map to update (OPTIONAL) - use this if a resource
+ * is used by other devices other than APP (DEV_GRP_P1)
+ */
+void __init twl6030_power_init(struct twl4030_power_data *power_data)
+{
+ int r;
+
+ if (power_data && (!power_data->resource_config &&
+ !power_data->sys_config)) {
+ pr_err("%s: power data from platform without configuration!\n",
+ __func__);
+ return;
+ }
+
+ if (power_data && power_data->resource_config)
+ twl6030_update_map(power_data->resource_config);
+
+ if (power_data && power_data->sys_config)
+ twl6030_update_system_map(power_data->sys_config);
+
+ twl6030_process_system_config();
+
+ twl6030_program_map();
+
+ r = register_pm_notifier(&twl6030_power_pm_notifier);
+ if (r)
+ pr_err("%s: twl6030 power registration failed!\n", __func__);
+
+ return;
+}
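As a hedged illustration of the board-side plumbing twl6030_power_init() expects: a board file can describe which processor groups keep a resource alive through the existing twl4030_resconfig / twl4030_power_data structures (the sys_config list for DEV_ON is handled analogously). The VMMC mapping and function names below are invented for the example, not taken from this patch, and the prototype for twl6030_power_init() is assumed to live in <linux/i2c/twl.h>:

/* Board-file sketch only -- the resource choices are illustrative. */
#include <linux/init.h>
#include <linux/i2c/twl.h>

static struct twl4030_resconfig board_rconfig[] __initdata = {
	/* keep VMMC1 usable by both the app processor and the modem */
	{ .resource = RES_VMMC1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3 },
	{ .resource = TWL4030_RESCONFIG_UNDEF },
};

static struct twl4030_power_data board_power_data __initdata = {
	.resource_config = board_rconfig,
};

/* called from the board init code, e.g. alongside PMIC registration */
static void __init board_pmic_power_init(void)
{
	twl6030_power_init(&board_power_data);
}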
diff --git a/drivers/mfd/twl6030-poweroff.c b/drivers/mfd/twl6030-poweroff.c
new file mode 100644
index 0000000..776a251
--- /dev/null
+++ b/drivers/mfd/twl6030-poweroff.c
@@ -0,0 +1,75 @@
+/*
+ * /drivers/mfd/twl6030-poweroff.c
+ *
+ * Power off device
+ *
+ * Copyright (C) 2011 Texas Instruments Corporation
+ *
+ * Written by Rajeev Kulkarni <rajeevk@ti.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/i2c/twl.h>
+
+#define TWL6030_PHOENIX_DEV_ON 0x25
+#define APP_DEVOFF (1<<0)
+#define CON_DEVOFF (1<<1)
+#define MOD_DEVOFF (1<<2)
+
+void twl6030_poweroff(void)
+{
+ u8 val = 0;
+ int err = 0;
+
+ err = twl_i2c_read_u8(TWL6030_MODULE_ID0, &val,
+ TWL6030_PHOENIX_DEV_ON);
+ if (err) {
+ pr_warning("I2C error %d reading PHOENIX_DEV_ON\n", err);
+ return;
+ }
+
+ val |= APP_DEVOFF | CON_DEVOFF | MOD_DEVOFF;
+
+ err = twl_i2c_write_u8(TWL6030_MODULE_ID0, val,
+ TWL6030_PHOENIX_DEV_ON);
+
+ if (err) {
+ pr_warning("I2C error %d writing PHOENIX_DEV_ON\n", err);
+ return;
+ }
+
+ return;
+}
+
+static int __init twl6030_poweroff_init(void)
+{
+ pm_power_off = twl6030_poweroff;
+
+ return 0;
+}
+
+static void __exit twl6030_poweroff_exit(void)
+{
+ pm_power_off = NULL;
+}
+
+module_init(twl6030_poweroff_init);
+module_exit(twl6030_poweroff_exit);
+
+MODULE_DESCRIPTION("TLW6030 device power off");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rajeev Kulkarni");
diff --git a/drivers/mfd/twl6040-codec.c b/drivers/mfd/twl6040-codec.c
new file mode 100644
index 0000000..4633d70
--- /dev/null
+++ b/drivers/mfd/twl6040-codec.c
@@ -0,0 +1,820 @@
+/*
+ * MFD driver for twl6040 codec submodule
+ *
+ * Authors: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2010 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040-codec.h>
+
+int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret < 0) {
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+ }
+ mutex_unlock(&twl6040->io_mutex);
+
+ return val;
+}
+EXPORT_SYMBOL(twl6040_reg_read);
+
+int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+ mutex_unlock(&twl6040->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_reg_write);
+
+int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val |= mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_bits);
+
+int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val &= ~mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_clear_bits);
+
+/* twl6040 codec manual power-up sequence */
+static int twl6040_power_up(struct twl6040 *twl6040)
+{
+ u8 ncpctl, ldoctl, lppllctl, accctl;
+ int ret;
+
+ ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+ accctl = twl6040_reg_read(twl6040, TWL6040_REG_ACCCTL);
+
+ /* enable reference system */
+ ldoctl |= TWL6040_REFENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ return ret;
+ msleep(10);
+
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto osc_err;
+ udelay(10);
+
+ /* enable high-side ldo */
+ ldoctl |= TWL6040_HSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto hsldo_err;
+ udelay(244);
+
+ /* enable negative charge pump */
+ ncpctl |= TWL6040_NCPENA | TWL6040_NCPOPEN;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ if (ret)
+ goto ncp_err;
+ udelay(488);
+
+ /* enable low-side ldo */
+ ldoctl |= TWL6040_LSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto lsldo_err;
+ udelay(244);
+
+ /* enable low-power pll */
+ lppllctl |= TWL6040_LPLLENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ if (ret)
+ goto lppll_err;
+
+ /* reset state machine */
+ accctl |= TWL6040_RESETSPLIT;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, accctl);
+ if (ret)
+ goto rst_err;
+ mdelay(5);
+ accctl &= ~TWL6040_RESETSPLIT;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, accctl);
+ if (ret)
+ goto rst_err;
+
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto rst_err;
+
+ return 0;
+
+rst_err:
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+lppll_err:
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+lsldo_err:
+ ncpctl &= ~(TWL6040_NCPENA | TWL6040_NCPOPEN);
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ udelay(488);
+ncp_err:
+ ldoctl &= ~TWL6040_HSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+hsldo_err:
+ ldoctl &= ~TWL6040_OSCENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+osc_err:
+ ldoctl &= ~TWL6040_REFENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ msleep(10);
+
+ return ret;
+}
+
+/* twl6040 codec manual power-down sequence */
+static int twl6040_power_down(struct twl6040 *twl6040)
+{
+ u8 ncpctl, ldoctl, lppllctl, accctl;
+ int ret;
+
+ ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+ accctl = twl6040_reg_read(twl6040, TWL6040_REG_ACCCTL);
+
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ return ret;
+ udelay(10);
+
+ /* disable low-power pll */
+ lppllctl &= ~TWL6040_LPLLENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ if (ret)
+ goto lppll_err;
+
+ /* disable low-side ldo */
+ ldoctl &= ~TWL6040_LSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto lsldo_err;
+ udelay(244);
+
+ /* disable negative charge pump */
+ ncpctl &= ~(TWL6040_NCPENA | TWL6040_NCPOPEN);
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ if (ret)
+ goto ncp_err;
+ udelay(488);
+
+ /* disable high-side ldo */
+ ldoctl &= ~TWL6040_HSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto hsldo_err;
+ udelay(244);
+
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto osc_err;
+
+ /* disable reference system */
+ ldoctl &= ~TWL6040_REFENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto ref_err;
+ msleep(10);
+
+ return 0;
+
+ref_err:
+ ldoctl |= TWL6040_OSCENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(10);
+osc_err:
+ ldoctl |= TWL6040_HSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+hsldo_err:
+ ncpctl |= TWL6040_NCPENA | TWL6040_NCPOPEN;
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ udelay(488);
+ncp_err:
+ ldoctl |= TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+lsldo_err:
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+lppll_err:
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ accctl |= TWL6040_RESETSPLIT;
+ twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, accctl);
+ mdelay(5);
+ accctl &= ~TWL6040_RESETSPLIT;
+ twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, accctl);
+ ldoctl &= ~TWL6040_OSCENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ msleep(10);
+
+ return ret;
+}
+
+static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ if (intid & TWL6040_READYINT)
+ complete(&twl6040->ready);
+
+ return IRQ_HANDLED;
+}
+
+static int twl6040_power_up_completion(struct twl6040 *twl6040,
+ int naudint)
+{
+ int time_left;
+ int round = 0;
+ int ret = 0;
+ int retry = 0;
+ u8 intid;
+ u8 ncpctl;
+ u8 ldoctl;
+ u8 lppllctl;
+ u8 ncpctl_exp;
+ u8 ldoctl_exp;
+ u8 lppllctl_exp;
+
+ /* NCPCTL expected value: NCP enabled */
+ ncpctl_exp = (TWL6040_TSHUTENA | TWL6040_NCPENA);
+
+ /* LDOCTL expected value: HS/LS LDOs and Reference enabled */
+ ldoctl_exp = (TWL6040_REFENA | TWL6040_HSLDOENA | TWL6040_LSLDOENA);
+
+ /* LPPLLCTL expected value: Low-Power PLL enabled */
+ lppllctl_exp = TWL6040_LPLLENA;
+
+ do {
+ gpio_set_value(twl6040->audpwron, 1);
+ time_left = wait_for_completion_timeout(&twl6040->ready,
+ msecs_to_jiffies(144));
+ if (!time_left) {
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ if (!(intid & TWL6040_READYINT)) {
+ dev_err(twl6040->dev,
+ "timeout waiting for READYINT\n");
+ return -ETIMEDOUT;
+ }
+ }
+ /*
+ * Power on seemingly completed.
+ * Look for clues that the twl6040 might still be booting.
+ */
+
+ retry = 0;
+ ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
+ if (ncpctl != ncpctl_exp)
+ retry++;
+
+ ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
+ if (ldoctl != ldoctl_exp)
+ retry++;
+
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+ if (lppllctl != lppllctl_exp)
+ retry++;
+
+ if (retry) {
+ dev_err(twl6040->dev,
+ "NCPCTL: 0x%02x (should be 0x%02x)\n"
+ "LDOCTL: 0x%02x (should be 0x%02x)\n"
+ "LPLLCTL: 0x%02x (should be 0x%02x)\n",
+ ncpctl, ncpctl_exp,
+ ldoctl, ldoctl_exp,
+ lppllctl, lppllctl_exp);
+ round++;
+ gpio_set_value(twl6040->audpwron, 0);
+ usleep_range(1000, 1500);
+ continue;
+ }
+ } while (round && (round < 3));
+
+ if (round >= 3) {
+ dev_err(twl6040->dev,
+ "Automatic power on failed, reverting to manual\n");
+ twl6040->audpwron = -EINVAL;
+ ret = twl6040_power_up(twl6040);
+ if (ret)
+ dev_err(twl6040->dev, "Manual power-up failed\n");
+ }
+
+ return ret;
+}
+
+static int twl6040_power(struct twl6040 *twl6040, int enable)
+{
+ struct twl4030_codec_data *pdata = dev_get_platdata(twl6040->dev);
+ int audpwron = twl6040->audpwron;
+ int naudint = twl6040->irq;
+ int ret = 0;
+
+ if (enable) {
+ /* enable 32kHz external clock */
+ if (pdata->set_ext_clk32k) {
+ ret = pdata->set_ext_clk32k(true);
+ if (ret) {
+ dev_err(twl6040->dev,
+ "failed to enable CLK32K %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* disable internal 32kHz oscillator */
+ twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL,
+ TWL6040_CLK32KSEL);
+
+ if (gpio_is_valid(audpwron)) {
+ /* wait for power-up completion */
+ ret = twl6040_power_up_completion(twl6040, naudint);
+ if (ret) {
+ dev_err(twl6040->dev,
+ "automatic power-up failed\n");
+ return ret;
+ }
+ } else {
+ /* use manual power-up sequence */
+ ret = twl6040_power_up(twl6040);
+ if (ret) {
+ dev_err(twl6040->dev,
+ "manual power-up failed\n");
+ return ret;
+ }
+ }
+ twl6040->pll = TWL6040_LPPLL_ID;
+ twl6040->sysclk = 19200000;
+ } else {
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 0);
+
+ /* power-down sequence latency */
+ udelay(500);
+ } else {
+ /* use manual power-down sequence */
+ ret = twl6040_power_down(twl6040);
+ if (ret) {
+ dev_err(twl6040->dev,
+ "manual power-down failed\n");
+ return ret;
+ }
+ }
+
+ /* enable internal 32kHz oscillator */
+ twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL,
+ TWL6040_CLK32KSEL);
+
+ /* disable 32kHz external clock */
+ if (pdata->set_ext_clk32k) {
+ ret = pdata->set_ext_clk32k(false);
+ if (ret)
+ dev_err(twl6040->dev,
+ "failed to disable CLK32K %d\n", ret);
+ }
+
+ twl6040->pll = TWL6040_NOPLL_ID;
+ twl6040->sysclk = 0;
+ }
+
+ twl6040->powered = enable;
+
+ return ret;
+}
+
+int twl6040_enable(struct twl6040 *twl6040)
+{
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+ if (!twl6040->power_count++)
+ ret = twl6040_power(twl6040, 1);
+ mutex_unlock(&twl6040->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_enable);
+
+int twl6040_disable(struct twl6040 *twl6040)
+{
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+ WARN(!twl6040->power_count, "TWL6040 is already disabled");
+ if (!--twl6040->power_count)
+ ret = twl6040_power(twl6040, 0);
+ mutex_unlock(&twl6040->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_disable);
+
+int twl6040_is_enabled(struct twl6040 *twl6040)
+{
+ return twl6040->power_count;
+}
+EXPORT_SYMBOL(twl6040_is_enabled);
+
+int twl6040_set_pll(struct twl6040 *twl6040, enum twl6040_pll_id id,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ u8 hppllctl, lppllctl;
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+
+ hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+
+ switch (id) {
+ case TWL6040_LPPLL_ID:
+ /* lppll divider */
+ switch (freq_out) {
+ case 17640000:
+ lppllctl |= TWL6040_LPLLFIN;
+ break;
+ case 19200000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ break;
+ default:
+ dev_err(twl6040->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ switch (freq_in) {
+ case 32768:
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ mdelay(5);
+ lppllctl &= ~TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ hppllctl &= ~TWL6040_HPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
+ hppllctl);
+ break;
+ default:
+ dev_err(twl6040->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ twl6040->pll = TWL6040_LPPLL_ID;
+ break;
+ case TWL6040_HPPLL_ID:
+ /* high-performance pll can provide only 19.2 MHz */
+ if (freq_out != 19200000) {
+ dev_err(twl6040->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ hppllctl &= ~TWL6040_MCLK_MSK;
+
+ switch (freq_in) {
+ case 12000000:
+ /* mclk input, pll enabled */
+ hppllctl |= TWL6040_MCLK_12000KHZ |
+ TWL6040_HPLLSQRBP |
+ TWL6040_HPLLENA;
+ break;
+ case 19200000:
+ /* mclk input, pll disabled */
+ hppllctl |= TWL6040_MCLK_19200KHZ |
+ TWL6040_HPLLSQRENA |
+ TWL6040_HPLLBP;
+ break;
+ case 26000000:
+ /* mclk input, pll enabled */
+ hppllctl |= TWL6040_MCLK_26000KHZ |
+ TWL6040_HPLLSQRBP |
+ TWL6040_HPLLENA;
+ break;
+ case 38400000:
+ /* clk slicer, pll disabled */
+ hppllctl |= TWL6040_MCLK_38400KHZ |
+ TWL6040_HPLLSQRENA |
+ TWL6040_HPLLBP;
+ break;
+ default:
+ dev_err(twl6040->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
+ udelay(500);
+ lppllctl |= TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ twl6040->pll = TWL6040_HPPLL_ID;
+ break;
+ default:
+ dev_err(twl6040->dev, "unknown pll id %d\n", id);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ twl6040->sysclk = freq_out;
+
+pll_out:
+ mutex_unlock(&twl6040->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_pll);
+
+enum twl6040_pll_id twl6040_get_pll(struct twl6040 *twl6040)
+{
+ return twl6040->pll;
+}
+EXPORT_SYMBOL(twl6040_get_pll);
+
+unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
+{
+ return twl6040->sysclk;
+}
+EXPORT_SYMBOL(twl6040_get_sysclk);
+
+int twl6040_get_icrev(struct twl6040 *twl6040)
+{
+ return twl6040->icrev;
+}
+EXPORT_SYMBOL(twl6040_get_icrev);
+
+static int __devinit twl6040_probe(struct platform_device *pdev)
+{
+ struct twl4030_codec_data *pdata = pdev->dev.platform_data;
+ struct twl6040 *twl6040;
+ struct mfd_cell *cell = NULL;
+ unsigned int naudint;
+ int audpwron;
+ int ret, children = 0;
+ u8 accctl;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data is missing\n");
+ return -EINVAL;
+ }
+
+ twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
+ if (!twl6040)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, twl6040);
+
+ twl6040->dev = &pdev->dev;
+ mutex_init(&twl6040->mutex);
+ mutex_init(&twl6040->io_mutex);
+
+ twl6040->icrev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
+ if (twl6040->icrev < 0) {
+ ret = twl6040->icrev;
+ goto gpio1_err;
+ }
+
+ if (pdata && (twl6040_get_icrev(twl6040) > TWL6040_REV_1_0))
+ audpwron = pdata->audpwron_gpio;
+ else
+ audpwron = -EINVAL;
+
+ if (pdata)
+ naudint = pdata->naudint_irq;
+ else
+ naudint = 0;
+
+ twl6040->audpwron = audpwron;
+ twl6040->powered = 0;
+ twl6040->irq = naudint;
+ twl6040->irq_base = pdata->irq_base;
+ init_completion(&twl6040->ready);
+
+ if (gpio_is_valid(audpwron)) {
+ ret = gpio_request(audpwron, "audpwron");
+ if (ret)
+ goto gpio1_err;
+
+ ret = gpio_direction_output(audpwron, 0);
+ if (ret)
+ goto gpio2_err;
+ }
+
+ if (naudint) {
+ /* codec interrupt */
+ ret = twl6040_irq_init(twl6040);
+ if (ret)
+ goto gpio2_err;
+
+ ret = twl6040_request_irq(twl6040, TWL6040_IRQ_READY,
+ twl6040_naudint_handler, "twl6040_irq_ready",
+ twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
+ ret);
+ goto irq_err;
+ }
+ }
+
+ /* dual-access registers controlled by I2C only */
+ accctl = twl6040_reg_read(twl6040, TWL6040_REG_ACCCTL);
+ twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, accctl | TWL6040_I2CSEL);
+
+ if (pdata->get_ext_clk32k) {
+ ret = pdata->get_ext_clk32k();
+ if (ret) {
+ dev_err(twl6040->dev,
+ "failed to get external 32kHz clock %d\n",
+ ret);
+ goto clk32k_err;
+ }
+ }
+
+ if (pdata->audio) {
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ cell->platform_data = pdata->audio;
+ cell->pdata_size = sizeof(*pdata->audio);
+ children++;
+ }
+
+ if (pdata->vibra) {
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-vibra";
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ children++;
+ }
+
+ if (children) {
+ ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
+ children, NULL, 0);
+ if (ret)
+ goto mfd_err;
+ } else {
+ dev_err(&pdev->dev, "No platform data found for children\n");
+ ret = -ENODEV;
+ goto mfd_err;
+ }
+
+ return 0;
+
+mfd_err:
+ if (pdata->put_ext_clk32k)
+ pdata->put_ext_clk32k();
+clk32k_err:
+ if (naudint)
+ twl6040_free_irq(twl6040, TWL6040_IRQ_READY, twl6040);
+irq_err:
+ if (naudint)
+ twl6040_irq_exit(twl6040);
+gpio2_err:
+ if (gpio_is_valid(audpwron))
+ gpio_free(audpwron);
+gpio1_err:
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+ return ret;
+}
+
+static int __devexit twl6040_remove(struct platform_device *pdev)
+{
+ struct twl6040 *twl6040 = platform_get_drvdata(pdev);
+ struct twl4030_codec_data *pdata = dev_get_platdata(twl6040->dev);
+ int audpwron = twl6040->audpwron;
+ int naudint = twl6040->irq;
+
+ twl6040_disable(twl6040);
+
+ twl6040_free_irq(twl6040, TWL6040_IRQ_READY, twl6040);
+
+ if (gpio_is_valid(audpwron))
+ gpio_free(audpwron);
+
+ if (naudint)
+ twl6040_irq_exit(twl6040);
+
+ mfd_remove_devices(&pdev->dev);
+
+ if (pdata->put_ext_clk32k)
+ pdata->put_ext_clk32k();
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+
+ return 0;
+}
+
+static struct platform_driver twl6040_driver = {
+ .probe = twl6040_probe,
+ .remove = __devexit_p(twl6040_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl6040-audio",
+ },
+};
+
+static int __init twl6040_init(void)
+{
+ return platform_driver_register(&twl6040_driver);
+}
+module_init(twl6040_init);
+
+static void __exit twl6040_exit(void)
+{
+ platform_driver_unregister(&twl6040_driver);
+}
+
+module_exit(twl6040_exit);
+
+MODULE_DESCRIPTION("TWL6040 MFD");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:twl6040-audio");
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
new file mode 100644
index 0000000..6beac7a
--- /dev/null
+++ b/drivers/mfd/twl6040-irq.c
@@ -0,0 +1,196 @@
+/*
+ * twl6040-irq.c -- Interrupt controller support for TWL6040
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040-codec.h>
+
+struct twl6040_irq_data {
+ int mask;
+ int status;
+};
+
+static struct twl6040_irq_data twl6040_irqs[] = {
+ {
+ .mask = TWL6040_THMSK,
+ .status = TWL6040_THINT,
+ },
+ {
+ .mask = TWL6040_PLUGMSK,
+ .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
+ },
+ {
+ .mask = TWL6040_HOOKMSK,
+ .status = TWL6040_HOOKINT,
+ },
+ {
+ .mask = TWL6040_HFMSK,
+ .status = TWL6040_HFINT,
+ },
+ {
+ .mask = TWL6040_VIBMSK,
+ .status = TWL6040_VIBINT,
+ },
+ {
+ .mask = TWL6040_READYMSK,
+ .status = TWL6040_READYINT,
+ },
+};
+
+static inline struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
+ int irq)
+{
+ return &twl6040_irqs[irq - twl6040->irq_base];
+}
+
+static void twl6040_irq_lock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_sync_unlock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ /* write back to hardware any change in irq mask */
+ if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
+ twl6040->irq_masks_cache = twl6040->irq_masks_cur;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
+ twl6040->irq_masks_cur);
+ }
+
+ mutex_unlock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_unmask(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040, data->irq);
+
+ twl6040->irq_masks_cur &= ~irq_data->mask;
+}
+
+static void twl6040_irq_mask(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040, data->irq);
+
+ twl6040->irq_masks_cur |= irq_data->mask;
+}
+
+static struct irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irq_bus_lock = twl6040_irq_lock,
+ .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
+ .irq_mask = twl6040_irq_mask,
+ .irq_unmask = twl6040_irq_unmask,
+};
+
+static irqreturn_t twl6040_irq_thread(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid;
+ int i;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* apply masking and report (backwards to handle READYINT first) */
+ for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
+ if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
+ intid &= ~twl6040_irqs[i].status;
+ if (intid & twl6040_irqs[i].status)
+ handle_nested_irq(twl6040->irq_base + i);
+ }
+
+ /* ack unmasked irqs */
+ twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
+
+ return IRQ_HANDLED;
+}
+
+int twl6040_irq_init(struct twl6040 *twl6040)
+{
+ int cur_irq, ret;
+ u8 val;
+
+ mutex_init(&twl6040->irq_mutex);
+
+ /* mask the individual interrupt sources */
+ twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
+ twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+
+ if (!twl6040->irq) {
+ dev_warn(twl6040->dev,
+ "no interrupt specified, no interrupts\n");
+ twl6040->irq_base = 0;
+ return 0;
+ }
+
+ if (!twl6040->irq_base) {
+ dev_err(twl6040->dev,
+ "no interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = twl6040->irq_base;
+ cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, twl6040);
+ irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ handle_level_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
+ IRQF_ONESHOT,
+ "twl6040", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
+ twl6040->irq, ret);
+ return ret;
+ }
+
+ /* reset interrupts */
+ val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* interrupts cleared on write */
+ val = twl6040_reg_read(twl6040, TWL6040_REG_ACCCTL)
+ & ~TWL6040_INTCLRMODE;
+ twl6040_reg_write(twl6040, TWL6040_REG_ACCCTL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(twl6040_irq_init);
+
+void twl6040_irq_exit(struct twl6040 *twl6040)
+{
+ if (twl6040->irq)
+ free_irq(twl6040->irq, twl6040);
+}
+EXPORT_SYMBOL(twl6040_irq_exit);
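For completeness, a hedged sketch of how a driver might hook one of the nested interrupts dispatched by twl6040_irq_thread() above. The slot index follows the ordering of twl6040_irqs[] (READY is the last entry), while the handler and names are invented for the example; code in this tree appears to go through a twl6040_request_irq() helper instead:

/* Illustrative only: hook the READY nested interrupt (slot 5 above). */
#include <linux/interrupt.h>
#include <linux/mfd/twl6040-codec.h>

static irqreturn_t example_ready_handler(int irq, void *data)
{
	struct twl6040 *twl6040 = data;

	complete(&twl6040->ready);
	return IRQ_HANDLED;
}

static int example_hook_ready(struct twl6040 *twl6040)
{
	/* nested irq: no primary handler, only a threaded handler */
	return request_threaded_irq(twl6040->irq_base + 5, NULL,
				    example_ready_handler, IRQF_ONESHOT,
				    "example_twl6040_ready", twl6040);
}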
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 051bfe9..9344c71 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -525,6 +525,13 @@
If your platform uses a different flash partition label for storing
crashdumps, enter it here.
+config OMAP_DIE_TEMP_SENSOR
+ bool "OMAP On-Die temp sensor support"
+ depends on OMAP_TEMP_SENSOR
+ help
+ Say Y here to enable support for the on-die temperature
+ sensor on OMAP platforms.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 606b27f..ff217f2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,3 +50,4 @@
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_APANIC) += apanic.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+obj-$(CONFIG_OMAP_DIE_TEMP_SENSOR) += omap_temp_sensor.o
diff --git a/drivers/misc/omap_temp_sensor.c b/drivers/misc/omap_temp_sensor.c
new file mode 100644
index 0000000..1c2788c
--- /dev/null
+++ b/drivers/misc/omap_temp_sensor.c
@@ -0,0 +1,748 @@
+/*
+ * OMAP4 Temperature sensor driver file
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: J Keerthy <j-keerthy@ti.com>
+ * Author: Moiz Sonasath <m-sonasath@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <plat/common.h>
+#include <plat/omap-pm.h>
+#include <plat/omap_device.h>
+#include <plat/temperature_sensor.h>
+#include <plat/omap-pm.h>
+
+/* TO DO: This needs to be fixed */
+#include "../../../../arch/arm/mach-omap2/control.h"
+/* #include <plat/control.h> */
+
+#include <mach/ctrl_module_core_44xx.h>
+
+extern void omap_thermal_throttle(void);
+extern void omap_thermal_unthrottle(void);
+
+static void throttle_delayed_work_fn(struct work_struct *work);
+
+#define THROTTLE_DELAY_MS 1000
+
+#define TSHUT_THRESHOLD_TSHUT_HOT 110000 /* 110 deg C */
+#define TSHUT_THRESHOLD_TSHUT_COLD 100000 /* 100 deg C */
+#define BGAP_THRESHOLD_T_HOT 64000 /* 64 deg C */
+#define BGAP_THRESHOLD_T_COLD 61000 /* 61 deg C */
+#define OMAP_ADC_START_VALUE 530
+#define OMAP_ADC_END_VALUE 923
+
+/*
+ * omap_temp_sensor structure
+ * @pdev - Platform device pointer
+ * @dev - device pointer
+ * @clock - Clock pointer
+ * @lock - Spinlock protecting sysfs, irq and PM paths
+ * @irq - MPU IRQ number for the thermal alert
+ * @tshut_irq - Thermal shutdown IRQ
+ * @phy_base - Physical base of the temp I/O
+ * @is_efuse_valid - Flag to determine if eFuse is valid or not
+ * @clk_on - Manages the current clock state
+ * @clk_rate - Holds current clock rate
+ */
+struct omap_temp_sensor {
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk *clock;
+ struct spinlock lock;
+ unsigned int irq;
+ unsigned int tshut_irq;
+ unsigned long phy_base;
+ int is_efuse_valid;
+ u8 clk_on;
+ unsigned long clk_rate;
+ u32 current_temp;
+ struct delayed_work throttle_work;
+};
+
+#ifdef CONFIG_PM
+struct omap_temp_sensor_regs {
+ u32 temp_sensor_ctrl;
+ u32 bg_ctrl;
+ u32 bg_counter;
+ u32 bg_threshold;
+ u32 temp_sensor_tshut_threshold;
+};
+
+static struct omap_temp_sensor_regs temp_sensor_context;
+static struct omap_temp_sensor *temp_sensor_pm;
+#endif
+
+/*
+ * Temperature values in millidegrees Celsius for ADC codes from 530 to 923
+ */
+static int adc_to_temp[] = {
+ -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200,
+ -37800, -37300, -36800, -36400, -36000, -35600, -35200, -34800,
+ -34300, -33800, -33400, -33000, -32600, -32200, -31800, -31300,
+ -30800, -30400, -30000, -29600, -29200, -28700, -28200, -27800,
+ -27400, -27000, -26600, -26200, -25700, -25200, -24800, -24400,
+ -24000, -23600, -23200, -22700, -22200, -21800, -21400, -21000,
+ -20600, -20200, -19700, -19200, -18800, -18400, -18000, -17600,
+ -17200, -16700, -16200, -15800, -15400, -15000, -14600, -14200,
+ -13700, -13200, -12800, -12400, -12000, -11600, -11200, -10700,
+ -10200, -9800, -9400, -9000, -8600, -8200, -7700, -7200, -6800,
+ -6400, -6000, -5600, -5200, -4800, -4300, -3800, -3400, -3000,
+ -2600, -2200, -1800, -1300, -800, -400, 0, 400, 800, 1200, 1600,
+ 2100, 2600, 3000, 3400, 3800, 4200, 4600, 5100, 5600, 6000, 6400,
+ 6800, 7200, 7600, 8000, 8500, 9000, 9400, 9800, 10200, 10600, 11000,
+ 11400, 11900, 12400, 12800, 13200, 13600, 14000, 14400, 14800,
+ 15300, 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18700,
+ 19200, 19600, 20000, 20400, 20800, 21200, 21600, 22100, 22600,
+ 23000, 23400, 23800, 24200, 24600, 25000, 25400, 25900, 26400,
+ 26800, 27200, 27600, 28000, 28400, 28800, 29300, 29800, 30200,
+ 30600, 31000, 31400, 31800, 32200, 32600, 33100, 33600, 34000,
+ 34400, 34800, 35200, 35600, 36000, 36400, 36800, 37300, 37800,
+ 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41100, 41600,
+ 42000, 42400, 42800, 43200, 43600, 44000, 44400, 44800, 45300,
+ 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, 49000,
+ 49500, 50000, 50400, 50800, 51200, 51600, 52000, 52400, 52800,
+ 53200, 53700, 54200, 54600, 55000, 55400, 55800, 56200, 56600,
+ 57000, 57400, 57800, 58200, 58700, 59200, 59600, 60000, 60400,
+ 60800, 61200, 61600, 62000, 62400, 62800, 63300, 63800, 64200,
+ 64600, 65000, 65400, 65800, 66200, 66600, 67000, 67400, 67800,
+ 68200, 68700, 69200, 69600, 70000, 70400, 70800, 71200, 71600,
+ 72000, 72400, 72800, 73200, 73600, 74100, 74600, 75000, 75400,
+ 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000,
+ 79400, 79800, 80300, 80800, 81200, 81600, 82000, 82400, 82800,
+ 83200, 83600, 84000, 84400, 84800, 85200, 85600, 86000, 86400,
+ 86800, 87300, 87800, 88200, 88600, 89000, 89400, 89800, 90200,
+ 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, 93800,
+ 94200, 94600, 95000, 95500, 96000, 96400, 96800, 97200, 97600,
+ 98000, 98400, 98800, 99200, 99600, 100000, 100400, 100800, 101200,
+ 101600, 102000, 102400, 102800, 103200, 103600, 104000, 104400,
+ 104800, 105200, 105600, 106100, 106600, 107000, 107400, 107800,
+ 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000,
+ 111400, 111800, 112200, 112600, 113000, 113400, 113800, 114200,
+ 114600, 115000, 115400, 115800, 116200, 116600, 117000, 117400,
+ 117800, 118200, 118600, 119000, 119400, 119800, 120200, 120600,
+ 121000, 121400, 121800, 122200, 122600, 123000
+};
+
+static unsigned long omap_temp_sensor_readl(struct omap_temp_sensor
+ *temp_sensor, u32 reg)
+{
+ return omap_ctrl_readl(temp_sensor->phy_base + reg);
+}
+
+static void omap_temp_sensor_writel(struct omap_temp_sensor *temp_sensor,
+ u32 val, u32 reg)
+{
+ omap_ctrl_writel(val, (temp_sensor->phy_base + reg));
+}
+
+static int adc_to_temp_conversion(int adc_val)
+{
+ if (adc_val < OMAP_ADC_START_VALUE || adc_val > OMAP_ADC_END_VALUE) {
+ pr_err("%s:Temp read is invalid %i\n", __func__, adc_val);
+ return -EINVAL;
+ }
+
+ return adc_to_temp[adc_val - OMAP_ADC_START_VALUE];
+}
+
+static int temp_to_adc_conversion(long temp)
+{
+ int i;
+
+ for (i = 0; i <= OMAP_ADC_END_VALUE - OMAP_ADC_START_VALUE; i++)
+ if (temp < adc_to_temp[i])
+ return OMAP_ADC_START_VALUE + i - 1;
+ return -EINVAL;
+}
+
+static int omap_read_current_temp(struct omap_temp_sensor *temp_sensor)
+{
+ int adc;
+
+ adc = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
+ adc &= (OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK);
+
+ if (!temp_sensor->is_efuse_valid)
+ pr_err_once("%s: Invalid EFUSE, Non-trimmed BGAP, "
+ "Temp not accurate\n", __func__);
+
+ if (adc < OMAP_ADC_START_VALUE || adc > OMAP_ADC_END_VALUE) {
+ pr_err("%s: Invalid adc code reported by the sensor %d\n",
+ __func__, adc);
+ return -EINVAL;
+ }
+
+ return adc_to_temp_conversion(adc);
+}
+
+static void omap_configure_temp_sensor_thresholds(struct omap_temp_sensor
+ *temp_sensor)
+{
+ u32 temp = 0, t_hot, t_cold, tshut_hot, tshut_cold;
+
+ t_hot = temp_to_adc_conversion(BGAP_THRESHOLD_T_HOT);
+ t_cold = temp_to_adc_conversion(BGAP_THRESHOLD_T_COLD);
+
+ if ((t_hot == -EINVAL) || (t_cold == -EINVAL)) {
+ pr_err("%s:Temp thresholds out of bounds\n", __func__);
+ return;
+ }
+ temp |= ((t_hot << OMAP4_T_HOT_SHIFT) | (t_cold << OMAP4_T_COLD_SHIFT));
+ omap_temp_sensor_writel(temp_sensor, temp, BGAP_THRESHOLD_OFFSET);
+
+ tshut_hot = temp_to_adc_conversion(TSHUT_THRESHOLD_TSHUT_HOT);
+ tshut_cold = temp_to_adc_conversion(TSHUT_THRESHOLD_TSHUT_COLD);
+ if ((tshut_hot == -EINVAL) || (tshut_cold == -EINVAL)) {
+ pr_err("%s:Temp shutdown thresholds out of bounds\n", __func__);
+ return;
+ }
+ temp |= ((tshut_hot << OMAP4_TSHUT_HOT_SHIFT)
+ | (tshut_cold << OMAP4_TSHUT_COLD_SHIFT));
+ omap_temp_sensor_writel(temp_sensor, temp, BGAP_TSHUT_OFFSET);
+}
+
+static void omap_configure_temp_sensor_counter(struct omap_temp_sensor
+ *temp_sensor, u32 counter)
+{
+ u32 val;
+
+ val = omap_temp_sensor_readl(temp_sensor, BGAP_COUNTER_OFFSET);
+ val = val & ~(OMAP4_COUNTER_MASK);
+ val = val | (counter << OMAP4_COUNTER_SHIFT);
+ omap_temp_sensor_writel(temp_sensor, val, BGAP_COUNTER_OFFSET);
+}
+
+static void omap_enable_continuous_mode(struct omap_temp_sensor *temp_sensor)
+{
+ u32 val;
+
+ val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
+
+ val = val | (1 << OMAP4_SINGLE_MODE_SHIFT);
+
+ omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);
+}
+
+/*
+ * sysfs hook functions
+ */
+static ssize_t omap_temp_show_current(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", omap_read_current_temp(temp_sensor));
+}
+
+static ssize_t omap_throttle_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ if (count && buf[0] == '1')
+ omap_thermal_throttle();
+ else
+ omap_thermal_unthrottle();
+
+ return count;
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, omap_temp_show_current, NULL);
+static DEVICE_ATTR(throttle, S_IWUSR, NULL, omap_throttle_store);
+static struct attribute *omap_temp_sensor_attributes[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_throttle.attr,
+ NULL
+};
+
+static const struct attribute_group omap_temp_sensor_group = {
+ .attrs = omap_temp_sensor_attributes,
+};
+
+static int omap_temp_sensor_enable(struct omap_temp_sensor *temp_sensor)
+{
+ u32 temp;
+ u32 ret = 0;
+ unsigned long clk_rate;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&temp_sensor->lock, flags);
+
+ if (temp_sensor->clk_on) {
+ pr_debug("%s: clock already on\n", __func__);
+ goto out;
+ }
+
+ ret = pm_runtime_get_sync(&temp_sensor->pdev->dev);
+ if (ret) {
+ pr_err("%s:get sync failed\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clk_set_rate(temp_sensor->clock, 1000000);
+ clk_rate = clk_get_rate(temp_sensor->clock);
+ temp_sensor->clk_rate = clk_rate;
+
+ temp = omap_temp_sensor_readl(temp_sensor,
+ TEMP_SENSOR_CTRL_OFFSET);
+ temp &= ~(OMAP4_BGAP_TEMPSOFF_MASK);
+
+ /* BGAP_TEMPSOFF must be cleared to 0 to turn the sensor on */
+ omap_temp_sensor_writel(temp_sensor, temp,
+ TEMP_SENSOR_CTRL_OFFSET);
+ temp_sensor->clk_on = 1;
+
+out:
+ spin_unlock_irqrestore(&temp_sensor->lock, flags);
+ return ret;
+}
+
+
+static int omap_temp_sensor_disable(struct omap_temp_sensor *temp_sensor)
+{
+ u32 temp;
+ u32 ret = 0;
+ u32 counter = 1000;
+ unsigned long flags;
+
+ spin_lock_irqsave(&temp_sensor->lock, flags);
+
+ if (!temp_sensor->clk_on) {
+ pr_debug("%s: clock already off\n", __func__);
+ goto out;
+ }
+ temp = omap_temp_sensor_readl(temp_sensor,
+ TEMP_SENSOR_CTRL_OFFSET);
+ temp |= OMAP4_BGAP_TEMPSOFF_MASK;
+
+ /* BGAP_TEMPSOFF must be set to 1 before gating the clock */
+ omap_temp_sensor_writel(temp_sensor, temp,
+ TEMP_SENSOR_CTRL_OFFSET);
+ temp = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET);
+
+ /* wait till the clean stop bit is set */
+ while ((temp & OMAP4_CLEAN_STOP_MASK) && --counter)
+ temp = omap_temp_sensor_readl(temp_sensor,
+ BGAP_STATUS_OFFSET);
+ /* Gate the clock */
+ ret = pm_runtime_put_sync_suspend(&temp_sensor->pdev->dev);
+ if (ret) {
+ pr_err("%s:put sync failed\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ temp_sensor->clk_on = 0;
+
+out:
+ spin_unlock_irqrestore(&temp_sensor->lock, flags);
+ return ret;
+}
+
+/*
+ * Check whether the die sensor is cooling down. If the temperature is
+ * still at or above t_hot since the last throttle, throttle again.
+ * The OMAP junction temperature can stay in an unacceptable range for
+ * a long time, so after a t_hot->throttle event keep checking whether
+ * the system has really dropped below t_hot and re-throttle until it
+ * is back under the t_hot temperature range.
+ */
+static void throttle_delayed_work_fn(struct work_struct *work)
+{
+ int curr;
+ struct omap_temp_sensor *temp_sensor =
+ container_of(work, struct omap_temp_sensor,
+ throttle_work.work);
+ curr = omap_read_current_temp(temp_sensor);
+
+ if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) {
+ pr_warn("%s: OMAP temp read %d exceeds the threshold\n",
+ __func__, curr);
+ omap_thermal_throttle();
+ schedule_delayed_work(&temp_sensor->throttle_work,
+ msecs_to_jiffies(THROTTLE_DELAY_MS));
+ } else {
+ schedule_delayed_work(&temp_sensor->throttle_work,
+ msecs_to_jiffies(THROTTLE_DELAY_MS));
+ }
+}
+
+static irqreturn_t omap_tshut_irq_handler(int irq, void *data)
+{
+ struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
+
+ /* Need to handle thermal mgmt in bootloader
+ * to avoid restart again at kernel level
+ */
+ if (temp_sensor->is_efuse_valid) {
+ pr_emerg("%s: Thermal shutdown reached, rebooting device\n",
+ __func__);
+ kernel_restart(NULL);
+ } else {
+ pr_err("%s:Invalid EFUSE, Non-trimmed BGAP\n", __func__);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_talert_irq_handler(int irq, void *data)
+{
+ struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
+ int t_hot, t_cold, temp_offset;
+
+ t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
+ & OMAP4_HOT_FLAG_MASK;
+ t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
+ & OMAP4_COLD_FLAG_MASK;
+ temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
+ if (t_hot) {
+ omap_thermal_throttle();
+ schedule_delayed_work(&temp_sensor->throttle_work,
+ msecs_to_jiffies(THROTTLE_DELAY_MS));
+ temp_offset &= ~(OMAP4_MASK_HOT_MASK);
+ temp_offset |= OMAP4_MASK_COLD_MASK;
+ } else if (t_cold) {
+ cancel_delayed_work_sync(&temp_sensor->throttle_work);
+ omap_thermal_unthrottle();
+ temp_offset &= ~(OMAP4_MASK_COLD_MASK);
+ temp_offset |= OMAP4_MASK_HOT_MASK;
+ }
+
+ omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
+ struct omap_temp_sensor *temp_sensor;
+ struct resource *mem;
+ int ret = 0, val;
+
+ if (!pdata) {
+ dev_err(dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
+ if (!temp_sensor)
+ return -ENOMEM;
+
+ spin_lock_init(&temp_sensor->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "%s:no mem resource\n", __func__);
+ ret = -EINVAL;
+ goto plat_res_err;
+ }
+
+ temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
+ if (temp_sensor->irq < 0) {
+ dev_err(dev, "%s:Cannot get thermal alert irq\n",
+ __func__);
+ ret = -EINVAL;
+ goto get_irq_err;
+ }
+
+ ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
+ "thermal_shutdown");
+ if (ret) {
+ dev_err(dev, "%s: Could not get tshut_gpio\n",
+ __func__);
+ goto tshut_gpio_req_err;
+ }
+
+ temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
+ if (temp_sensor->tshut_irq < 0) {
+ dev_err(dev, "%s:Cannot get thermal shutdown irq\n",
+ __func__);
+ ret = -EINVAL;
+ goto get_tshut_irq_err;
+ }
+
+ temp_sensor->phy_base = pdata->offset;
+ temp_sensor->pdev = pdev;
+ temp_sensor->dev = dev;
+
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
+
+ /*
+ * Check if the EFUSE has a non-zero value; if not, this is an
+ * untrimmed sample and the reported temperatures may not be
+ * accurate.
+ */
+ if (omap_readl(OMAP4_CTRL_MODULE_CORE +
+ OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
+ temp_sensor->is_efuse_valid = 1;
+
+ temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
+ if (IS_ERR(temp_sensor->clock)) {
+ ret = PTR_ERR(temp_sensor->clock);
+ pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
+ ret = -EINVAL;
+ goto clk_get_err;
+ }
+
+ /* Init delayed work for throttle decision */
+ INIT_DELAYED_WORK(&temp_sensor->throttle_work,
+ throttle_delayed_work_fn);
+
+ platform_set_drvdata(pdev, temp_sensor);
+
+ ret = omap_temp_sensor_enable(temp_sensor);
+ if (ret) {
+ dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
+ goto sensor_enable_err;
+ }
+
+ omap_enable_continuous_mode(temp_sensor);
+ omap_configure_temp_sensor_thresholds(temp_sensor);
+ /* 1 ms */
+ omap_configure_temp_sensor_counter(temp_sensor, 1);
+
+ /* Wait till the first conversion is done; wait for at least 1 ms */
+ mdelay(2);
+
+ /* Read the temperature once due to hw issue */
+ omap_read_current_temp(temp_sensor);
+
+ /* Set 2 seconds time as default counter */
+ omap_configure_temp_sensor_counter(temp_sensor,
+ temp_sensor->clk_rate * 2);
+ ret = request_threaded_irq(temp_sensor->irq, NULL,
+ omap_talert_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "temp_sensor", (void *)temp_sensor);
+ if (ret) {
+ dev_err(dev, "Request threaded irq failed.\n");
+ goto req_irq_err;
+ }
+
+ ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
+ omap_tshut_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tshut", (void *)temp_sensor);
+ if (ret) {
+ dev_err(dev, "Request threaded irq failed for TSHUT.\n");
+ goto tshut_irq_req_err;
+ }
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
+ if (ret) {
+ dev_err(&pdev->dev, "could not create sysfs files\n");
+ goto sysfs_create_err;
+ }
+
+ /* Unmask the T_COLD and T_HOT events at init */
+ val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
+ val |= OMAP4_MASK_COLD_MASK;
+ val |= OMAP4_MASK_HOT_MASK;
+ omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);
+
+ dev_info(dev, "%s probed\n", pdata->name);
+
+ temp_sensor_pm = temp_sensor;
+
+ return 0;
+
+sysfs_create_err:
+ free_irq(temp_sensor->tshut_irq, temp_sensor);
+ cancel_delayed_work_sync(&temp_sensor->throttle_work);
+tshut_irq_req_err:
+ free_irq(temp_sensor->irq, temp_sensor);
+req_irq_err:
+ platform_set_drvdata(pdev, NULL);
+ omap_temp_sensor_disable(temp_sensor);
+sensor_enable_err:
+ clk_put(temp_sensor->clock);
+clk_get_err:
+ pm_runtime_disable(dev);
+get_tshut_irq_err:
+ gpio_free(OMAP_TSHUT_GPIO);
+tshut_gpio_req_err:
+get_irq_err:
+plat_res_err:
+ kfree(temp_sensor);
+ return ret;
+}
+
+static int __devexit omap_temp_sensor_remove(struct platform_device *pdev)
+{
+ struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &omap_temp_sensor_group);
+ cancel_delayed_work_sync(&temp_sensor->throttle_work);
+ omap_temp_sensor_disable(temp_sensor);
+ clk_put(temp_sensor->clock);
+ platform_set_drvdata(pdev, NULL);
+ if (temp_sensor->irq)
+ free_irq(temp_sensor->irq, temp_sensor);
+ if (temp_sensor->tshut_irq)
+ free_irq(temp_sensor->tshut_irq, temp_sensor);
+ kfree(temp_sensor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void omap_temp_sensor_save_ctxt(struct omap_temp_sensor *temp_sensor)
+{
+ temp_sensor_context.temp_sensor_ctrl =
+ omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
+ temp_sensor_context.bg_ctrl =
+ omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
+ temp_sensor_context.bg_counter =
+ omap_temp_sensor_readl(temp_sensor, BGAP_COUNTER_OFFSET);
+ temp_sensor_context.bg_threshold =
+ omap_temp_sensor_readl(temp_sensor, BGAP_THRESHOLD_OFFSET);
+ temp_sensor_context.temp_sensor_tshut_threshold =
+ omap_temp_sensor_readl(temp_sensor, BGAP_TSHUT_OFFSET);
+}
+
+static void omap_temp_sensor_restore_ctxt(struct omap_temp_sensor *temp_sensor)
+{
+ omap_temp_sensor_writel(temp_sensor,
+ temp_sensor_context.temp_sensor_ctrl,
+ TEMP_SENSOR_CTRL_OFFSET);
+ omap_temp_sensor_writel(temp_sensor,
+ temp_sensor_context.bg_ctrl,
+ BGAP_CTRL_OFFSET);
+ omap_temp_sensor_writel(temp_sensor,
+ temp_sensor_context.bg_counter,
+ BGAP_COUNTER_OFFSET);
+ omap_temp_sensor_writel(temp_sensor,
+ temp_sensor_context.bg_threshold,
+ BGAP_THRESHOLD_OFFSET);
+ omap_temp_sensor_writel(temp_sensor,
+ temp_sensor_context.temp_sensor_tshut_threshold,
+ BGAP_TSHUT_OFFSET);
+}
+
+static int omap_temp_sensor_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);
+
+ omap_temp_sensor_disable(temp_sensor);
+
+ return 0;
+}
+
+static int omap_temp_sensor_resume(struct platform_device *pdev)
+{
+ struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);
+
+ omap_temp_sensor_enable(temp_sensor);
+
+ return 0;
+}
+
+void omap_temp_sensor_idle(int idle_state)
+{
+ if (!temp_sensor_pm)
+ return;
+
+ if (idle_state)
+ omap_temp_sensor_disable(temp_sensor_pm);
+ else
+ omap_temp_sensor_enable(temp_sensor_pm);
+}
+
+#else
+#define omap_temp_sensor_suspend NULL
+#define omap_temp_sensor_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int omap_temp_sensor_runtime_suspend(struct device *dev)
+{
+ struct omap_temp_sensor *temp_sensor =
+ platform_get_drvdata(to_platform_device(dev));
+
+ omap_temp_sensor_save_ctxt(temp_sensor);
+ return 0;
+}
+
+static int omap_temp_sensor_runtime_resume(struct device *dev)
+{
+ struct omap_temp_sensor *temp_sensor =
+ platform_get_drvdata(to_platform_device(dev));
+ if (omap_pm_was_context_lost(dev))
+ omap_temp_sensor_restore_ctxt(temp_sensor);
+ return 0;
+}
+
+static const struct dev_pm_ops omap_temp_sensor_dev_pm_ops = {
+ .runtime_suspend = omap_temp_sensor_runtime_suspend,
+ .runtime_resume = omap_temp_sensor_runtime_resume,
+};
+
+static struct platform_driver omap_temp_sensor_driver = {
+ .probe = omap_temp_sensor_probe,
+ .remove = omap_temp_sensor_remove,
+ .suspend = omap_temp_sensor_suspend,
+ .resume = omap_temp_sensor_resume,
+ .driver = {
+ .name = "omap_temp_sensor",
+ .pm = &omap_temp_sensor_dev_pm_ops,
+ },
+};
+
+int __init omap_temp_sensor_init(void)
+{
+ if (!cpu_is_omap446x())
+ return 0;
+
+ return platform_driver_register(&omap_temp_sensor_driver);
+}
+
+static void __exit omap_temp_sensor_exit(void)
+{
+ platform_driver_unregister(&omap_temp_sensor_driver);
+}
+
+module_init(omap_temp_sensor_init);
+module_exit(omap_temp_sensor_exit);
+
+MODULE_DESCRIPTION("OMAP446X Temperature Sensor Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f601180..99cbf50 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -903,16 +903,20 @@
*/
static int mmc_suspend(struct mmc_host *host)
{
+ int err = 0;
+
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- if (!mmc_host_is_spi(host))
+ if (mmc_card_can_sleep(host))
+ err = mmc_card_sleep(host);
+ else if (!mmc_host_is_spi(host))
mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
- return 0;
+ return err;
}
/*
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dedf3da..c507190 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -28,16 +28,20 @@
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+
#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
#include <plat/mmc.h>
#include <plat/cpu.h>
+#include <plat/omap-pm.h>
/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSCONFIG 0x0010
@@ -166,6 +170,7 @@
unsigned int id;
unsigned int dma_len;
unsigned int dma_sg_idx;
+ unsigned int master_clock;
unsigned char bus_mode;
unsigned char power_mode;
u32 *buffer;
@@ -177,7 +182,6 @@
int slot_id;
int got_dbclk;
int response_busy;
- int context_loss;
int dpm_state;
int vdd;
int protect_card;
@@ -188,6 +192,30 @@
struct omap_mmc_platform_data *pdata;
};
+static void omap_hsmmc_status_notify_cb(int card_present, void *dev_id)
+{
+ struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
+ unsigned int status, oldstat;
+
+ pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),
+ card_present);
+
+ if (!mmc_slot(host).mmc_data.status) {
+ mmc_detect_change(host->mmc, 0);
+ return;
+ }
+
+ status = mmc_slot(host).mmc_data.status(mmc_dev(host->mmc));
+
+ oldstat = mmc_slot(host).mmc_data.card_present;
+ mmc_slot(host).mmc_data.card_present = status;
+ if (status ^ oldstat) {
+ pr_debug("%s: Slot status change detected (%d -> %d)\n",
+ mmc_hostname(host->mmc), oldstat, status);
+ mmc_detect_change(host->mmc, 0);
+ }
+}
+
static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -593,21 +621,11 @@
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss = 0;
u32 hctl, capa, con;
u16 dsor = 0;
unsigned long timeout;
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return 1;
- }
-
- dev_dbg(mmc_dev(host->mmc), "context was %slost\n",
- context_loss == host->context_loss ? "not " : "");
- if (host->context_loss == context_loss)
+ if (!omap_pm_was_context_lost(host->dev))
return 1;
/* Wait for hardware reset */
@@ -676,11 +694,11 @@
}
if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
+ dsor = host->master_clock / ios->clock;
if (dsor < 1)
dsor = 1;
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
+ if (host->master_clock / dsor > ios->clock)
dsor++;
if (dsor > 250)
@@ -707,8 +725,6 @@
else
OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
out:
- host->context_loss = context_loss;
-
dev_dbg(mmc_dev(host->mmc), "context is restored\n");
return 0;
}
@@ -718,15 +734,7 @@
*/
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
- struct omap_mmc_platform_data *pdata = host->pdata;
- int context_loss;
-
- if (pdata->get_context_loss_count) {
- context_loss = pdata->get_context_loss_count(host->dev);
- if (context_loss < 0)
- return;
- host->context_loss = context_loss;
- }
+ return;
}
#else
@@ -1155,8 +1163,7 @@
int ret;
/* Disable the clocks */
- clk_disable(host->fclk);
- clk_disable(host->iclk);
+ pm_runtime_put_sync(host->dev);
if (host->got_dbclk)
clk_disable(host->dbclk);
@@ -1167,8 +1174,7 @@
if (!ret)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
- clk_enable(host->iclk);
- clk_enable(host->fclk);
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -1534,7 +1540,7 @@
u32 con;
int do_send_init_stream = 0;
- mmc_host_enable(host->mmc);
+ pm_runtime_get_sync(host->dev);
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
@@ -1593,11 +1599,11 @@
}
if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
+ dsor = host->master_clock / ios->clock;
if (dsor < 1)
dsor = 1;
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
+ if (host->master_clock / dsor > ios->clock)
dsor++;
if (dsor > 250)
@@ -1629,10 +1635,7 @@
else
OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
- if (host->power_mode == MMC_POWER_OFF)
- mmc_host_disable(host->mmc);
- else
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_put_sync(host->dev);
}
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1709,8 +1712,8 @@
/* Handler for [ENABLED -> DISABLED] transition */
static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
{
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
+ pm_runtime_put_sync(host->dev);
+
host->dpm_state = DISABLED;
dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
@@ -1729,12 +1732,12 @@
if (!mmc_try_claim_host(host->mmc))
return 0;
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
+ pm_runtime_get_sync(host->dev);
+
if (mmc_card_can_sleep(host->mmc)) {
err = mmc_card_sleep(host->mmc);
if (err < 0) {
- clk_disable(host->fclk);
+ pm_runtime_put_sync(host->dev);
mmc_release_host(host->mmc);
return err;
}
@@ -1746,7 +1749,7 @@
mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
new_state == CARDSLEEP);
/* FIXME: turn off bus power and perhaps interrupts too */
- clk_disable(host->fclk);
+ pm_runtime_put_sync(host->dev);
host->dpm_state = new_state;
mmc_release_host(host->mmc);
@@ -1800,13 +1803,8 @@
/* Handler for [DISABLED -> ENABLED] transition */
static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
{
- int err;
+ pm_runtime_get_sync(host->dev);
- err = clk_enable(host->fclk);
- if (err < 0)
- return err;
-
- omap_hsmmc_context_restore(host);
host->dpm_state = ENABLED;
dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
@@ -1820,8 +1818,8 @@
if (!mmc_try_claim_host(host->mmc))
return 0;
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
+ pm_runtime_get_sync(host->dev);
+
if (mmc_slot(host).set_sleep)
mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
host->vdd, host->dpm_state == CARDSLEEP);
@@ -1841,9 +1839,8 @@
/* Handler for [OFF -> ENABLED] transition */
static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
{
- clk_enable(host->fclk);
+ pm_runtime_get_sync(host->dev);
- omap_hsmmc_context_restore(host);
omap_hsmmc_conf_bus_power(host);
mmc_power_restore_host(host->mmc);
@@ -1902,32 +1899,29 @@
}
}
-static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
+static int omap_hsmmc_enable_simple(struct mmc_host *mmc)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int err;
- err = clk_enable(host->fclk);
- if (err)
- return err;
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
- omap_hsmmc_context_restore(host);
+ pm_runtime_get_sync(host->dev);
+
+ dev_dbg(mmc_dev(host->mmc), "enabled\n");
return 0;
}
-static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
+static int omap_hsmmc_disable_simple(struct mmc_host *mmc, int lazy)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
+ pm_runtime_put_sync(host->dev);
+
+ dev_dbg(mmc_dev(host->mmc), "idle\n");
return 0;
}
static const struct mmc_host_ops omap_hsmmc_ops = {
- .enable = omap_hsmmc_enable_fclk,
- .disable = omap_hsmmc_disable_fclk,
+ .enable = omap_hsmmc_enable_simple,
+ .disable = omap_hsmmc_disable_simple,
.request = omap_hsmmc_request,
.set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
@@ -1953,30 +1947,22 @@
{
struct mmc_host *mmc = s->private;
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int context_loss = 0;
- if (host->pdata->get_context_loss_count)
- context_loss = host->pdata->get_context_loss_count(host->dev);
seq_printf(s, "mmc%d:\n"
" enabled:\t%d\n"
" dpm_state:\t%d\n"
" nesting_cnt:\t%d\n"
- " ctx_loss:\t%d:%d\n"
- "\nregs:\n",
+ " ct",
mmc->index, mmc->enabled ? 1 : 0,
- host->dpm_state, mmc->nesting_cnt,
- host->context_loss, context_loss);
+ host->dpm_state, mmc->nesting_cnt);
if (host->suspended || host->dpm_state == OFF) {
seq_printf(s, "host suspended, can't read registers\n");
return 0;
}
- if (clk_enable(host->fclk) != 0) {
- seq_printf(s, "can't read the regs\n");
- return 0;
- }
+ pm_runtime_get_sync(host->dev);
seq_printf(s, "SYSCONFIG:\t0x%08x\n",
OMAP_HSMMC_READ(host->base, SYSCONFIG));
@@ -1993,7 +1979,7 @@
seq_printf(s, "CAPA:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CAPA));
- clk_disable(host->fclk);
+ pm_runtime_put_sync(host->dev);
return 0;
}
@@ -2078,6 +2064,10 @@
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
+ host->master_clock = OMAP_MMC_MASTER_CLOCK;
+ if (mmc_slot(host).features & HSMMC_HAS_48MHZ_MASTER_CLK)
+ host->master_clock = OMAP_MMC_MASTER_CLOCK / 2;
+
platform_set_drvdata(pdev, host);
INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
@@ -2119,18 +2109,17 @@
/* we start off in DISABLED state */
host->dpm_state = DISABLED;
- if (clk_enable(host->iclk) != 0) {
- clk_put(host->iclk);
- clk_put(host->fclk);
- goto err1;
- }
+ pm_runtime_enable(host->dev);
+#ifndef CONFIG_PM_RUNTIME
+ /*
+ * If runtime PM is not enabled, ensure clocks are always enabled.
+ */
+ clk_enable(host->iclk);
+ clk_enable(host->fclk);
+#endif
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- clk_put(host->iclk);
- clk_put(host->fclk);
+ if (mmc_host_enable(host->mmc) != 0)
goto err1;
- }
if (cpu_is_omap2430()) {
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2168,34 +2157,25 @@
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
+ mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
+ if (mmc_slot(host).mmc_data.built_in)
+ mmc->pm_flags = MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
+
omap_hsmmc_conf_bus_power(host);
- /* Select DMA lines */
- switch (host->id) {
- case OMAP_MMC1_DEVID:
- host->dma_line_tx = OMAP24XX_DMA_MMC1_TX;
- host->dma_line_rx = OMAP24XX_DMA_MMC1_RX;
- break;
- case OMAP_MMC2_DEVID:
- host->dma_line_tx = OMAP24XX_DMA_MMC2_TX;
- host->dma_line_rx = OMAP24XX_DMA_MMC2_RX;
- break;
- case OMAP_MMC3_DEVID:
- host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
- host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
- break;
- case OMAP_MMC4_DEVID:
- host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
- host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
- break;
- case OMAP_MMC5_DEVID:
- host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
- host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
- break;
- default:
- dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
goto err_irq;
}
+ host->dma_line_tx = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+ goto err_irq;
+ }
+ host->dma_line_rx = res->end;
/* Request IRQ for MMC operations */
ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
@@ -2236,7 +2216,12 @@
}
pdata->suspend = omap_hsmmc_suspend_cdirq;
pdata->resume = omap_hsmmc_resume_cdirq;
+ } else if (mmc_slot(host).mmc_data.register_status_notify) {
+ mmc_slot(host).mmc_data.register_status_notify(omap_hsmmc_status_notify_cb, host);
}
+ if (mmc_slot(host).mmc_data.status)
+ mmc_slot(host).mmc_data.card_present =
+ mmc_slot(host).mmc_data.status(mmc_dev(host->mmc));
omap_hsmmc_disable_irq(host);
@@ -2275,9 +2260,9 @@
free_irq(host->irq, host);
err_irq:
mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
clk_put(host->fclk);
clk_put(host->iclk);
+
if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
@@ -2311,7 +2296,8 @@
flush_work_sync(&host->mmc_carddetect_work);
mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+ pm_runtime_suspend(host->dev);
+
clk_put(host->fclk);
clk_put(host->iclk);
if (host->got_dbclk) {
@@ -2343,6 +2329,8 @@
return 0;
if (host) {
+ pm_runtime_get_sync(host->dev);
+
host->suspended = 1;
if (host->pdata->suspend) {
ret = host->pdata->suspend(&pdev->dev,
@@ -2352,18 +2340,19 @@
"Unable to handle MMC board"
" level suspend\n");
host->suspended = 0;
+ pm_runtime_put_sync(host->dev);
return ret;
}
}
cancel_work_sync(&host->mmc_carddetect_work);
+ if (mmc_slot(host).mmc_data.built_in)
+ host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
ret = mmc_suspend_host(host->mmc);
- mmc_host_enable(host->mmc);
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+
if (host->got_dbclk)
clk_disable(host->dbclk);
} else {
@@ -2375,9 +2364,9 @@
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
- mmc_host_disable(host->mmc);
}
+ pm_runtime_put_sync(host->dev);
}
return ret;
}
@@ -2393,14 +2382,7 @@
return 0;
if (host) {
- ret = clk_enable(host->iclk);
- if (ret)
- goto clk_en_err;
-
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- goto clk_en_err;
- }
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -2421,15 +2403,10 @@
if (ret == 0)
host->suspended = 0;
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_put_sync(host->dev);
}
return ret;
-
-clk_en_err:
- dev_dbg(mmc_dev(host->mmc),
- "Failed to enable MMC clocks during resume\n");
- return ret;
}
#else
@@ -2437,9 +2414,38 @@
#define omap_hsmmc_resume NULL
#endif
+/* called just before device is disabled */
+static int omap_hsmmc_runtime_suspend(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_save(host);
+
+ return 0;
+}
+
+/* called after device is (re)enabled, ONLY if context was lost */
+static int omap_hsmmc_runtime_resume(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_restore(host);
+
+ return 0;
+}
+
+
static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
+ .runtime_suspend = omap_hsmmc_runtime_suspend,
+ .runtime_resume = omap_hsmmc_runtime_resume,
};
static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9fad104d..e3091fc 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -757,6 +757,7 @@
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80", INFO(0, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
diff --git a/drivers/omap_hsi/Kconfig b/drivers/omap_hsi/Kconfig
new file mode 100644
index 0000000..1f2862f
--- /dev/null
+++ b/drivers/omap_hsi/Kconfig
@@ -0,0 +1,70 @@
+#
+# OMAP HSI driver configuration
+#
+
+config OMAP_HSI
+ bool "OMAP HSI hardware driver"
+ depends on (ARCH_OMAP34XX || ARCH_OMAP4)
+ default n
+ ---help---
+ If you say Y here, you will enable the OMAP HSI hardware driver.
+
+ Note: This module is a unified driver specific to OMAP. Efforts are
+ underway to create a vendor independent implementation.
+
+ The MIPI HSI is a High Speed Synchronous Serial Interface defined
+ for communication between two integrated circuits (the typical
+ scenario is communication between an application IC and a cellular
+ modem IC). The data transaction model is peer-to-peer.
+
+ Not all features required for a production device are implemented in
+ this driver. See the documentation for more information.
+
+ This physical layer provides logical channeling and several modes of
+ operation.
+
+ The OMAP HSI driver supports either:
+ - the OMAP MIPI HSI device
+ - the OMAP SSI device
+
+choice
+ prompt "Selected device support file"
+ depends on OMAP_HSI && y
+ default OMAP_HSI_DEVICE
+ ---help---
+ Adds the device support for one of the devices handled by the HSI
+ driver.
+
+ The OMAP HSI driver supports either:
+ - the OMAP MIPI HSI device
+ - the OMAP SSI device
+
+config OMAP_HSI_DEVICE
+ bool "HSI (OMAP MIPI HSI)"
+ depends on ARCH_OMAP4
+
+config OMAP_SSI_DEVICE
+ bool "SSI (OMAP SSI)"
+ depends on ARCH_OMAP34XX
+
+endchoice
+
+#
+# OMAP HSI char device kernel configuration
+#
+
+config OMAP_HSI_CHAR
+ tristate "OMAP HSI character driver"
+ depends on OMAP_HSI
+ ---help---
+ If you say Y here, you will enable the OMAP HSI character driver.
+
+ This driver provides a simple character device interface for
+ serial communication over the HSI bus.
+
+config OMAP_HSI_PROTOCOL
+ tristate "HSI Protocol driver for Infineon Modem"
+ depends on OMAP_HSI
+ ---help---
+ If you say Y here, you will enable the HSI Protocol driver.
+ This driver supports the HSI protocol for the Infineon modem.
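+
+# Example (illustrative only, not part of the original change): a typical
+# OMAP4 configuration could use
+#   CONFIG_OMAP_HSI=y
+#   CONFIG_OMAP_HSI_DEVICE=y
+#   CONFIG_OMAP_HSI_CHAR=m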
diff --git a/drivers/omap_hsi/Makefile b/drivers/omap_hsi/Makefile
new file mode 100644
index 0000000..0f072fb
--- /dev/null
+++ b/drivers/omap_hsi/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for HSI drivers
+#
+EXTRA_CFLAGS :=
+
+omap_hsi-objs := hsi_driver.o hsi_driver_dma.o hsi_driver_int.o \
+ hsi_driver_if.o hsi_driver_bus.o hsi_driver_gpio.o \
+ hsi_driver_fifo.o
+
+ifeq ($(CONFIG_DEBUG_FS), y)
+ omap_hsi-objs += hsi_driver_debugfs.o
+endif
+
+hsi_char-objs := hsi-char.o hsi-if.o
+
+hsi-protocol-objs := hsi_protocol.o hsi_protocol_if.o \
+ hsi_protocol_cmd.o
+
+obj-$(CONFIG_OMAP_HSI) += omap_hsi.o
+obj-$(CONFIG_OMAP_HSI_CHAR) += hsi_char.o
+obj-$(CONFIG_OMAP_HSI_PROTOCOL) += hsi-protocol.o
diff --git a/drivers/omap_hsi/hsi-char.c b/drivers/omap_hsi/hsi-char.c
new file mode 100644
index 0000000..871de30
--- /dev/null
+++ b/drivers/omap_hsi/hsi-char.c
@@ -0,0 +1,556 @@
+/*
+ * hsi-char.c
+ *
+ * HSI character device driver, implements the character device
+ * interface.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/hsi_driver_if.h>
+#include <linux/hsi_char.h>
+
+#include <plat/omap_hsi.h>
+
+#include "hsi-char.h"
+
+#define DRIVER_VERSION "0.2.1"
+#define HSI_CHAR_DEVICE_NAME "hsi_char"
+
+static unsigned int port = 1;
+module_param(port, uint, 1);
+MODULE_PARM_DESC(port, "HSI port to be probed");
+
+static unsigned int num_channels;
+static unsigned int channels_map[HSI_MAX_CHAR_DEVS] = { 0 };
+module_param_array(channels_map, uint, &num_channels, 0);
+MODULE_PARM_DESC(channels_map, "HSI channels to be probed");
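+
+/*
+ * Illustrative module parameters (assumed invocation, not taken from this
+ * patch):
+ *
+ *   insmod hsi_char.ko port=1 channels_map=1,2
+ *
+ * Entries in channels_map are HSI channel numbers offset by one (a value of
+ * 1 selects channel 0), matched against the char device minor in
+ * hsi_char_open().
+ */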
+
+dev_t hsi_char_dev;
+
+struct char_queue {
+ struct list_head list;
+ u32 *data;
+ unsigned int count;
+};
+
+struct hsi_char {
+ unsigned int opened;
+ int poll_event;
+ struct list_head rx_queue;
+ struct list_head tx_queue;
+ spinlock_t lock; /* Serialize access to driver data and API */
+ struct fasync_struct *async_queue;
+ wait_queue_head_t rx_wait;
+ wait_queue_head_t tx_wait;
+ wait_queue_head_t poll_wait;
+};
+
+static struct hsi_char hsi_char_data[HSI_MAX_CHAR_DEVS];
+
+void if_hsi_notify(int ch, struct hsi_event *ev)
+{
+ struct char_queue *entry;
+
+ pr_debug("%s, ev = {0x%x, 0x%p, %u}\n", __func__, ev->event, ev->data,
+ ev->count);
+
+ spin_lock(&hsi_char_data[ch].lock);
+
+ if (!hsi_char_data[ch].opened) {
+ pr_debug("%s, device not opened\n!", __func__);
+ spin_unlock(&hsi_char_data[ch].lock);
+ return;
+ }
+
+ switch (HSI_EV_TYPE(ev->event)) {
+ case HSI_EV_IN:
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ pr_err("HSI-CHAR: entry allocation failed.\n");
+ spin_unlock(&hsi_char_data[ch].lock);
+ return;
+ }
+ entry->data = ev->data;
+ entry->count = ev->count;
+ list_add_tail(&entry->list, &hsi_char_data[ch].rx_queue);
+ spin_unlock(&hsi_char_data[ch].lock);
+ pr_debug("%s, HSI_EV_IN\n", __func__);
+ wake_up_interruptible(&hsi_char_data[ch].rx_wait);
+ break;
+ case HSI_EV_OUT:
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ pr_err("HSI-CHAR: entry allocation failed.\n");
+ spin_unlock(&hsi_char_data[ch].lock);
+ return;
+ }
+ entry->data = ev->data;
+ entry->count = ev->count;
+ hsi_char_data[ch].poll_event |= (POLLOUT | POLLWRNORM);
+ list_add_tail(&entry->list, &hsi_char_data[ch].tx_queue);
+ spin_unlock(&hsi_char_data[ch].lock);
+ pr_debug("%s, HSI_EV_OUT\n", __func__);
+ wake_up_interruptible(&hsi_char_data[ch].tx_wait);
+ break;
+ case HSI_EV_EXCEP:
+ hsi_char_data[ch].poll_event |= POLLPRI;
+ spin_unlock(&hsi_char_data[ch].lock);
+ pr_debug("%s, HSI_EV_EXCEP\n", __func__);
+ wake_up_interruptible(&hsi_char_data[ch].poll_wait);
+ break;
+ case HSI_EV_AVAIL:
+ hsi_char_data[ch].poll_event |= (POLLIN | POLLRDNORM);
+ spin_unlock(&hsi_char_data[ch].lock);
+ pr_debug("%s, HSI_EV_AVAIL\n", __func__);
+ wake_up_interruptible(&hsi_char_data[ch].poll_wait);
+ break;
+ default:
+ spin_unlock(&hsi_char_data[ch].lock);
+ break;
+ }
+}
+
+static int hsi_char_fasync(int fd, struct file *file, int on)
+{
+ int ch = (int)file->private_data;
+ if (fasync_helper(fd, file, on, &hsi_char_data[ch].async_queue) >= 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static unsigned int hsi_char_poll(struct file *file, poll_table * wait)
+{
+ int ch = (int)file->private_data;
+ unsigned int ret = 0;
+
+ /*printk(KERN_DEBUG "%s\n", __func__); */
+
+ poll_wait(file, &hsi_char_data[ch].poll_wait, wait);
+ poll_wait(file, &hsi_char_data[ch].tx_wait, wait);
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ ret = hsi_char_data[ch].poll_event;
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ pr_debug("%s, ret = 0x%x\n", __func__, ret);
+ return ret;
+}
+
+static ssize_t hsi_char_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ch = (int)file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ u32 *data;
+ unsigned int data_len;
+ struct char_queue *entry;
+ ssize_t ret;
+
+ /*printk(KERN_DEBUG "%s, count = %d\n", __func__, count); */
+
+ /* only 32bit data is supported for now */
+ if ((count < 4) || (count & 3))
+ return -EINVAL;
+
+ data = kmalloc(count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ ret = if_hsi_read(ch, data, count);
+ if (ret < 0) {
+ kfree(data);
+ goto out2;
+ }
+
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ add_wait_queue(&hsi_char_data[ch].rx_wait, &wait);
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ for (;;) {
+ data = NULL;
+ data_len = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ if (!list_empty(&hsi_char_data[ch].rx_queue)) {
+ entry = list_entry(hsi_char_data[ch].rx_queue.next,
+ struct char_queue, list);
+ data = entry->data;
+ data_len = entry->count;
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ pr_debug("%s, data = 0x%p, data_len = %d\n",
+ __func__, data, data_len);
+
+ if (data_len) {
+ pr_debug("%s, RX finished\n", __func__);
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ hsi_char_data[ch].poll_event &= ~(POLLIN | POLLRDNORM);
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+ if_hsi_poll(ch);
+ break;
+ } else if (file->f_flags & O_NONBLOCK) {
+ pr_debug("%s, O_NONBLOCK\n", __func__);
+ ret = -EAGAIN;
+ goto out;
+ } else if (signal_pending(current)) {
+ pr_debug("%s, ERESTARTSYS\n", __func__);
+ ret = -EAGAIN;
+ if_hsi_cancel_read(ch);
+ /* goto out; */
+ break;
+ }
+
+ /*printk(KERN_DEBUG "%s, going to sleep...\n", __func__); */
+ schedule();
+ /*printk(KERN_DEBUG "%s, woke up\n", __func__); */
+ }
+
+ if (data_len) {
+ ret = copy_to_user((void __user *)buf, data, data_len);
+ if (!ret)
+ ret = data_len;
+ }
+
+ kfree(data);
+
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&hsi_char_data[ch].rx_wait, &wait);
+
+out2:
+ /*printk(KERN_DEBUG "%s, ret = %d\n", __func__, ret); */
+ return ret;
+}
+
+static ssize_t hsi_char_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ch = (int)file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ u32 *data;
+ unsigned int data_len = 0;
+ struct char_queue *entry;
+ ssize_t ret;
+
+ /*printk(KERN_DEBUG "%s, count = %d\n", __func__, count); */
+
+ /* only 32bit data is supported for now */
+ if ((count < 4) || (count & 3))
+ return -EINVAL;
+
+ data = kmalloc(count, GFP_ATOMIC);
+ if (!data) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+ if (copy_from_user(data, (void __user *)buf, count)) {
+ ret = -EFAULT;
+ kfree(data);
+ goto out2;
+ } else {
+ ret = count;
+ }
+
+ ret = if_hsi_write(ch, data, count);
+ if (ret < 0) {
+ kfree(data);
+ goto out2;
+ }
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ hsi_char_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM);
+ add_wait_queue(&hsi_char_data[ch].tx_wait, &wait);
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ for (;;) {
+ data = NULL;
+ data_len = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ if (!list_empty(&hsi_char_data[ch].tx_queue)) {
+ entry = list_entry(hsi_char_data[ch].tx_queue.next,
+ struct char_queue, list);
+ data = entry->data;
+ data_len = entry->count;
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ if (data_len) {
+ pr_debug("%s, TX finished\n", __func__);
+ ret = data_len;
+ break;
+ } else if (file->f_flags & O_NONBLOCK) {
+ pr_debug("%s, O_NONBLOCK\n", __func__);
+ ret = -EAGAIN;
+ goto out;
+ } else if (signal_pending(current)) {
+ pr_debug("%s, ERESTARTSYS\n", __func__);
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ /*printk(KERN_DEBUG "%s, going to sleep...\n", __func__); */
+ schedule();
+ /*printk(KERN_DEBUG "%s, woke up\n", __func__); */
+ }
+
+ kfree(data);
+
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&hsi_char_data[ch].tx_wait, &wait);
+
+out2:
+ /*printk(KERN_DEBUG "%s, ret = %d\n", __func__, ret); */
+ return ret;
+}
+
+static long hsi_char_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ch = (int)file->private_data;
+ unsigned int state;
+ size_t occ;
+ struct hsi_rx_config rx_cfg;
+ struct hsi_tx_config tx_cfg;
+ int ret = 0;
+
+ pr_debug("%s, ch = %d, cmd = 0x%08x\n", __func__, ch, cmd);
+
+ switch (cmd) {
+ case CS_SEND_BREAK:
+ if_hsi_send_break(ch);
+ break;
+ case CS_FLUSH_RX:
+ if_hsi_flush_rx(ch);
+ break;
+ case CS_FLUSH_TX:
+ if_hsi_flush_tx(ch);
+ break;
+ case CS_SET_ACWAKELINE:
+ if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
+ ret = -EFAULT;
+ else
+ if_hsi_set_acwakeline(ch, state);
+ break;
+ case CS_GET_ACWAKELINE:
+ if_hsi_get_acwakeline(ch, &state);
+ if (copy_to_user((void __user *)arg, &state, sizeof(state)))
+ ret = -EFAULT;
+ break;
+ case CS_GET_CAWAKELINE:
+ if_hsi_get_cawakeline(ch, &state);
+ if (copy_to_user((void __user *)arg, &state, sizeof(state)))
+ ret = -EFAULT;
+ break;
+ case CS_SET_RX:
+ if (copy_from_user(&rx_cfg, (void __user *)arg, sizeof(rx_cfg)))
+ ret = -EFAULT;
+ else
+ ret = if_hsi_set_rx(ch, &rx_cfg);
+ break;
+ case CS_GET_RX:
+ if_hsi_get_rx(ch, &rx_cfg);
+ if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg)))
+ ret = -EFAULT;
+ break;
+ case CS_SET_TX:
+ if (copy_from_user(&tx_cfg, (void __user *)arg, sizeof(tx_cfg)))
+ ret = -EFAULT;
+ else
+ ret = if_hsi_set_tx(ch, &tx_cfg);
+ break;
+ case CS_GET_TX:
+ if_hsi_get_tx(ch, &tx_cfg);
+ if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg)))
+ ret = -EFAULT;
+ break;
+ case CS_SW_RESET:
+ if_hsi_sw_reset(ch);
+ break;
+ case CS_GET_FIFO_OCCUPANCY:
+ if_hsi_get_fifo_occupancy(ch, &occ);
+ if (copy_to_user((void __user *)arg, &occ, sizeof(occ)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
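+
+/*
+ * Illustrative user-space ioctl usage (assumes a device node for this char
+ * device already exists; node creation is not handled by this driver):
+ *
+ *   unsigned int acwake;
+ *   ioctl(fd, CS_GET_ACWAKELINE, &acwake);
+ *   acwake = 1;
+ *   ioctl(fd, CS_SET_ACWAKELINE, &acwake);
+ *
+ * The CS_* command codes are defined in <linux/hsi_char.h>.
+ */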
+
+static int hsi_char_open(struct inode *inode, struct file *file)
+{
+ int ret = 0, ch = iminor(inode);
+ int i;
+
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++)
+ if ((channels_map[i] - 1) == ch)
+ break;
+
+ if (i == HSI_MAX_CHAR_DEVS) {
+ pr_err("HSI char open: Channel %d not found\n", ch);
+ return -ENODEV;
+ }
+
+ pr_debug("HSI char open: opening channel %d\n", ch);
+
+ spin_lock_bh(&hsi_char_data[ch].lock);
+
+ if (hsi_char_data[ch].opened) {
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+ pr_err("HSI char open: Channel %d already opened\n", ch);
+ return -EBUSY;
+ }
+
+ file->private_data = (void *)ch;
+ hsi_char_data[ch].opened++;
+ hsi_char_data[ch].poll_event = (POLLOUT | POLLWRNORM);
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ ret = if_hsi_start(ch);
+
+ return ret;
+}
+
+static int hsi_char_release(struct inode *inode, struct file *file)
+{
+ int ch = (int)file->private_data;
+ struct char_queue *entry;
+ struct list_head *cursor, *next;
+
+ pr_debug("%s, ch = %d\n", __func__, ch);
+
+ if_hsi_stop(ch);
+ spin_lock_bh(&hsi_char_data[ch].lock);
+ hsi_char_data[ch].opened--;
+
+ if (!list_empty(&hsi_char_data[ch].rx_queue)) {
+ list_for_each_safe(cursor, next, &hsi_char_data[ch].rx_queue) {
+ entry = list_entry(cursor, struct char_queue, list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ if (!list_empty(&hsi_char_data[ch].tx_queue)) {
+ list_for_each_safe(cursor, next, &hsi_char_data[ch].tx_queue) {
+ entry = list_entry(cursor, struct char_queue, list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ spin_unlock_bh(&hsi_char_data[ch].lock);
+
+ return 0;
+}
+
+static const struct file_operations hsi_char_fops = {
+ .owner = THIS_MODULE,
+ .read = hsi_char_read,
+ .write = hsi_char_write,
+ .poll = hsi_char_poll,
+ .unlocked_ioctl = hsi_char_ioctl,
+ .open = hsi_char_open,
+ .release = hsi_char_release,
+ .fasync = hsi_char_fasync,
+};
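+
+/*
+ * Usage model (descriptive summary of the handlers above): user space opens
+ * a character device whose minor selects an HSI channel via channels_map,
+ * then performs read()/write() in multiples of 32-bit words (other sizes
+ * return -EINVAL) and may poll() for POLLIN/POLLOUT readiness. Device node
+ * creation is left to user space, since no device class is registered here.
+ */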
+
+static struct cdev hsi_char_cdev;
+
+static int __init hsi_char_init(void)
+{
+ int ret, i;
+
+ pr_info("HSI character device version " DRIVER_VERSION "\n");
+ pr_info("HSI char driver: %d channels mapped\n", num_channels);
+
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+ init_waitqueue_head(&hsi_char_data[i].rx_wait);
+ init_waitqueue_head(&hsi_char_data[i].tx_wait);
+ init_waitqueue_head(&hsi_char_data[i].poll_wait);
+ spin_lock_init(&hsi_char_data[i].lock);
+ hsi_char_data[i].opened = 0;
+ INIT_LIST_HEAD(&hsi_char_data[i].rx_queue);
+ INIT_LIST_HEAD(&hsi_char_data[i].tx_queue);
+ }
+
+ /*printk(KERN_DEBUG "%s, devname = %s\n", __func__, devname); */
+
+ ret = if_hsi_init(port, channels_map, num_channels);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&hsi_char_dev, 0, HSI_MAX_CHAR_DEVS,
+ HSI_CHAR_DEVICE_NAME);
+ if (ret < 0) {
+ pr_err("HSI character driver: Failed to register\n");
+ return ret;
+ }
+
+ cdev_init(&hsi_char_cdev, &hsi_char_fops);
+ ret = cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_MAX_CHAR_DEVS);
+ if (ret < 0) {
+ pr_err("HSI character device: Failed to add char device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit hsi_char_exit(void)
+{
+ cdev_del(&hsi_char_cdev);
+ unregister_chrdev_region(hsi_char_dev, HSI_MAX_CHAR_DEVS);
+ if_hsi_exit();
+}
+
+MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
+MODULE_AUTHOR("Sebatien Jan <s-jan@ti.com> / Texas Instruments");
+MODULE_DESCRIPTION("HSI character device");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(hsi_char_init);
+module_exit(hsi_char_exit);
diff --git a/drivers/omap_hsi/hsi-char.h b/drivers/omap_hsi/hsi-char.h
new file mode 100644
index 0000000..c4b1c4c
--- /dev/null
+++ b/drivers/omap_hsi/hsi-char.h
@@ -0,0 +1,35 @@
+/*
+ * hsi-char.h
+ *
+ * HSI character driver private declaration header file.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HSI_CHAR_H
+#define _HSI_CHAR_H
+
+#include "hsi-if.h"
+
+/* how many char devices would be created at most */
+#define HSI_MAX_CHAR_DEVS 16
+
+/* Max HSI channel id allowed to be handled as char device. */
+/* Current range [1, 16] */
+#define HSI_MAX_CHAR_DEV_ID 16
+
+void if_hsi_notify(int ch, struct hsi_event *ev);
+
+#endif /* _HSI_CHAR_H */
diff --git a/drivers/omap_hsi/hsi-if.c b/drivers/omap_hsi/hsi-if.c
new file mode 100644
index 0000000..5228b6a
--- /dev/null
+++ b/drivers/omap_hsi/hsi-if.c
@@ -0,0 +1,672 @@
+/*
+ * hsi-if.c
+ *
+ * Part of the HSI character driver, implements the HSI interface.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/bitmap.h>
+
+#include <linux/hsi_driver_if.h>
+#include <linux/hsi_char.h>
+
+#include "hsi-char.h"
+#include "hsi-if.h"
+
+#define HSI_CHANNEL_STATE_UNAVAIL (1 << 0)
+#define HSI_CHANNEL_STATE_READING (1 << 1)
+#define HSI_CHANNEL_STATE_WRITING (1 << 2)
+
+#define PORT1 0
+#define PORT2 1
+
+#define RXCONV(dst, src) \
+ do { \
+ (dst)->mode = (src)->mode; \
+ (dst)->flow = (src)->flow; \
+ (dst)->frame_size = (src)->frame_size; \
+ (dst)->channels = (src)->channels; \
+ (dst)->divisor = (src)->divisor; \
+ (dst)->counters = (src)->counters; \
+ } while (0)
+
+#define TXCONV(dst, src) \
+ do { \
+ (dst)->mode = (src)->mode; \
+ (dst)->flow = (src)->flow; \
+ (dst)->frame_size = (src)->frame_size; \
+ (dst)->channels = (src)->channels; \
+ (dst)->divisor = (src)->divisor; \
+ (dst)->arb_mode = (src)->arb_mode; \
+ } while (0)
+
+struct if_hsi_channel {
+ struct hsi_device *dev;
+ unsigned int channel_id;
+ u32 *tx_data;
+ unsigned int tx_count; /* Number of bytes to be written */
+ u32 *rx_data;
+ unsigned int rx_count; /* Number of bytes to be read */
+ unsigned int opened;
+ unsigned int state;
+ spinlock_t lock; /* Serializes access to channel data */
+};
+
+struct if_hsi_iface {
+ struct if_hsi_channel channels[HSI_MAX_CHAR_DEVS];
+ int bootstrap;
+ unsigned long init_chan_map;
+ spinlock_t lock; /* Serializes access to HSI functional interface */
+};
+
+static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
+ void *arg);
+static int __devinit if_hsi_probe(struct hsi_device *dev);
+static int __devexit if_hsi_remove(struct hsi_device *dev);
+
+static struct hsi_device_driver if_hsi_char_driver = {
+ .ctrl_mask = ANY_HSI_CONTROLLER,
+ .probe = if_hsi_probe,
+ .remove = __devexit_p(if_hsi_remove),
+ .driver = {
+ .name = "hsi_char"},
+};
+
+static struct if_hsi_iface hsi_iface;
+
+static int if_hsi_read_on(int ch, u32 *data, unsigned int count)
+{
+ struct if_hsi_channel *channel;
+ int ret;
+
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+ spin_lock(&channel->lock);
+ if (channel->state & HSI_CHANNEL_STATE_READING) {
+ pr_err("Read still pending on channel %d\n", ch);
+ spin_unlock(&channel->lock);
+ return -EBUSY;
+ }
+ channel->state |= HSI_CHANNEL_STATE_READING;
+ channel->rx_data = data;
+ channel->rx_count = count;
+ spin_unlock(&channel->lock);
+
+ ret = hsi_read(channel->dev, data, count / 4);
+ dev_dbg(&channel->dev->device, "%s, ch = %d, ret = %d\n", __func__, ch,
+ ret);
+
+ return ret;
+}
+
+/* HSI char driver read done callback */
+static void if_hsi_read_done(struct hsi_device *dev, unsigned int size)
+{
+ struct if_hsi_channel *channel;
+ struct hsi_event ev;
+
+ channel = &hsi_iface.channels[dev->n_ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ ev.event = HSI_EV_IN;
+ ev.data = channel->rx_data;
+ ev.count = 4 * size; /* Convert size to number of u8, not u32 */
+ spin_unlock(&channel->lock);
+ if_hsi_notify(dev->n_ch, &ev);
+}
+
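+/*
+ * Read @count bytes from HSI channel @ch into @data. @count is passed to
+ * hsi_read() as count / 4 32-bit words, so it is expected to be a multiple
+ * of 4 (the char device layer enforces this); completion is reported
+ * asynchronously through if_hsi_read_done().
+ */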
+int if_hsi_read(int ch, u32 *data, unsigned int count)
+{
+ int ret = 0;
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = if_hsi_read_on(ch, data, count);
+ return ret;
+}
+
+int if_hsi_poll(int ch)
+{
+ struct if_hsi_channel *channel;
+ int ret = 0;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = hsi_poll(channel->dev);
+ return ret;
+}
+
+static int if_hsi_write_on(int ch, u32 *address, unsigned int count)
+{
+ struct if_hsi_channel *channel;
+ int ret;
+
+ channel = &hsi_iface.channels[ch];
+
+ spin_lock(&channel->lock);
+ if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+ pr_err("Write still pending on channel %d\n", ch);
+ spin_unlock(&channel->lock);
+ return -EBUSY;
+ }
+
+ channel->tx_data = address;
+ channel->tx_count = count;
+ channel->state |= HSI_CHANNEL_STATE_WRITING;
+ spin_unlock(&channel->lock);
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = hsi_write(channel->dev, address, count / 4);
+ return ret;
+}
+
+/* HSI char driver write done callback */
+static void if_hsi_write_done(struct hsi_device *dev, unsigned int size)
+{
+ struct if_hsi_channel *channel;
+ struct hsi_event ev;
+
+ channel = &hsi_iface.channels[dev->n_ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+ ev.event = HSI_EV_OUT;
+ ev.data = channel->tx_data;
+ ev.count = 4 * size; /* Convert size to number of u8, not u32 */
+ spin_unlock(&channel->lock);
+ if_hsi_notify(dev->n_ch, &ev);
+}
+
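+/*
+ * Write @count bytes from @data to HSI channel @ch. As with if_hsi_read(),
+ * @count is passed to hsi_write() as count / 4 32-bit words and completion
+ * is signalled through the if_hsi_write_done() callback.
+ */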
+int if_hsi_write(int ch, u32 *data, unsigned int count)
+{
+ int ret = 0;
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = if_hsi_write_on(ch, data, count);
+ return ret;
+}
+
+void if_hsi_send_break(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ hsi_ioctl(channel->dev, HSI_IOCTL_SEND_BREAK, NULL);
+}
+
+void if_hsi_flush_rx(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_FLUSH_RX, NULL);
+}
+
+void if_hsi_flush_ch(int ch)
+{
+ /* FIXME - Check the purpose of this function */
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+}
+
+void if_hsi_flush_tx(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_FLUSH_TX, NULL);
+}
+
+void if_hsi_get_acwakeline(int ch, unsigned int *state)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_ACWAKE, state);
+}
+
+void if_hsi_set_acwakeline(int ch, unsigned int state)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev,
+ state ? HSI_IOCTL_ACWAKE_UP : HSI_IOCTL_ACWAKE_DOWN, NULL);
+}
+
+void if_hsi_get_cawakeline(int ch, unsigned int *state)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_CAWAKE, state);
+}
+
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg)
+{
+ int ret;
+ struct if_hsi_channel *channel;
+ struct hsr_ctx ctx;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ RXCONV(&ctx, cfg);
+ ret = hsi_ioctl(channel->dev, HSI_IOCTL_SET_RX, &ctx);
+ return ret;
+}
+
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg)
+{
+ struct if_hsi_channel *channel;
+ struct hsr_ctx ctx;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_RX, &ctx);
+ RXCONV(cfg, &ctx);
+}
+
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg)
+{
+ int ret;
+ struct if_hsi_channel *channel;
+ struct hst_ctx ctx;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ TXCONV(&ctx, cfg);
+ ret = hsi_ioctl(channel->dev, HSI_IOCTL_SET_TX, &ctx);
+ return ret;
+}
+
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg)
+{
+ struct if_hsi_channel *channel;
+ struct hst_ctx ctx;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_TX, &ctx);
+ TXCONV(cfg, &ctx);
+}
+
+void if_hsi_sw_reset(int ch)
+{
+ struct if_hsi_channel *channel;
+ int i;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_SW_RESET, NULL);
+
+ spin_lock_bh(&hsi_iface.lock);
+ /* Reset HSI channel states */
+ for (i = 0; i < HSI_MAX_PORTS; i++)
+ if_hsi_char_driver.ch_mask[i] = 0;
+
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+ channel = &hsi_iface.channels[i];
+ channel->opened = 0;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ }
+ spin_unlock_bh(&hsi_iface.lock);
+}
+
+void if_hsi_get_fifo_occupancy(int ch, size_t *occ)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ hsi_ioctl(channel->dev, HSI_IOCTL_GET_FIFO_OCCUPANCY, occ);
+}
+
+void if_hsi_cancel_read(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ if (channel->state & HSI_CHANNEL_STATE_READING)
+ hsi_read_cancel(channel->dev);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ spin_unlock(&channel->lock);
+}
+
+void if_hsi_cancel_write(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ if (channel->state & HSI_CHANNEL_STATE_WRITING)
+ hsi_write_cancel(channel->dev);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+ spin_unlock(&channel->lock);
+}
+
+static int if_hsi_openchannel(struct if_hsi_channel *channel)
+{
+ int ret = 0;
+
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+ channel->channel_id);
+ spin_lock(&channel->lock);
+
+ if (channel->state == HSI_CHANNEL_STATE_UNAVAIL) {
+ pr_err("Channel %d is not available\n", channel->channel_id);
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ if (channel->opened) {
+ pr_err("Channel %d is busy\n", channel->channel_id);
+ ret = -EBUSY;
+ goto leave;
+ }
+
+ if (!channel->dev) {
+ pr_err("Channel %d is not ready??\n", channel->channel_id);
+ ret = -ENODEV;
+ goto leave;
+ }
+ spin_unlock(&channel->lock);
+
+ ret = hsi_open(channel->dev);
+
+ spin_lock(&channel->lock);
+ if (ret < 0) {
+ pr_err("Could not open channel %d\n", channel->channel_id);
+ goto leave;
+ }
+
+ channel->opened = 1;
+
+leave:
+ spin_unlock(&channel->lock);
+ return ret;
+}
+
+static int if_hsi_closechannel(struct if_hsi_channel *channel)
+{
+ int ret = 0;
+
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+ channel->channel_id);
+ spin_lock(&channel->lock);
+
+ if (!channel->opened)
+ goto leave;
+
+ if (!channel->dev) {
+ pr_err("Channel %d is not ready??\n", channel->channel_id);
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ /* Stop any pending read/write */
+ if (channel->state & HSI_CHANNEL_STATE_READING) {
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ spin_unlock(&channel->lock);
+ hsi_read_cancel(channel->dev);
+ spin_lock(&channel->lock);
+ }
+
+ if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+ channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+ spin_unlock(&channel->lock);
+ hsi_write_cancel(channel->dev);
+ } else
+ spin_unlock(&channel->lock);
+
+ hsi_close(channel->dev);
+
+ spin_lock(&channel->lock);
+ channel->opened = 0;
+leave:
+ spin_unlock(&channel->lock);
+ return ret;
+}
+
+int if_hsi_start(int ch)
+{
+ struct if_hsi_channel *channel;
+ int ret = 0;
+
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+ spin_lock_bh(&channel->lock);
+ channel->state = 0;
+ spin_unlock_bh(&channel->lock);
+
+ ret = if_hsi_openchannel(channel);
+ if (ret < 0) {
+ pr_err("Could not open channel %d\n", ch);
+ goto error;
+ }
+
+ if_hsi_poll(ch);
+error:
+ return ret;
+}
+
+void if_hsi_stop(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+ if_hsi_closechannel(channel);
+}
+
+static int __devinit if_hsi_probe(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int ret = -ENXIO, port;
+
+ dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+ dev->n_ch);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_char_driver.ch_mask[port])
+ break;
+ }
+
+ if (port == HSI_MAX_PORTS)
+ return -ENXIO;
+
+ if (dev->n_ch >= HSI_MAX_CHAR_DEV_ID) {
+ pr_err("HSI char driver cannot handle channel %d\n", dev->n_ch);
+ return -ENXIO;
+ }
+
+ address = &if_hsi_char_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+ hsi_set_read_cb(dev, if_hsi_read_done);
+ hsi_set_write_cb(dev, if_hsi_write_done);
+ hsi_set_port_event_cb(dev, if_hsi_port_event);
+ channel = &hsi_iface.channels[dev->n_ch];
+ channel->dev = dev;
+ channel->state = 0;
+ ret = 0;
+ hsi_iface.init_chan_map ^= (1 << dev->n_ch);
+ }
+ spin_unlock_bh(&hsi_iface.lock);
+
+ return ret;
+}
+
+static int __devexit if_hsi_remove(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int ret = -ENXIO, port;
+
+ dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+ dev->n_ch);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_char_driver.ch_mask[port])
+ break;
+ }
+
+ if (port == HSI_MAX_PORTS)
+ return -ENXIO;
+
+ address = &if_hsi_char_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+ hsi_set_read_cb(dev, NULL);
+ hsi_set_write_cb(dev, NULL);
+ hsi_set_port_event_cb(dev, NULL);
+ channel = &hsi_iface.channels[dev->n_ch];
+ channel->dev = NULL;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ ret = 0;
+ }
+ spin_unlock_bh(&hsi_iface.lock);
+
+ return ret;
+}
+
+static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
+ void *arg)
+{
+ struct hsi_event ev;
+ int i;
+
+ ev.event = HSI_EV_EXCEP;
+ ev.data = (u32 *) 0;
+ ev.count = 0;
+
+ switch (event) {
+ case HSI_EVENT_BREAK_DETECTED:
+ pr_debug("%s, HWBREAK detected\n", __func__);
+ ev.data = (u32 *) HSI_HWBREAK;
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+ if (hsi_iface.channels[i].opened)
+ if_hsi_notify(i, &ev);
+ }
+ break;
+ case HSI_EVENT_HSR_DATAAVAILABLE:
+ i = (int)arg;
+ pr_debug("%s, HSI_EVENT_HSR_DATAAVAILABLE channel = %d\n",
+ __func__, i);
+ ev.event = HSI_EV_AVAIL;
+ if (hsi_iface.channels[i].opened)
+ if_hsi_notify(i, &ev);
+ break;
+ case HSI_EVENT_CAWAKE_UP:
+ pr_debug("%s, CAWAKE up\n", __func__);
+ break;
+ case HSI_EVENT_CAWAKE_DOWN:
+ pr_debug("%s, CAWAKE down\n", __func__);
+ break;
+ case HSI_EVENT_ERROR:
+ pr_debug("%s, HSI ERROR occured\n", __func__);
+ break;
+ default:
+ pr_warning("%s, Unknown event(%d)\n", __func__, event);
+ break;
+ }
+}
+
+int __init if_hsi_init(unsigned int port, unsigned int *channels_map,
+ unsigned int num_channels)
+{
+ struct if_hsi_channel *channel;
+ int i, ret = 0;
+
+ pr_debug("%s, port = %d\n", __func__, port);
+
+ port -= 1;
+ if (port >= HSI_MAX_PORTS)
+ return -EINVAL;
+
+ hsi_iface.bootstrap = 1;
+ spin_lock_init(&hsi_iface.lock);
+
+ for (i = 0; i < HSI_MAX_PORTS; i++)
+ if_hsi_char_driver.ch_mask[i] = 0;
+
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+ channel = &hsi_iface.channels[i];
+ channel->dev = NULL;
+ channel->opened = 0;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ channel->channel_id = i;
+ spin_lock_init(&channel->lock);
+ }
+
+ for (i = 0; (i < num_channels) && channels_map[i]; i++) {
+ pr_debug("%s, port = %d, channels_map[i] = %d\n", __func__,
+ port, channels_map[i]);
+ if ((channels_map[i] - 1) < HSI_MAX_CHAR_DEV_ID) {
+ if_hsi_char_driver.ch_mask[port] |=
+ (1 << (channels_map[i] - 1));
+ } else {
+ pr_err("Channel %d cannot be handled by the HSI "
+ "driver.\n", channels_map[i]);
+ return -EINVAL;
+ }
+ }
+ hsi_iface.init_chan_map = if_hsi_char_driver.ch_mask[port];
+
+ ret = hsi_register_driver(&if_hsi_char_driver);
+ if (ret)
+ pr_err("Error while registering HSI driver %d", ret);
+
+ if (hsi_iface.init_chan_map) {
+ ret = -ENXIO;
+ pr_err("HSI: Some channels could not be registered (out of "
+ "range or already registered?)\n");
+ }
+ return ret;
+}
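+
+/*
+ * Illustrative sketch only (hypothetical caller, not compiled): judging by
+ * the checks above, a board/protocol layer would pass a 1-based port number
+ * and a 1-based list of channel ids whose length is given by num_channels
+ * (a zero entry also ends the list), e.g.:
+ */
+#if 0
+static int example_bring_up_hsi_char(void)
+{
+ unsigned int map[] = { 1, 2, 3, 4 }; /* HSI channels 0..3 on port 1 */
+
+ return if_hsi_init(1, map, ARRAY_SIZE(map));
+}
+#endif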
+
+int __devexit if_hsi_exit(void)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int i, port;
+
+ pr_debug("%s\n", __func__);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_char_driver.ch_mask[port])
+ break;
+ }
+
+ if (port == HSI_MAX_PORTS)
+ return -ENXIO;
+
+ address = &if_hsi_char_driver.ch_mask[port];
+
+ for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
+ channel = &hsi_iface.channels[i];
+ if (channel->opened) {
+ if_hsi_set_acwakeline(i, HSI_IOCTL_ACWAKE_DOWN);
+ if_hsi_closechannel(channel);
+ }
+ }
+ hsi_unregister_driver(&if_hsi_char_driver);
+ return 0;
+}
diff --git a/drivers/omap_hsi/hsi-if.h b/drivers/omap_hsi/hsi-if.h
new file mode 100644
index 0000000..96afdd4
--- /dev/null
+++ b/drivers/omap_hsi/hsi-if.h
@@ -0,0 +1,69 @@
+/*
+ * hsi-if.h
+ *
+ * Part of the HSI character driver, private headers.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HSI_IF_H
+#define _HSI_IF_H
+
+#define HSI_EV_MASK (0xffff << 0)
+#define HSI_EV_TYPE_MASK (0x0f << 16)
+#define HSI_EV_IN (0x01 << 16)
+#define HSI_EV_OUT (0x02 << 16)
+#define HSI_EV_EXCEP (0x03 << 16)
+#define HSI_EV_AVAIL (0x04 << 16)
+#define HSI_EV_TYPE(event) ((event) & HSI_EV_TYPE_MASK)
+
+#define HSI_HWBREAK 1
+#define HSI_ERROR 2
+
+struct hsi_event {
+ unsigned int event;
+ u32 *data;
+ unsigned int count;
+};
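+
+/*
+ * Illustrative sketch only (hypothetical helper, not compiled): consumers of
+ * struct hsi_event are expected to classify notifications with HSI_EV_TYPE();
+ * for HSI_EV_EXCEP events the data field carries HSI_HWBREAK or HSI_ERROR.
+ */
+#if 0
+static void example_handle_event(const struct hsi_event *ev)
+{
+ switch (HSI_EV_TYPE(ev->event)) {
+ case HSI_EV_EXCEP:
+ /* ev->data holds HSI_HWBREAK or HSI_ERROR */
+ break;
+ case HSI_EV_AVAIL:
+ /* new RX data is available on the channel */
+ break;
+ default:
+ break;
+ }
+}
+#endif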
+
+int if_hsi_init(unsigned int port, unsigned int *channels_map,
+ unsigned int num_channels);
+int if_hsi_exit(void);
+
+int if_hsi_start(int ch);
+void if_hsi_stop(int ch);
+
+void if_hsi_send_break(int ch);
+void if_hsi_flush_rx(int ch);
+void if_hsi_flush_tx(int ch);
+void if_hsi_bootstrap(int ch);
+void if_hsi_set_acwakeline(int ch, unsigned int state);
+void if_hsi_get_acwakeline(int ch, unsigned int *state);
+void if_hsi_get_cawakeline(int ch, unsigned int *state);
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg);
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg);
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_sw_reset(int ch);
+void if_hsi_get_fifo_occupancy(int ch, size_t *occ);
+
+int if_hsi_read(int ch, u32 *data, unsigned int count);
+int if_hsi_poll(int ch);
+int if_hsi_write(int ch, u32 *data, unsigned int count);
+
+void if_hsi_cancel_read(int ch);
+void if_hsi_cancel_write(int ch);
+
+#endif /* _HSI_IF_H */
diff --git a/drivers/omap_hsi/hsi-protocol-if.h b/drivers/omap_hsi/hsi-protocol-if.h
new file mode 100644
index 0000000..f56ef36
--- /dev/null
+++ b/drivers/omap_hsi/hsi-protocol-if.h
@@ -0,0 +1,187 @@
+/*
+ * hsi-protocol-if.h
+ *
+ * Part of the HSI character driver, private headers.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HSI_IF_H
+#define _HSI_IF_H
+
+#define HSI_EV_MASK (0xffff << 0)
+#define HSI_EV_TYPE_MASK (0x0f << 16)
+#define HSI_EV_IN (0x01 << 16)
+#define HSI_EV_OUT (0x02 << 16)
+#define HSI_EV_EXCEP (0x03 << 16)
+#define HSI_EV_AVAIL (0x04 << 16)
+#define HSI_EV_TYPE(event) ((event) & HSI_EV_TYPE_MASK)
+
+#define HSI_HWBREAK 1
+#define HSI_ERROR 2
+
+#define HSI_MAX_CHANNELS 16
+#define CHANNEL_MASK 0xFF
+#define HSI_LL_INVALID_CHANNEL 0xFF
+
+struct hsi_event {
+ unsigned int event;
+ u32 *data;
+ unsigned int count;
+};
+
+struct if_hsi_channel {
+ struct hsi_device *dev;
+ unsigned int channel_id;
+ u32 *tx_data;
+ unsigned int tx_count;
+ u32 *rx_data;
+ unsigned int rx_count;
+ unsigned int opened;
+ unsigned int state;
+ u32 *tx_buf;
+ u32 *rx_buf;
+ unsigned int tx_state;
+ unsigned int rx_state;
+ unsigned int tx_nak_count;
+ unsigned int rx_nak_count;
+ spinlock_t lock; /* Serializes access to channel data */
+};
+
+struct if_hsi_iface {
+ struct if_hsi_channel channels[HSI_MAX_CHANNELS];
+#if 0
+ int bootstrap;
+#endif
+ unsigned long init_chan_map;
+ spinlock_t lock; /* Serializes access to HSI functional interface */
+};
+
+struct if_hsi_cmd {
+ u32 tx_cmd[50];
+ u32 rx_cmd[50];
+ struct timespec tx_cmd_time[50];
+ struct timespec rx_cmd_time[50];
+};
+
+enum {
+ HSI_LL_MSG_BREAK = 0x00,
+ HSI_LL_MSG_ECHO = 0x01,
+ HSI_LL_MSG_INFO_REQ = 0x02,
+ HSI_LL_MSG_INFO = 0x03,
+ HSI_LL_MSG_CONFIGURE = 0x04,
+ HSI_LL_MSG_ALLOCATE_CH = 0x05,
+ HSI_LL_MSG_RELEASE_CH = 0x06,
+ HSI_LL_MSG_OPEN_CONN = 0x07,
+ HSI_LL_MSG_CONN_READY = 0x08,
+ HSI_LL_MSG_CONN_CLOSED = 0x09,
+ HSI_LL_MSG_CANCEL_CONN = 0x0A,
+ HSI_LL_MSG_ACK = 0x0B,
+ HSI_LL_MSG_NAK = 0x0C,
+ HSI_LL_MSG_CONF_RATE = 0x0D,
+ HSI_LL_MSG_OPEN_CONN_OCTET = 0x0E,
+ HSI_LL_MSG_INVALID = 0xFF,
+};
+
+enum {
+ HSI_LL_TX_STATE_UNDEF,
+ HSI_LL_TX_STATE_CLOSED,
+ HSI_LL_TX_STATE_IDLE,
+ HSI_LL_TX_STATE_POWER_DOWN,
+ HSI_LL_TX_STATE_ERROR,
+ HSI_LL_TX_STATE_SEND_OPEN_CONN,
+ HSI_LL_TX_STATE_WAIT_FOR_ACK,
+ HSI_LL_TX_STATE_NACK,
+ HSI_LL_TX_STATE_WAIT_FOR_CONN_READY,
+ HSI_LL_TX_STATE_SEND_CONF_RATE,
+ HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK,
+ HSI_LL_TX_STATE_TX,
+ HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED,
+ HSI_LL_TX_STATE_TO_OPEN_CONN,
+ HSI_LL_TX_STATE_TO_ACK,
+ HSI_LL_TX_STATE_TO_READY,
+ HSI_LL_TX_STATE_TO_CONF,
+ HSI_LL_TX_STATE_TO_CONF_ACK,
+ HSI_LL_TX_STATE_TO_TX,
+ HSI_LL_TX_STATE_TO_CLOSE,
+ HSI_LL_TX_STATE_SEND_BREAK,
+};
+
+enum {
+ HSI_LL_RX_STATE_UNDEF,
+ HSI_LL_RX_STATE_CLOSED,
+ HSI_LL_RX_STATE_IDLE,
+ HSI_LL_RX_STATE_POWER_DOWN,
+ HSI_LL_RX_STATE_ERROR,
+ HSI_LL_RX_STATE_BLOCKED,
+ HSI_LL_RX_STATE_SEND_ACK,
+ HSI_LL_RX_STATE_SEND_NACK,
+ HSI_LL_RX_STATE_SEND_CONN_READY,
+ HSI_LL_RX_STATE_RX,
+ HSI_LL_RX_STATE_SEND_CONN_CLOSED,
+ HSI_LL_RX_STATE_SEND_CONN_CANCEL,
+ HSI_LL_RX_STATE_WAIT_FOR_CANCEL_CONN_ACK,
+ HSI_LL_RX_STATE_SEND_CONF_ACK,
+ HSI_LL_RX_STATE_SEND_CONF_NACK,
+ HSI_LL_RX_STATE_TO_RX,
+ HSI_LL_RX_STATE_TO_ACK,
+ HSI_LL_RX_STATE_TO_NACK,
+ HSI_LL_RX_STATE_TO_CONN_READY,
+ HSI_LL_RX_STATE_TO_CONN_CLOSED,
+ HSI_LL_RX_STATE_TO_CONN_CANCEL,
+ HSI_LL_RX_STATE_TO_CONN_CANCEL_ACK,
+ HSI_LL_RX_STATE_TO_CONF_ACK,
+ HSI_LL_RX_STATE_SEND_BREAK,
+};
+
+
+int if_hsi_init(void);
+int if_hsi_exit(void);
+
+int if_hsi_start(int ch);
+void if_hsi_stop(int ch);
+
+void if_hsi_send_break(int ch);
+void if_hsi_flush_rx(int ch);
+void if_hsi_flush_tx(int ch);
+void if_hsi_bootstrap(int ch);
+void if_hsi_set_wakeline(int ch, unsigned int state);
+void if_hsi_get_wakeline(int ch, unsigned int *state);
+
+#if 0
+int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg);
+void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg);
+int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg);
+void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg);
+#endif
+
+int if_hsi_read(int ch, u32 *data, unsigned int count);
+int if_hsi_poll(int ch);
+int if_hsi_write(int ch, u32 *data, unsigned int count);
+
+void if_hsi_cancel_read(int ch);
+void if_hsi_cancel_write(int ch);
+
+void if_notify(int ch, struct hsi_event *ev);
+int hsi_proto_read(int ch, u32 *buffer, int count);
+int hsi_proto_write(int ch, u32 *buffer, int length);
+int hsi_decode_cmd(u32 *data, u32 *cmd, u32 *ch, u32 *param);
+int protocol_create_cmd(int cmd_type, unsigned int channel, void *arg);
+int hsi_protocol_send_command(u32 cmd, u32 channel, u32 param);
+void rx_stm(u32 cmd, u32 ch, u32 param);
+#if 0
+int hsi_start_protocol(void);
+#endif
+#endif /* _HSI_IF_H */
diff --git a/drivers/omap_hsi/hsi_driver.c b/drivers/omap_hsi/hsi_driver.c
new file mode 100644
index 0000000..bb07c10
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver.c
@@ -0,0 +1,1136 @@
+/*
+ * hsi_driver.c
+ *
+ * Implements HSI module interface, initialization, and PM related functions.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/omap4-common.h>
+#include <plat/omap_device.h>
+
+#include "hsi_driver.h"
+
+#if 0
+static struct pm_qos_request_list *pm_qos_handle;
+#endif
+
+#define HSI_MODULENAME "omap_hsi"
+#define HSI_DRIVER_VERSION "0.4.1"
+#define HSI_RESETDONE_MAX_RETRIES 5 /* Max 5*L4 Read cycles waiting for */
+ /* reset to complete */
+#define HSI_RESETDONE_NORMAL_RETRIES 1 /* Reset should complete in 1 R/W */
+
+void hsi_save_ctx(struct hsi_dev *hsi_ctrl)
+{
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ void __iomem *base = hsi_ctrl->base;
+ struct port_ctx *p;
+ int port;
+
+ pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG);
+ pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG);
+ if (hsi_driver_device_is_hsi(pdev))
+ pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG);
+
+ for (port = 1; port <= pdata->num_ports; port++) {
+ p = &pdata->ctx->pctx[port - 1];
+ /* HSI TOP */
+ p->sys_mpu_enable[0] = hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_REG(port, 0));
+ p->sys_mpu_enable[1] = hsi_inl(base,
+ HSI_SYS_MPU_U_ENABLE_REG(port, 0));
+
+ /* HST */
+ p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port));
+ if (!hsi_driver_device_is_hsi(pdev))
+ p->hst.frame_size = hsi_inl(base,
+ HSI_HST_FRAMESIZE_REG(port));
+ p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
+ p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
+ p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
+
+ /* HSR */
+ p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port));
+ if (!hsi_driver_device_is_hsi(pdev))
+ p->hsr.frame_size = hsi_inl(base,
+ HSI_HSR_FRAMESIZE_REG(port));
+ p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
+ p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
+ p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+ }
+}
+
+void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
+{
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ void __iomem *base = hsi_ctrl->base;
+ struct port_ctx *p;
+ int port;
+
+ hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG);
+ hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG);
+ if (hsi_driver_device_is_hsi(pdev))
+ hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);
+
+ for (port = 1; port <= pdata->num_ports; port++) {
+ p = &pdata->ctx->pctx[port - 1];
+ /* HSI TOP */
+ hsi_outl(p->sys_mpu_enable[0], base,
+ HSI_SYS_MPU_ENABLE_REG(port, 0));
+ hsi_outl(p->sys_mpu_enable[1], base,
+ HSI_SYS_MPU_U_ENABLE_REG(port, 0));
+
+ /* HST */
+ hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port));
+ if (!hsi_driver_device_is_hsi(pdev))
+ hsi_outl(p->hst.frame_size, base,
+ HSI_HST_FRAMESIZE_REG(port));
+ hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
+ hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
+ hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+ /* HSR */
+ if (!hsi_driver_device_is_hsi(pdev))
+ hsi_outl(p->hsr.frame_size, base,
+ HSI_HSR_FRAMESIZE_REG(port));
+ hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port));
+ hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
+ hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
+ }
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ /* SW strategy for HSI fifo management can be changed here */
+ hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
+ }
+
+ /* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */
+ /* This will enable the ACREADY flow control mechanism. */
+ for (port = 1; port <= pdata->num_ports; port++) {
+ p = &pdata->ctx->pctx[port - 1];
+ hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
+ }
+}
+
+
+/* NOTE: Function called in soft interrupt context (tasklet) */
+int hsi_port_event_handler(struct hsi_port *p, unsigned int event, void *arg)
+{
+ struct hsi_channel *hsi_channel;
+ int ch;
+
+
+ if (event == HSI_EVENT_HSR_DATAAVAILABLE) {
+ /* The data-available event is channel-specific and must not be
+ * broadcasted
+ */
+ hsi_channel = p->hsi_channel + (int)arg;
+ read_lock(&hsi_channel->rw_lock);
+ if ((hsi_channel->dev) && (hsi_channel->port_event))
+ hsi_channel->port_event(hsi_channel->dev, event, arg);
+ read_unlock(&hsi_channel->rw_lock);
+ } else {
+ for (ch = 0; ch < p->max_ch; ch++) {
+ hsi_channel = p->hsi_channel + ch;
+ read_lock(&hsi_channel->rw_lock);
+ if ((hsi_channel->dev) && (hsi_channel->port_event))
+ hsi_channel->port_event(hsi_channel->dev,
+ event, arg);
+ read_unlock(&hsi_channel->rw_lock);
+ }
+ }
+ return 0;
+}
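+
+/*
+ * Illustrative sketch only (hypothetical client, not compiled): protocol
+ * drivers receive these events through the port_event callback registered
+ * on their hsi_device, as the character interface does via
+ * hsi_set_port_event_cb().
+ */
+#if 0
+static void example_port_event(struct hsi_device *dev, unsigned int event,
+ void *arg)
+{
+ if (event == HSI_EVENT_HSR_DATAAVAILABLE)
+ pr_debug("data available on channel %d\n", (int)arg);
+}
+
+/* Registered at probe time with: hsi_set_port_event_cb(dev, example_port_event); */
+#endif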
+
+static void hsi_dev_release(struct device *dev)
+{
+ /* struct device kfree is already made in unregister_hsi_devices().
+ * Registering this function is necessary to avoid an error from
+ * the device_release() function.
+ */
+}
+
+/* Register a hsi_device, linked to a port and channel id */
+static int __init reg_hsi_dev_ch(struct hsi_dev *hsi_ctrl, unsigned int p,
+ unsigned int ch)
+{
+ struct hsi_device *dev;
+ struct hsi_port *port = &hsi_ctrl->hsi_port[p];
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->n_ctrl = hsi_ctrl->id;
+ dev->n_p = p;
+ dev->n_ch = ch;
+ dev->ch = &port->hsi_channel[ch];
+ dev->device.bus = &hsi_bus_type;
+ dev->device.parent = hsi_ctrl->dev;
+ dev->device.release = hsi_dev_release;
+ if (dev->n_ctrl < 0)
+ dev_set_name(&dev->device, "omap_hsi-p%u.c%u", p, ch);
+ else
+ dev_set_name(&dev->device, "omap_hsi%d-p%u.c%u", dev->n_ctrl, p,
+ ch);
+
+ dev_dbg(hsi_ctrl->dev,
+ "reg_hsi_dev_ch, port %d, ch %d, hsi_ctrl->dev:0x%x,"
+ "&dev->device:0x%x\n",
+ p, ch, (unsigned int)hsi_ctrl->dev, (unsigned int)&dev->device);
+
+ err = device_register(&dev->device);
+ if (err >= 0) {
+ write_lock_bh(&port->hsi_channel[ch].rw_lock);
+ port->hsi_channel[ch].dev = dev;
+ write_unlock_bh(&port->hsi_channel[ch].rw_lock);
+ } else {
+ kfree(dev);
+ }
+ return err;
+}
+
+static int __init register_hsi_devices(struct hsi_dev *hsi_ctrl)
+{
+ int port;
+ int ch;
+ int err;
+
+ for (port = 0; port < hsi_ctrl->max_p; port++)
+ for (ch = 0; ch < hsi_ctrl->hsi_port[port].max_ch; ch++) {
+ err = reg_hsi_dev_ch(hsi_ctrl, port, ch);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit unregister_hsi_devices(struct hsi_dev *hsi_ctrl)
+{
+ struct hsi_port *hsi_p;
+ struct hsi_device *device;
+ unsigned int port;
+ unsigned int ch;
+
+ for (port = 0; port < hsi_ctrl->max_p; port++) {
+ hsi_p = &hsi_ctrl->hsi_port[port];
+ for (ch = 0; ch < hsi_p->max_ch; ch++) {
+ device = hsi_p->hsi_channel[ch].dev;
+ hsi_close(device);
+ device_unregister(&device->device);
+ kfree(device);
+ }
+ }
+}
+
+void hsi_set_pm_default(struct hsi_dev *hsi_ctrl)
+{
+ /* Set default SYSCONFIG PM settings */
+ hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_SMART_WAKEUP |
+ HSI_MIDLEMODE_SMART_WAKEUP),
+ hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
+ hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);
+
+ /* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
+}
+
+void hsi_set_pm_force_hsi_on(struct hsi_dev *hsi_ctrl)
+{
+ /* Force HSI to ON by never acknowledging a PRCM idle request */
+ /* SIdleAck and MStandby are never asserted */
+ hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_NO |
+ HSI_MIDLEMODE_NO),
+ hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
+ hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);
+
+ /* HSI_TODO : use the HWMOD API : omap_hwmod_set_slave_idlemode() */
+}
+
+int hsi_softreset(struct hsi_dev *hsi_ctrl)
+{
+ unsigned int ind = 0;
+ void __iomem *base = hsi_ctrl->base;
+ u32 status;
+
+ /* Resetting HSI Block */
+ hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
+ do {
+ status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
+ ind++;
+ } while ((!(status & HSI_RESETDONE)) &&
+ (ind < HSI_RESETDONE_MAX_RETRIES));
+
+ if (ind >= HSI_RESETDONE_MAX_RETRIES) {
+ dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
+ " %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
+ return -EIO;
+ } else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
+ dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
+ " %d retries to complete.\n", ind);
+ }
+
+ ind = 0;
+ /* Resetting DMA Engine */
+ hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
+ do {
+ status = hsi_inl(base, HSI_GDD_GRST_REG);
+ ind++;
+ } while ((status & HSI_GDD_GRST_SWRESET) &&
+ (ind < HSI_RESETDONE_MAX_RETRIES));
+
+ if (ind >= HSI_RESETDONE_MAX_RETRIES) {
+ dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
+ " within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
+ return -EIO;
+ }
+
+ if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
+ dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
+ " %d retries to complete.\n", ind);
+ }
+
+ return 0;
+}
+
+static void hsi_set_ports_default(struct hsi_dev *hsi_ctrl,
+ struct platform_device *pd)
+{
+ struct port_ctx *cfg;
+ struct hsi_platform_data *pdata = pd->dev.platform_data;
+ unsigned int port = 0;
+ void __iomem *base = hsi_ctrl->base;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ for (port = 1; port <= pdata->num_ports; port++) {
+ cfg = &pdata->ctx->pctx[port - 1];
+ /* HST */
+ hsi_outl(cfg->hst.mode | cfg->hst.flow |
+ HSI_HST_MODE_WAKE_CTRL_SW, base,
+ HSI_HST_MODE_REG(port));
+ if (!hsi_driver_device_is_hsi(pdev))
+ hsi_outl(cfg->hst.frame_size, base,
+ HSI_HST_FRAMESIZE_REG(port));
+ hsi_outl(cfg->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
+ hsi_outl(cfg->hst.channels, base, HSI_HST_CHANNELS_REG(port));
+ hsi_outl(cfg->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+ /* HSR */
+ hsi_outl(cfg->hsr.mode | cfg->hsr.flow, base,
+ HSI_HSR_MODE_REG(port));
+ if (!hsi_driver_device_is_hsi(pdev))
+ hsi_outl(cfg->hsr.frame_size, base,
+ HSI_HSR_FRAMESIZE_REG(port));
+ hsi_outl(cfg->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
+ if (hsi_driver_device_is_hsi(pdev))
+ hsi_outl(cfg->hsr.divisor, base,
+ HSI_HSR_DIVISOR_REG(port));
+ hsi_outl(cfg->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
+ }
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ /* SW strategy for HSI fifo management can be changed here */
+ hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
+ hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);
+ }
+}
+
+static int __init hsi_port_channels_init(struct hsi_port *port)
+{
+ struct hsi_channel *ch;
+ unsigned int ch_i;
+
+ for (ch_i = 0; ch_i < port->max_ch; ch_i++) {
+ ch = &port->hsi_channel[ch_i];
+ ch->channel_number = ch_i;
+ rwlock_init(&ch->rw_lock);
+ ch->flags = 0;
+ ch->hsi_port = port;
+ ch->read_data.addr = NULL;
+ ch->read_data.size = 0;
+ ch->read_data.lch = -1;
+ ch->write_data.addr = NULL;
+ ch->write_data.size = 0;
+ ch->write_data.lch = -1;
+ ch->dev = NULL;
+ ch->read_done = NULL;
+ ch->write_done = NULL;
+ ch->port_event = NULL;
+ }
+
+ return 0;
+}
+
+static int hsi_port_channels_reset(struct hsi_port *port)
+{
+ struct hsi_channel *ch;
+ unsigned int ch_i;
+
+ for (ch_i = 0; ch_i < port->max_ch; ch_i++) {
+ ch = &port->hsi_channel[ch_i];
+ ch->flags = 0;
+ ch->read_data.addr = NULL;
+ ch->read_data.size = 0;
+ ch->read_data.lch = -1;
+ ch->write_data.addr = NULL;
+ ch->write_data.size = 0;
+ ch->write_data.lch = -1;
+ }
+
+ return 0;
+}
+
+void hsi_softreset_driver(struct hsi_dev *hsi_ctrl)
+{
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ struct hsi_platform_data *pdata = pd->dev.platform_data;
+ struct hsi_port *hsi_p;
+ unsigned int port;
+ u32 revision;
+
+ /* HSI port reset */
+ for (port = 0; port < hsi_ctrl->max_p; port++) {
+ hsi_p = &hsi_ctrl->hsi_port[port];
+ hsi_p->counters_on = 1;
+ hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
+ hsi_port_channels_reset(&hsi_ctrl->hsi_port[port]);
+ }
+
+ hsi_set_pm_default(hsi_ctrl);
+
+ /* Re-Configure HSI ports */
+ hsi_set_ports_default(hsi_ctrl, pd);
+
+ /* Gather info from registers for the driver.(REVISION) */
+ revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
+ if (hsi_driver_device_is_hsi(pd))
+ dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
+ revision);
+ else
+ dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
+ (revision & HSI_SSI_REV_MAJOR) >> 4,
+ (revision & HSI_SSI_REV_MINOR));
+}
+
+static int __init hsi_request_mpu_irq(struct hsi_port *hsi_p)
+{
+ struct hsi_dev *hsi_ctrl = hsi_p->hsi_controller;
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ struct resource *mpu_irq;
+
+ if (hsi_driver_device_is_hsi(pd))
+ mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+ hsi_p->port_number - 1);
+ else /* SSI support 2 IRQs per port */
+ mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+ (hsi_p->port_number - 1) * 2);
+
+ if (!mpu_irq) {
+ dev_err(hsi_ctrl->dev, "HSI misses info for MPU IRQ on"
+ " port %d\n", hsi_p->port_number);
+ return -ENXIO;
+ }
+ hsi_p->n_irq = 0; /* We only use one irq line */
+ hsi_p->irq = mpu_irq->start;
+ return hsi_mpu_init(hsi_p, mpu_irq->name);
+}
+
+static int __init hsi_request_cawake_irq(struct hsi_port *hsi_p)
+{
+ struct hsi_dev *hsi_ctrl = hsi_p->hsi_controller;
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ struct resource *cawake_irq;
+
+ if (hsi_driver_device_is_hsi(pd)) {
+ hsi_p->cawake_gpio = -1;
+ return 0;
+ } else {
+ cawake_irq = platform_get_resource(pd, IORESOURCE_IRQ,
+ 4 + hsi_p->port_number);
+ }
+
+ if (!cawake_irq) {
+ dev_err(hsi_ctrl->dev, "SSI device misses info for CAWAKE"
+ "IRQ on port %d\n", hsi_p->port_number);
+ return -ENXIO;
+ }
+
+ if (cawake_irq->flags & IORESOURCE_UNSET) {
+ dev_info(hsi_ctrl->dev, "No CAWAKE GPIO support\n");
+ hsi_p->cawake_gpio = -1;
+ return 0;
+ }
+
+ hsi_p->cawake_gpio_irq = cawake_irq->start;
+ hsi_p->cawake_gpio = irq_to_gpio(cawake_irq->start);
+ return hsi_cawake_init(hsi_p, cawake_irq->name);
+}
+
+static void hsi_ports_exit(struct hsi_dev *hsi_ctrl, unsigned int max_ports)
+{
+ struct hsi_port *hsi_p;
+ unsigned int port;
+
+ for (port = 0; port < max_ports; port++) {
+ hsi_p = &hsi_ctrl->hsi_port[port];
+ hsi_mpu_exit(hsi_p);
+ hsi_cawake_exit(hsi_p);
+ }
+}
+
+static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
+{
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ struct hsi_platform_data *pdata = pd->dev.platform_data;
+ struct hsi_port *hsi_p;
+ unsigned int port;
+ int err;
+
+ for (port = 0; port < hsi_ctrl->max_p; port++) {
+ hsi_p = &hsi_ctrl->hsi_port[port];
+ hsi_p->port_number = port + 1;
+ hsi_p->hsi_controller = hsi_ctrl;
+ hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
+ HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
+ hsi_p->irq = 0;
+ hsi_p->cawake_status = -1; /* Unknown */
+ hsi_p->cawake_off_event = false;
+ hsi_p->acwake_status = 0;
+ hsi_p->in_int_tasklet = false;
+ hsi_p->in_cawake_tasklet = false;
+ hsi_p->counters_on = 1;
+ hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
+ spin_lock_init(&hsi_p->lock);
+ err = hsi_port_channels_init(&hsi_ctrl->hsi_port[port]);
+ if (err < 0)
+ goto rback1;
+ err = hsi_request_mpu_irq(hsi_p);
+ if (err < 0)
+ goto rback2;
+ err = hsi_request_cawake_irq(hsi_p);
+ if (err < 0)
+ goto rback3;
+ }
+ return 0;
+rback3:
+ hsi_mpu_exit(hsi_p);
+rback2:
+ hsi_ports_exit(hsi_ctrl, port + 1);
+rback1:
+ return err;
+}
+
+static int __init hsi_request_gdd_irq(struct hsi_dev *hsi_ctrl)
+{
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ struct resource *gdd_irq;
+
+ if (hsi_driver_device_is_hsi(pd))
+ gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 2);
+ else
+ gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 4);
+
+ if (!gdd_irq) {
+ dev_err(hsi_ctrl->dev, "HSI has no GDD IRQ resource\n");
+ return -ENXIO;
+ }
+
+ hsi_ctrl->gdd_irq = gdd_irq->start;
+ return hsi_gdd_init(hsi_ctrl, gdd_irq->name);
+}
+
+static int __init hsi_init_gdd_chan_count(struct hsi_dev *hsi_ctrl)
+{
+ struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
+ u8 gdd_chan_count;
+ struct hsi_platform_data *pdata =
+ (struct hsi_platform_data *)pd->dev.platform_data;
+ int i;
+
+ if (!pdata) {
+ dev_err(hsi_ctrl->dev, "HSI has no platform data\n");
+ return -ENXIO;
+ }
+
+ gdd_chan_count = pdata->hsi_gdd_chan_count;
+
+ if (!gdd_chan_count) {
+ dev_warn(hsi_ctrl->dev, "HSI device has no GDD channel count "
+ "(use %d as default)\n",
+ HSI_DMA_CHANNEL_DEFAULT);
+ hsi_ctrl->gdd_chan_count = HSI_DMA_CHANNEL_DEFAULT;
+ } else {
+ hsi_ctrl->gdd_chan_count = gdd_chan_count;
+ /* Check that the number of channels is power of 2 */
+ for (i = 0; i < 16; i++) {
+ if (hsi_ctrl->gdd_chan_count == (1 << i))
+ break;
+ }
+ if (i >= 16)
+ dev_err(hsi_ctrl->dev, "The Number of DMA channels "
+ "shall be a power of 2! (=%d)\n",
+ hsi_ctrl->gdd_chan_count);
+ }
+ return 0;
+}
+
+/**
+* hsi_clocks_disable_channel - virtual wrapper for disabling HSI clocks for
+* a given channel
+* @dev - reference to the hsi device.
+* @channel_number - channel number which requests clock to be disabled
+* 0xFF means no particular channel
+*
+* Note: there is no real HW clock management per HSI channel; this is only
+* virtual, to keep track of active channels and ease debugging.
+*
+* Must be called with the HSI controller lock held.
+*/
+void hsi_clocks_disable_channel(struct device *dev, u8 channel_number,
+ const char *s)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ if (channel_number != HSI_CH_NUMBER_NONE)
+ dev_dbg(dev, "CLK: hsi_clocks_disable for "
+ "channel %d: %s\n", channel_number, s);
+ else
+ dev_dbg(dev, "CLK: hsi_clocks_disable: %s\n", s);
+
+ if (!hsi_ctrl->clock_enabled) {
+ dev_dbg(dev, "Clocks already disabled, skipping...\n");
+ return;
+ }
+ if (hsi_is_hsi_controller_busy(hsi_ctrl)) {
+ dev_dbg(dev, "Cannot disable clocks, HSI port busy\n");
+ return;
+ }
+
+ if (hsi_is_hst_controller_busy(hsi_ctrl))
+ dev_dbg(dev, "Disabling clocks with HST FSM not IDLE !\n");
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Allow Fclk to change */
+ if (dpll_cascading_blocker_release(dev) < 0)
+ dev_warn(dev, "Error releasing DPLL cascading constraint\n");
+#endif
+
+ pm_runtime_put_sync_suspend(dev);
+}
+
+/**
+* hsi_clocks_enable_channel - virtual wrapper for enabling HSI clocks for
+* a given channel
+* @dev - reference to the hsi device.
+* @channel_number - channel number which requests clock to be enabled
+* 0xFF means no particular channel
+*
+* Note: there is no real HW clock management per HSI channel; this is only
+* virtual, to keep track of active channels and ease debugging.
+*
+* Must be called with the HSI controller lock held.
+*/
+int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
+ const char *s)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ if (channel_number != HSI_CH_NUMBER_NONE)
+ dev_dbg(dev, "CLK: hsi_clocks_enable for "
+ "channel %d: %s\n", channel_number, s);
+ else
+ dev_dbg(dev, "CLK: hsi_clocks_enable: %s\n", s);
+
+ if (hsi_ctrl->clock_enabled) {
+ dev_dbg(dev, "Clocks already enabled, skipping...\n");
+ return -EEXIST;
+ }
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Prevent Fclk change */
+ if (dpll_cascading_blocker_hold(dev) < 0)
+ dev_warn(dev, "Error holding DPLL cascading constraint\n");
+#endif
+
+ return pm_runtime_get_sync(dev);
+}
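+
+/*
+ * Illustrative sketch only (hypothetical caller, not compiled): the two
+ * wrappers above are meant to bracket HSI register accesses made on behalf
+ * of a channel; the lock referred to in their descriptions is assumed here
+ * to be the controller lock (hsi_ctrl->lock).
+ */
+#if 0
+static void example_channel_register_access(struct hsi_dev *hsi_ctrl, u8 ch)
+{
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(hsi_ctrl->dev, ch, __func__);
+
+ /* ... read/write HSI registers for channel "ch" here ... */
+
+ hsi_clocks_disable_channel(hsi_ctrl->dev, ch, __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+}
+#endif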
+
+static int __init hsi_controller_init(struct hsi_dev *hsi_ctrl,
+ struct platform_device *pd)
+{
+ struct hsi_platform_data *pdata = pd->dev.platform_data;
+ struct resource *mem, *ioarea;
+ int err;
+
+ mem = platform_get_resource(pd, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pd->dev, "HSI device does not have "
+ "HSI IO memory region information\n");
+ return -ENXIO;
+ }
+ dev_dbg(&pd->dev, "hsi_controller_init : IORESOURCE_MEM %s [%x, %x]\n",
+ mem->name, mem->start, mem->end);
+
+ ioarea = devm_request_mem_region(&pd->dev, mem->start,
+ (mem->end - mem->start) + 1,
+ dev_name(&pd->dev));
+ if (!ioarea) {
+ dev_err(&pd->dev, "Unable to request HSI IO mem region\n");
+ return -EBUSY;
+ }
+ dev_dbg(&pd->dev, "hsi_controller_init : ioarea %s [%x, %x]\n",
+ ioarea->name, ioarea->start, ioarea->end);
+
+ hsi_ctrl->phy_base = mem->start;
+ hsi_ctrl->base = devm_ioremap(&pd->dev, mem->start,
+ (mem->end - mem->start) + 1);
+ if (!hsi_ctrl->base) {
+ dev_err(&pd->dev, "Unable to ioremap HSI base IO address\n");
+ return -ENXIO;
+ }
+ dev_dbg(&pd->dev, "hsi_controller_init : hsi_ctrl->base=%x\n",
+ (unsigned int)hsi_ctrl->base);
+
+ hsi_ctrl->id = pd->id;
+ if (pdata->num_ports > HSI_MAX_PORTS) {
+ dev_err(&pd->dev, "The HSI driver does not support enough "
+ "ports!\n");
+ return -ENXIO;
+ }
+ hsi_ctrl->max_p = pdata->num_ports;
+ hsi_ctrl->in_dma_tasklet = false;
+ hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_UNDEF;
+ hsi_ctrl->dev = &pd->dev;
+ spin_lock_init(&hsi_ctrl->lock);
+ err = hsi_init_gdd_chan_count(hsi_ctrl);
+ if (err < 0)
+ goto rback1;
+
+ err = hsi_ports_init(hsi_ctrl);
+ if (err < 0)
+ goto rback1;
+
+ err = hsi_request_gdd_irq(hsi_ctrl);
+ if (err < 0)
+ goto rback2;
+
+ /* Everything is fine */
+ return 0;
+rback2:
+ hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
+rback1:
+ dev_err(&pd->dev, "Error on hsi_controller initialization\n");
+ return err;
+}
+
+static void hsi_controller_exit(struct hsi_dev *hsi_ctrl)
+{
+ hsi_gdd_exit(hsi_ctrl);
+ hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
+}
+
+/* HSI Platform Device probing & hsi_device registration */
+static int __init hsi_platform_device_probe(struct platform_device *pd)
+{
+ struct hsi_platform_data *pdata = pd->dev.platform_data;
+ struct hsi_dev *hsi_ctrl;
+ u32 revision;
+ int err;
+
+ dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");
+
+ dev_dbg(&pd->dev, "The platform device probed is an %s\n",
+ hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");
+
+ if (!pdata) {
+ dev_err(&pd->dev, "No platform_data found on hsi device\n");
+ return -ENXIO;
+ }
+
+ hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
+ if (hsi_ctrl == NULL) {
+ dev_err(&pd->dev, "Could not allocate memory for"
+ " struct hsi_dev\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pd, hsi_ctrl);
+ err = hsi_controller_init(hsi_ctrl, pd);
+ if (err < 0) {
+ dev_err(&pd->dev, "Could not initialize hsi controller:"
+ " %d\n", err);
+ goto rollback1;
+ }
+ /* Wakeup dependency was disabled for HSI <-> MPU PM_L3INIT_HSI_WKDEP */
+#if 0
+ omap_writel(0x141, 0x4A307338);
+#endif
+ pm_runtime_enable(hsi_ctrl->dev);
+ pm_runtime_irq_safe(hsi_ctrl->dev);
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ /* Non critical SW Reset */
+ err = hsi_softreset(hsi_ctrl);
+ if (err < 0)
+ goto rollback2;
+
+ hsi_set_pm_default(hsi_ctrl);
+
+ /* Configure HSI ports */
+ hsi_set_ports_default(hsi_ctrl, pd);
+
+ /* Gather info from registers for the driver.(REVISION) */
+ revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
+ if (hsi_driver_device_is_hsi(pd))
+ dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
+ revision);
+ else
+ dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
+ (revision & HSI_SSI_REV_MAJOR) >> 4,
+ (revision & HSI_SSI_REV_MINOR));
+
+ err = hsi_debug_add_ctrl(hsi_ctrl);
+ if (err < 0) {
+ dev_err(&pd->dev,
+ "Could not add hsi controller to debugfs: %d\n", err);
+ goto rollback2;
+ }
+
+ err = register_hsi_devices(hsi_ctrl);
+ if (err < 0) {
+ dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
+ goto rollback3;
+ }
+
+ /* Allow HSI to wake up the platform */
+ device_init_wakeup(hsi_ctrl->dev, true);
+
+#ifdef K3_0_PORTING_HSI_MISSING_FEATURE
+ /* Set the HSI FCLK to default. */
+ err = omap_device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
+ pdata->default_hsi_fclk);
+ if (err)
+ dev_err(&pd->dev, "Cannot set HSI FClk to default value: %ld\n",
+ pdata->default_hsi_fclk);
+#endif
+
+ /* From here no need for HSI HW access */
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ return err;
+
+rollback3:
+ hsi_debug_remove_ctrl(hsi_ctrl);
+rollback2:
+ hsi_controller_exit(hsi_ctrl);
+
+ /* From here no need for HSI HW access */
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+rollback1:
+ kfree(hsi_ctrl);
+ return err;
+}
+
+static int __exit hsi_platform_device_remove(struct platform_device *pd)
+{
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_remove\n");
+
+ if (!hsi_ctrl)
+ return 0;
+
+ unregister_hsi_devices(hsi_ctrl);
+
+ /* From here no need for HSI HW access */
+ pm_runtime_disable(hsi_ctrl->dev);
+
+ hsi_debug_remove_ctrl(hsi_ctrl);
+ hsi_controller_exit(hsi_ctrl);
+
+ kfree(hsi_ctrl);
+
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static int hsi_suspend_noirq(struct device *dev)
+{
+ struct hsi_platform_data *pdata = dev->platform_data;
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* If HSI is enabled, CAWAKE IO wakeup has been disabled and */
+ /* we don't want to re-enable it here. HSI interrupt shall be */
+ /* generated normally because HSI HW is ON. */
+ if (hsi_ctrl->clock_enabled) {
+ dev_info(dev, "Platform Suspend while HSI active\n");
+ return 0;
+ }
+
+ /* Perform HSI board specific action before platform suspend */
+ if (pdata->board_suspend)
+ pdata->board_suspend(0, device_may_wakeup(dev));
+
+ return 0;
+}
+
+static int hsi_resume_noirq(struct device *dev)
+{
+ struct hsi_platform_data *pdata = dev->platform_data;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* This function shall not schedule the tasklet, because it is */
+ /* redundant with what is already done in the PRCM interrupt handler. */
+ /* HSI IO checking in PRCM int handler is done when waking up from : */
+ /* - Device OFF mode (wake up from suspend) */
+ /* - L3INIT in RET (Idle mode) */
+ /* hsi_resume_noirq is called only when system wakes up from suspend. */
+ /* So HSI IO checking in PRCM int handler and hsi_resume_noirq are */
+ /* redundant. We need to choose which one will schedule the tasklet */
+ /* Since HSI IO checking in PRCM int handler covers more cases, it is */
+ /* the winner. */
+
+ /* Perform (optional) HSI board specific action after platform wakeup */
+ if (pdata->board_resume)
+ pdata->board_resume(0);
+
+ return 0;
+}
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+* hsi_runtime_resume - executed by the PM core for the bus type of the device being woken up
+* @dev - reference to the hsi device.
+*
+*
+*/
+int hsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (hsi_ctrl->clock_enabled)
+ dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");
+
+ hsi_ctrl->clock_enabled = true;
+
+ /* Restore context */
+ hsi_restore_ctx(hsi_ctrl);
+
+ /* When HSI is ON, no need for IO wakeup mechanism */
+ pdata->wakeup_disable(0);
+
+ /* HSI device is now fully operational and _must_ be able to */
+ /* complete I/O operations */
+
+ return 0;
+}
+
+/**
+* hsi_runtime_suspend - Prepare HSI for low power : device will not process data and will
+ not communicate with the CPU
+* @dev - reference to the hsi device.
+*
+* Return value : -EBUSY or -EAGAIN if device is busy and still operational
+*
+*/
+int hsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+ struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
+ int port;
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!hsi_ctrl->clock_enabled)
+ dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");
+
+ /* Save context */
+ hsi_save_ctx(hsi_ctrl);
+
+ hsi_ctrl->clock_enabled = false;
+
+ /* Put HSR into SLEEP mode to force ACREADY to low while HSI is idle */
+ for (port = 1; port <= pdata->num_ports; port++) {
+ hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base,
+ HSI_HSR_MODE_REG(port));
+ }
+
+ /* HSI is going to INA/RET/OFF, it needs IO wakeup mechanism enabled */
+ if (device_may_wakeup(dev))
+ pdata->wakeup_enable(0);
+ else
+ pdata->wakeup_disable(0);
+
+ /* HSI is now ready to be put in low power state */
+
+ return 0;
+}
+
+/* Based on counters, device appears to be idle.
+ * Check if the device can be suspended.
+ */
+static int hsi_runtime_idle(struct device *dev)
+{
+ struct platform_device *pd = to_platform_device(dev);
+ struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (hsi_is_hsi_controller_busy(hsi_ctrl)) {
+ dev_dbg(dev, "hsi_runtime_idle: HSI port busy\n");
+ return -EBUSY;
+ }
+
+ if (hsi_is_hst_controller_busy(hsi_ctrl)) {
+ dev_dbg(dev, "hsi_runtime_idle: HST FSM not IDLE !\n");
+ return -EBUSY;
+ }
+
+ /* HSI_TODO : check also the interrupt status registers.*/
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+int hsi_driver_device_is_hsi(struct platform_device *dev)
+{
+ struct platform_device_id *id =
+ (struct platform_device_id *)platform_get_device_id(dev);
+ return (id->driver_data == HSI_DRV_DEVICE_HSI);
+}
+
+/* List of devices supported by this driver */
+static struct platform_device_id hsi_id_table[] = {
+ {"omap_hsi", HSI_DRV_DEVICE_HSI},
+ {"omap_ssi", HSI_DRV_DEVICE_SSI},
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, hsi_id_table);
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops hsi_driver_pm_ops = {
+#ifdef CONFIG_SUSPEND
+ .suspend_noirq = hsi_suspend_noirq,
+ .resume_noirq = hsi_resume_noirq,
+#endif
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = hsi_runtime_suspend,
+ .runtime_resume = hsi_runtime_resume,
+ .runtime_idle = hsi_runtime_idle,
+#endif
+};
+
+#define HSI_DRIVER_PM_OPS_PTR (&hsi_driver_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define HSI_DRIVER_PM_OPS_PTR NULL
+
+#endif
+
+static struct platform_driver hsi_pdriver = {
+ .driver = {
+ .name = HSI_MODULENAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = HSI_DRIVER_PM_OPS_PTR,
+#endif
+ },
+ .id_table = hsi_id_table,
+ .remove = __exit_p(hsi_platform_device_remove),
+};
+
+/* HSI bus and platform driver registration */
+static int __init hsi_driver_init(void)
+{
+ int err = 0;
+
+ pr_info(LOG_NAME "HSI DRIVER Version " HSI_DRIVER_VERSION "\n");
+
+ /* Register the (virtual) HSI bus */
+ err = hsi_bus_init();
+ if (err < 0) {
+ pr_err(LOG_NAME "HSI bus_register err %d\n", err);
+ return err;
+ }
+
+ err = hsi_debug_init();
+ if (err < 0) {
+ pr_err(LOG_NAME "HSI Debugfs failed %d\n", err);
+ goto rback1;
+ }
+
+ /* Register the HSI platform driver */
+ err = platform_driver_probe(&hsi_pdriver, hsi_platform_device_probe);
+ if (err < 0) {
+ pr_err(LOG_NAME "Platform DRIVER register FAILED: %d\n", err);
+ goto rback2;
+ }
+
+ return 0;
+rback2:
+ hsi_debug_exit();
+rback1:
+ hsi_bus_exit();
+ return err;
+}
+
+static void __exit hsi_driver_exit(void)
+{
+ platform_driver_unregister(&hsi_pdriver);
+ hsi_debug_exit();
+ hsi_bus_exit();
+
+ pr_info(LOG_NAME "HSI DRIVER removed\n");
+}
+
+module_init(hsi_driver_init);
+module_exit(hsi_driver_exit);
+
+MODULE_ALIAS("platform:" HSI_MODULENAME);
+MODULE_AUTHOR("Carlos Chinea / Nokia");
+MODULE_AUTHOR("Sebastien JAN / Texas Instruments");
+MODULE_AUTHOR("Djamil ELAIDI / Texas Instruments");
+MODULE_DESCRIPTION("MIPI High-speed Synchronous Serial Interface (HSI) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/omap_hsi/hsi_driver.h b/drivers/omap_hsi/hsi_driver.h
new file mode 100644
index 0000000..0991d98
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver.h
@@ -0,0 +1,398 @@
+/*
+ * hsi_driver.h
+ *
+ * Header file for the HSI driver low level interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef __HSI_DRIVER_H__
+#define __HSI_DRIVER_H__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/hsi_driver_if.h>
+#include <plat/omap_hsi.h>
+
+/* Channel states */
+#define HSI_CH_OPEN 0x01
+#define HSI_CH_RX_POLL 0x10
+#define HSI_CH_ACWAKE 0x02 /* ACWAKE line status */
+
+#define HSI_CH_NUMBER_NONE 0xFF
+/*
+ * The number of channels handled by the driver in the ports, or the highest
+ * port channel number (+1) used. (MAX:8 for SSI; 16 for HSI)
+ * Reducing this value optimizes the driver memory footprint.
+ */
+#define HSI_PORT_MAX_CH HSI_CHANNELS_MAX
+
+/* Number of DMA channels when nothing is defined for the device */
+#define HSI_DMA_CHANNEL_DEFAULT 8
+
+
+#define LOG_NAME "OMAP HSI: "
+
+/* SW strategies for HSI FIFO mapping */
+enum {
+ HSI_FIFO_MAPPING_UNDEF = 0,
+ HSI_FIFO_MAPPING_SSI, /* 8 FIFOs per port (SSI compatible mode) */
+ HSI_FIFO_MAPPING_ALL_PORT1, /* ALL FIFOs mapped on 1st port */
+};
+#define HSI_FIFO_MAPPING_DEFAULT HSI_FIFO_MAPPING_ALL_PORT1
+
+/* Device identifying constants */
+enum {
+ HSI_DRV_DEVICE_HSI,
+ HSI_DRV_DEVICE_SSI
+};
+
+/**
+ * struct hsi_data - HSI buffer descriptor
+ * @addr: pointer to the buffer where to send or receive data
+ * @size: size in words (32 bits) of the buffer
+ * @lch: associated GDD (DMA) logical channel number, if any
+ */
+struct hsi_data {
+ u32 *addr;
+ unsigned int size;
+ int lch;
+};
+
+/**
+ * struct hsi_channel - HSI channel data
+ * @read_data: Incoming HSI buffer descriptor
+ * @write_data: Outgoing HSI buffer descriptor
+ * @hsi_port: Reference to port where the channel belongs to
+ * @flags: Tracks if channel has been open
+ * @channel_number: HSI channel number
+ * @rw_lock: Read/Write lock to serialize access to callback and hsi_device
+ * @dev: Reference to the associated hsi_device channel
+ * @write_done: Callback to signal TX completed.
+ * @read_done: Callback to signal RX completed.
+ * @port_event: Callback to signal port events (RX Error, HWBREAK, CAWAKE ...)
+ */
+struct hsi_channel {
+ struct hsi_data read_data;
+ struct hsi_data write_data;
+ struct hsi_port *hsi_port;
+ u8 flags;
+ u8 channel_number;
+ rwlock_t rw_lock;
+ struct hsi_device *dev;
+ void (*write_done) (struct hsi_device *dev, unsigned int size);
+ void (*read_done) (struct hsi_device *dev, unsigned int size);
+ void (*port_event) (struct hsi_device *dev, unsigned int event,
+ void *arg);
+};
+
+/**
+ * struct hsi_port - hsi port driver data
+ * @hsi_channel: Array of channels in the port
+ * @hsi_controller: Reference to the HSI controller
+ * @port_number: port number
+ * @max_ch: maximum number of channels supported on the port
+ * @n_irq: HSI irq line use to handle interrupts (0 or 1)
+ * @irq: IRQ number
+ * @cawake_gpio: GPIO number for cawake line (-1 if none)
+ * @cawake_gpio_irq: IRQ number for cawake gpio events
+ * @cawake_status: Tracks CAWAKE line status
+ * @cawake_off_event: True if CAWAKE event was detected from OFF mode
+ * @acwake_status: Bitmap to track ACWAKE line status per channel
+ * @in_int_tasklet: True if interrupt tasklet for this port is currently running
+ * @in_cawake_tasklet: True if CAWAKE tasklet for this port is currently running
+ * @counters_on: indicates if the HSR counters are in use or not
+ * @reg_counters: stores the previous counters values when deactivated
+ * @lock: Serialize access to the port registers and internal data
+ * @hsi_tasklet: Bottom half for interrupts when clocks are enabled
+ * @cawake_tasklet: Bottom half for cawake events
+ */
+struct hsi_port {
+ struct hsi_channel hsi_channel[HSI_PORT_MAX_CH];
+ struct hsi_dev *hsi_controller;
+ u8 flags;
+ u8 port_number; /* Range [1,2] */
+ u8 max_ch;
+ u8 n_irq;
+ int irq;
+ int cawake_gpio;
+ int cawake_gpio_irq;
+ int cawake_status;
+ bool cawake_off_event;
+ unsigned int acwake_status; /* HSI_TODO : fine tune init values */
+ bool in_int_tasklet;
+ bool in_cawake_tasklet;
+ int counters_on;
+ unsigned long reg_counters;
+ spinlock_t lock; /* access to the port registers and internal data */
+ struct tasklet_struct hsi_tasklet;
+ struct tasklet_struct cawake_tasklet; /* SSI_TODO : need to replace */
+ /* by a workqueue */
+};
+
+/**
+ * struct hsi_dev - hsi controller driver data
+ * This structure is saved into platform_device->dev->p->driver_data
+ *
+ * @hsi_port: Array of hsi ports enabled in the controller
+ * @id: HSI controller platform id number
+ * @max_p: Number of ports enabled in the controller
+ * @hsi_clk: Reference to the HSI custom clock
+ * @base: HSI registers base virtual address
+ * @phy_base: HSI registers base physical address
+ * @lock: Serializes access to internal data and regs
+ * @clock_enabled: Indicates if HSI Clocks are ON
+ * @gdd_irq: GDD (DMA) irq number
+ * @fifo_mapping_strategy: Selected strategy for fifo to ports/channels mapping
+ * @gdd_usecount: Holds the number of ongoing DMA transfers
+ * @last_gdd_lch: Last used GDD logical channel
+ * @gdd_chan_count: Number of available DMA channels on the device (must be a power of 2)
+ * @in_dma_tasklet: True if DMA tasklet for the controller is currently running
+ * @set_min_bus_tput: (PM) callback to set minimum bus throughput
+ * @clk_notifier_register: (PM) callback for DVFS support
+ * @clk_notifier_unregister: (PM) callback for DVFS support
+ * @hsi_nb: (PM) Notification block for DVFS notification chain
+ * @hsi_gdd_tasklet: Bottom half for DMA Interrupts when clocks are enabled
+ * @dir: debugfs base directory
+ * @dev: Reference to the HSI platform device
+ */
+struct hsi_dev { /* HSI_TODO: should be later renamed into hsi_controller*/
+ struct hsi_port hsi_port[HSI_MAX_PORTS];
+ int id;
+ u8 max_p;
+ void __iomem *base;
+ unsigned long phy_base;
+ spinlock_t lock; /* Serializes access to internal data and regs */
+ bool clock_enabled;
+ int gdd_irq;
+ unsigned int fifo_mapping_strategy;
+ unsigned int gdd_usecount;
+ unsigned int last_gdd_lch;
+ unsigned int gdd_chan_count;
+ bool in_dma_tasklet;
+ void (*set_min_bus_tput) (struct device *dev, u8 agent_id,
+ unsigned long r);
+ struct notifier_block hsi_nb;
+ struct tasklet_struct hsi_gdd_tasklet;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir;
+#endif
+ struct device *dev;
+};
+
+/**
+ * struct hsi_platform_data - Board specific data
+*/
+struct hsi_platform_data {
+ void (*set_min_bus_tput) (struct device *dev, u8 agent_id,
+ unsigned long r);
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ int (*device_idle) (struct platform_device *pdev);
+ int (*wakeup_enable) (int hsi_port);
+ int (*wakeup_disable) (int hsi_port);
+ int (*wakeup_is_from_hsi) (void);
+ int (*board_suspend)(int hsi_port, bool dev_may_wakeup);
+ int (*board_resume)(int hsi_port);
+ u8 num_ports;
+ struct ctrl_ctx *ctx;
+ u8 hsi_gdd_chan_count;
+ unsigned long default_hsi_fclk;
+};
+
+/* HSI Bus */
+extern struct bus_type hsi_bus_type;
+
+int hsi_port_event_handler(struct hsi_port *p, unsigned int event, void *arg);
+int hsi_bus_init(void);
+void hsi_bus_exit(void);
+/* End HSI Bus */
+
+void hsi_reset_ch_read(struct hsi_channel *ch);
+void hsi_reset_ch_write(struct hsi_channel *ch);
+bool hsi_is_channel_busy(struct hsi_channel *ch);
+bool hsi_is_hsi_port_busy(struct hsi_port *pport);
+bool hsi_is_hsi_controller_busy(struct hsi_dev *hsi_ctrl);
+bool hsi_is_hst_port_busy(struct hsi_port *pport);
+bool hsi_is_hst_controller_busy(struct hsi_dev *hsi_ctrl);
+
+int hsi_driver_enable_interrupt(struct hsi_port *pport, u32 flag);
+int hsi_driver_enable_read_interrupt(struct hsi_channel *hsi_channel,
+ u32 *data);
+int hsi_driver_enable_write_interrupt(struct hsi_channel *hsi_channel,
+ u32 *data);
+bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl);
+int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int count);
+int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int count);
+
+int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch);
+int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch);
+void hsi_driver_disable_read_interrupt(struct hsi_channel *ch);
+void hsi_driver_disable_write_interrupt(struct hsi_channel *ch);
+int hsi_driver_cancel_write_dma(struct hsi_channel *ch);
+int hsi_driver_cancel_read_dma(struct hsi_channel *ch);
+int hsi_do_cawake_process(struct hsi_port *pport);
+
+int hsi_driver_device_is_hsi(struct platform_device *dev);
+
+int hsi_mpu_init(struct hsi_port *hsi_p, const char *irq_name);
+void hsi_mpu_exit(struct hsi_port *hsi_p);
+
+int hsi_gdd_init(struct hsi_dev *hsi_ctrl, const char *irq_name);
+void hsi_gdd_exit(struct hsi_dev *hsi_ctrl);
+
+int hsi_cawake_init(struct hsi_port *port, const char *irq_name);
+void hsi_cawake_exit(struct hsi_port *port);
+
+int hsi_fifo_get_id(struct hsi_dev *hsi_ctrl, unsigned int channel,
+ unsigned int port);
+int hsi_fifo_get_chan(struct hsi_dev *hsi_ctrl, unsigned int fifo,
+ unsigned int *channel, unsigned int *port);
+int hsi_fifo_mapping(struct hsi_dev *hsi_ctrl, unsigned int mtype);
+long hsi_hst_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hsr_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hst_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+long hsi_hsr_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel);
+u8 hsi_get_rx_fifo_occupancy(struct hsi_dev *hsi_ctrl, u8 fifo);
+void hsi_set_pm_force_hsi_on(struct hsi_dev *hsi_ctrl);
+void hsi_set_pm_default(struct hsi_dev *hsi_ctrl);
+int hsi_softreset(struct hsi_dev *hsi_ctrl);
+void hsi_softreset_driver(struct hsi_dev *hsi_ctrl);
+
+void hsi_clocks_disable_channel(struct device *dev, u8 channel_number,
+ const char *s);
+int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
+ const char *s);
+#ifdef CONFIG_PM_RUNTIME
+extern int hsi_runtime_resume(struct device *dev);
+extern int hsi_runtime_suspend(struct device *dev);
+#else
+static inline int hsi_runtime_resume(struct device *dev) { return -ENOSYS; }
+static inline int hsi_runtime_suspend(struct device *dev) { return -ENOSYS; }
+#endif
+void hsi_save_ctx(struct hsi_dev *hsi_ctrl);
+void hsi_restore_ctx(struct hsi_dev *hsi_ctrl);
+
+
+#ifdef CONFIG_DEBUG_FS
+int hsi_debug_init(void);
+void hsi_debug_exit(void);
+int hsi_debug_add_ctrl(struct hsi_dev *hsi_ctrl);
+void hsi_debug_remove_ctrl(struct hsi_dev *hsi_ctrl);
+#else
+#define hsi_debug_add_ctrl(hsi_ctrl) 0
+#define hsi_debug_remove_ctrl(hsi_ctrl)
+#define hsi_debug_init() 0
+#define hsi_debug_exit()
+#endif /* CONFIG_DEBUG_FS */
+
+static inline struct hsi_channel *hsi_ctrl_get_ch(struct hsi_dev *hsi_ctrl,
+ unsigned int port,
+ unsigned int channel)
+{
+ return &hsi_ctrl->hsi_port[port - 1].hsi_channel[channel];
+}
+
+/* HSI IO access */
+static inline u32 hsi_inl(void __iomem *base, u32 offset)
+{
+ return inl((unsigned int)base + offset);
+}
+
+static inline void hsi_outl(u32 data, void __iomem *base, u32 offset)
+{
+ outl(data, (unsigned int)base + offset);
+}
+
+static inline void hsi_outl_or(u32 data, void __iomem *base, u32 offset)
+{
+ u32 tmp = hsi_inl(base, offset);
+ hsi_outl((tmp | data), base, offset);
+}
+
+static inline void hsi_outl_and(u32 data, void __iomem *base, u32 offset)
+{
+ u32 tmp = hsi_inl(base, offset);
+ hsi_outl((tmp & data), base, offset);
+}
+
+static inline u16 hsi_inw(void __iomem *base, u32 offset)
+{
+ return inw((unsigned int)base + offset);
+}
+
+static inline void hsi_outw(u16 data, void __iomem *base, u32 offset)
+{
+ outw(data, (unsigned int)base + offset);
+}
+
+static inline void hsi_outw_or(u16 data, void __iomem *base, u32 offset)
+{
+ u16 tmp = hsi_inw(base, offset);
+ hsi_outw((tmp | data), base, offset);
+}
+
+static inline void hsi_outw_and(u16 data, void __iomem *base, u32 offset)
+{
+ u16 tmp = hsi_inw(base, offset);
+ hsi_outw((tmp & data), base, offset);
+}
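+
+/*
+ * Usage sketch for the accessors above (illustrative only): hsi_outl_or() and
+ * hsi_outl_and() give read-modify-write access to HSI registers, so callers
+ * can set or clear individual bits without disturbing the others, e.g.:
+ *
+ *	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ *	hsi_outl_and(~HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ *
+ * The first call unmasks the GDD interrupt for logical channel lch, the
+ * second masks it again; hsi_driver_dma.c uses exactly this pattern.
+ */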
+
+static inline int hsi_get_cawake(struct hsi_port *port)
+{
+ struct platform_device *pdev =
+ to_platform_device(port->hsi_controller->dev);
+
+ if (hsi_driver_device_is_hsi(pdev))
+ return (HSI_HSR_MODE_WAKE_STATUS ==
+ (hsi_inl(port->hsi_controller->base,
+ HSI_HSR_MODE_REG(port->port_number)) &
+ HSI_HSR_MODE_WAKE_STATUS));
+ else if (port->cawake_gpio >= 0)
+ return gpio_get_value(port->cawake_gpio);
+ else
+ return -ENXIO;
+}
+
+static inline void hsi_clocks_disable(struct device *dev, const char *s)
+{
+ hsi_clocks_disable_channel(dev, HSI_CH_NUMBER_NONE, s);
+}
+
+static inline int hsi_clocks_enable(struct device *dev, const char *s)
+{
+ return hsi_clocks_enable_channel(dev, HSI_CH_NUMBER_NONE, s);
+}
+
+#endif /* __HSI_DRIVER_H__ */
diff --git a/drivers/omap_hsi/hsi_driver_bus.c b/drivers/omap_hsi/hsi_driver_bus.c
new file mode 100644
index 0000000..4bce43d
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_bus.c
@@ -0,0 +1,203 @@
+/*
+ * hsi_driver_bus.c
+ *
+ * Implements an HSI bus, device and driver interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/device.h>
+#include "hsi_driver.h"
+
+#define HSI_PREFIX "hsi:"
+
+struct bus_type hsi_bus_type;
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s%s\n", HSI_PREFIX,
+ dev_name(dev));
+}
+
+static struct device_attribute hsi_dev_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL,
+};
+
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ add_uevent_var(env, "MODALIAS=%s%s", HSI_PREFIX, dev_name(dev));
+ return 0;
+}
+
+static int hsi_bus_match(struct device *device, struct device_driver *driver)
+{
+ struct hsi_device *dev = to_hsi_device(device);
+ struct hsi_device_driver *drv = to_hsi_device_driver(driver);
+
+ pr_debug("HSI DRIVER BUS : hsi_bus_match for ctrl:%d, port:%d, ch%d\n",
+ dev->n_ctrl, dev->n_p, dev->n_ch);
+
+ if (!test_bit(dev->n_ctrl, &drv->ctrl_mask))
+ return 0;
+
+ if (!test_bit(dev->n_ch, &drv->ch_mask[dev->n_p]))
+ return 0;
+
+ pr_info
+ ("HSI DRIVER BUS : hsi_bus_match SUCCESS : ctrl:%d (mask:%x),"
+ " port:%d, ch:%d (mask:%x)\n",
+ dev->n_ctrl, (u32) drv->ctrl_mask, dev->n_p, dev->n_ch,
+ (u32) drv->ch_mask[dev->n_p]);
+
+ return 1;
+}
+
+int hsi_bus_unreg_dev(struct device *device, void *p)
+{
+ device->release(device);
+ device_unregister(device);
+
+ return 0;
+}
+
+int __init hsi_bus_init(void)
+{
+ return bus_register(&hsi_bus_type);
+}
+
+void hsi_bus_exit(void)
+{
+ bus_for_each_dev(&hsi_bus_type, NULL, NULL, hsi_bus_unreg_dev);
+ bus_unregister(&hsi_bus_type);
+}
+
+static int hsi_bus_probe(struct device *dev)
+{
+ struct hsi_device_driver *drv;
+ int rc;
+
+ pr_debug("HSI DRIVER BUS : hsi_bus_probe\n");
+
+ if (!dev->driver)
+ return 0;
+
+ drv = to_hsi_device_driver(dev->driver);
+
+ if (!drv->probe)
+ return -ENODEV;
+
+ rc = drv->probe(to_hsi_device(dev));
+
+ return rc;
+}
+
+static int hsi_bus_remove(struct device *dev)
+{
+ struct hsi_device_driver *drv;
+ int ret;
+
+ pr_debug("HSI DRIVER BUS : hsi_bus_remove\n");
+
+ if (!dev->driver)
+ return 0;
+
+ drv = to_hsi_device_driver(dev->driver);
+ if (drv->remove) {
+ ret = drv->remove(to_hsi_device(dev));
+ } else {
+ dev->driver = NULL;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int hsi_bus_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct hsi_device_driver *drv;
+
+ if (!dev->driver)
+ return 0;
+
+ drv = to_hsi_device_driver(dev->driver);
+ if (!drv->suspend)
+ return 0;
+
+ return drv->suspend(to_hsi_device(dev), mesg);
+}
+
+static int hsi_bus_resume(struct device *dev)
+{
+ struct hsi_device_driver *drv;
+
+ if (!dev->driver)
+ return 0;
+
+ drv = to_hsi_device_driver(dev->driver);
+ if (!drv->resume)
+ return 0;
+
+ return drv->resume(to_hsi_device(dev));
+}
+
+struct bus_type hsi_bus_type = {
+ .name = "hsi",
+ .dev_attrs = hsi_dev_attrs,
+ .match = hsi_bus_match,
+ .uevent = hsi_bus_uevent,
+ .probe = hsi_bus_probe,
+ .remove = hsi_bus_remove,
+ .suspend = hsi_bus_suspend,
+ .resume = hsi_bus_resume,
+};
+
+/**
+ * hsi_register_driver - Register HSI device driver
+ * @driver - reference to the HSI device driver.
+ */
+int hsi_register_driver(struct hsi_device_driver *driver)
+{
+ int ret = 0;
+
+ if (driver == NULL)
+ return -EINVAL;
+
+ driver->driver.bus = &hsi_bus_type;
+
+ ret = driver_register(&driver->driver);
+
+ if (ret == 0)
+ pr_debug("hsi: driver %s registered\n", driver->driver.name);
+
+ return ret;
+}
+EXPORT_SYMBOL(hsi_register_driver);
+
+/**
+ * hsi_unregister_driver - Unregister HSI device driver
+ * @driver - reference to the HSI device driver.
+ */
+void hsi_unregister_driver(struct hsi_device_driver *driver)
+{
+ if (driver == NULL)
+ return;
+
+ driver_unregister(&driver->driver);
+
+ pr_debug("hsi: driver %s unregistered\n", driver->driver.name);
+}
+EXPORT_SYMBOL(hsi_unregister_driver);
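+
+/*
+ * Registration sketch (hypothetical client driver, illustrative only; the
+ * names example_driver, example_probe and "hsi_example" are made up). A
+ * protocol driver declares which controller and channels it serves through
+ * ctrl_mask and the per-port ch_mask, which hsi_bus_match() checks before
+ * probing:
+ *
+ *	static struct hsi_device_driver example_driver = {
+ *		.ctrl_mask	= 1 << 0,	// controller 0
+ *		.ch_mask[0]	= 1 << 0,	// first port, channel 0
+ *		.probe		= example_probe,
+ *		.driver		= { .name = "hsi_example" },
+ *	};
+ *
+ *	err = hsi_register_driver(&example_driver);
+ *
+ * with a matching hsi_unregister_driver(&example_driver) on module exit.
+ */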
diff --git a/drivers/omap_hsi/hsi_driver_debugfs.c b/drivers/omap_hsi/hsi_driver_debugfs.c
new file mode 100644
index 0000000..d1f32dd
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_debugfs.c
@@ -0,0 +1,500 @@
+/*
+ * hsi_driver_debugfs.c
+ *
+ * Implements HSI debugfs.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "hsi_driver.h"
+
+#define HSI_DIR_NAME_SIZE 64
+
+static struct dentry *hsi_dir;
+
+static int hsi_debug_show(struct seq_file *m, void *p)
+{
+ struct hsi_dev *hsi_ctrl = m->private;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ seq_printf(m, "REVISION\t: 0x%08x\n",
+ hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG));
+ if (hsi_driver_device_is_hsi(pdev))
+ seq_printf(m, "HWINFO\t\t: 0x%08x\n",
+ hsi_inl(hsi_ctrl->base, HSI_SYS_HWINFO_REG));
+ seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
+ hsi_inl(hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG));
+ seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
+ hsi_inl(hsi_ctrl->base, HSI_SYS_SYSSTATUS_REG));
+
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ return 0;
+}
+
+static int hsi_debug_port_show(struct seq_file *m, void *p)
+{
+ struct hsi_port *hsi_port = m->private;
+ struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = hsi_port->port_number;
+ int ch, fifo;
+ long buff_offset;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ if (hsi_port->cawake_gpio >= 0)
+ seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port));
+
+ seq_printf(m, "WAKE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_SYS_WAKE_REG(port)));
+ seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+ hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_REG(port, hsi_port->n_irq)));
+ seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+ hsi_inl(base,
+ HSI_SYS_MPU_STATUS_REG(port, hsi_port->n_irq)));
+ if (hsi_driver_device_is_hsi(pdev)) {
+ seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n",
+ hsi_port->n_irq,
+ hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port,
+ hsi_port->n_irq)));
+ seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
+ hsi_inl(base,
+ HSI_SYS_MPU_U_STATUS_REG(port,
+ hsi_port->n_irq)));
+ }
+ /* HST */
+ seq_printf(m, "\nHST\n===\n");
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_MODE_REG(port)));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)));
+ seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_DIVISOR_REG(port)));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_CHANNELS_REG(port)));
+ seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_ARBMODE_REG(port)));
+ seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_TXSTATE_REG(port)));
+ if (hsi_driver_device_is_hsi(pdev)) {
+ seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_BUFSTATE_REG(1)));
+ seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_BUFSTATE_REG(2)));
+ } else {
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_BUFSTATE_REG(port)));
+ }
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HST_BREAK_REG(port)));
+ for (ch = 0; ch < 8; ch++) {
+ buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch);
+ if (buff_offset >= 0)
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ hsi_inl(base, buff_offset));
+ }
+ if (hsi_driver_device_is_hsi(pdev)) {
+ for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) {
+ seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
+ hsi_inl(base,
+ HSI_HST_MAPPING_FIFO_REG(fifo)));
+ }
+ }
+ /* HSR */
+ seq_printf(m, "\nHSR\n===\n");
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_MODE_REG(port)));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_CHANNELS_REG(port)));
+ seq_printf(m, "COUNTERS\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_COUNTERS_REG(port)));
+ seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_RXSTATE_REG(port)));
+ if (hsi_driver_device_is_hsi(pdev)) {
+ seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_BUFSTATE_REG(1)));
+ seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_BUFSTATE_REG(2)));
+ } else {
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_BUFSTATE_REG(port)));
+ }
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_BREAK_REG(port)));
+ seq_printf(m, "ERROR\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_ERROR_REG(port)));
+ seq_printf(m, "ERRORACK\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_ERRORACK_REG(port)));
+ for (ch = 0; ch < 8; ch++) {
+ buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch);
+ if (buff_offset >= 0)
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ hsi_inl(base, buff_offset));
+ }
+ if (hsi_driver_device_is_hsi(pdev)) {
+ for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) {
+ seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
+ hsi_inl(base,
+ HSI_HSR_MAPPING_FIFO_REG(fifo)));
+ }
+ seq_printf(m, "DLL\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_DLL_REG));
+ seq_printf(m, "DIVISOR\t: 0x%08x\n",
+ hsi_inl(base, HSI_HSR_DIVISOR_REG(port)));
+ }
+
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ return 0;
+}
+
+static int hsi_debug_gdd_show(struct seq_file *m, void *p)
+{
+ struct hsi_dev *hsi_ctrl = m->private;
+ void __iomem *base = hsi_ctrl->base;
+ int lch;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
+ hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG));
+ seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
+ hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));
+
+ if (!hsi_driver_device_is_hsi(pdev)) {
+ seq_printf(m, "HW_ID\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_SSI_GDD_HW_ID_REG));
+ seq_printf(m, "PPORT_ID\t: 0x%08x\n",
+ hsi_inl(base, HSI_SSI_GDD_PPORT_ID_REG));
+ seq_printf(m, "MPORT_ID\t: 0x%08x\n",
+ hsi_inl(base, HSI_SSI_GDD_MPORT_ID_REG));
+ seq_printf(m, "TEST\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_SSI_GDD_TEST_REG));
+ }
+
+ seq_printf(m, "GCR\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_GCR_REG));
+
+ for (lch = 0; lch < hsi_ctrl->gdd_chan_count; lch++) {
+ seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
+ seq_printf(m, "CSDP\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CSDP_REG(lch)));
+ seq_printf(m, "CCR\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CCR_REG(lch)));
+ seq_printf(m, "CICR\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CCIR_REG(lch)));
+ seq_printf(m, "CSR\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CSR_REG(lch)));
+ seq_printf(m, "CSSA\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_GDD_CSSA_REG(lch)));
+ seq_printf(m, "CDSA\t\t: 0x%08x\n",
+ hsi_inl(base, HSI_GDD_CDSA_REG(lch)));
+ seq_printf(m, "CEN\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CEN_REG(lch)));
+ seq_printf(m, "CSAC\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CSAC_REG(lch)));
+ seq_printf(m, "CDAC\t\t: 0x%04x\n",
+ hsi_inw(base, HSI_GDD_CDAC_REG(lch)));
+ if (!hsi_driver_device_is_hsi(pdev))
+ seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
+ hsi_inw(base,
+ HSI_SSI_GDD_CLNK_CTRL_REG(lch)));
+ }
+
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ return 0;
+}
+
+static int hsi_port_counters_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int hsi_port_counters_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static loff_t hsi_port_counters_seek(struct file *file, loff_t off, int whence)
+{
+ return 0;
+}
+
+static ssize_t hsi_port_counters_read(struct file *filep, char __user * buff,
+ size_t count, loff_t *offp)
+{
+ ssize_t ret;
+ struct hsi_port *hsi_port = filep->private_data;
+ struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = hsi_port->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ char str[50];
+ unsigned long reg;
+
+ if (*offp > 0) {
+ ret = 0;
+ goto hsi_cnt_rd_bk;
+ }
+
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ reg = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ sprintf(str, "FT:%d, TB:%d, FB:%d\n",
+ (int)(reg & HSI_COUNTERS_FT_MASK) >>
+ HSI_COUNTERS_FT_OFFSET,
+ (int)(reg & HSI_COUNTERS_TB_MASK) >>
+ HSI_COUNTERS_TB_OFFSET,
+ (int)(reg & HSI_COUNTERS_FB_MASK) >>
+ HSI_COUNTERS_FB_OFFSET);
+ } else {
+ sprintf(str, "timeout:%d\n", (int)reg);
+ }
+
+ ret = strlen(str);
+ if (copy_to_user((void __user *)buff, str, ret)) {
+ dev_err(hsi_ctrl->dev, "copy_to_user failed\n");
+ ret = 0;
+ } else {
+ *offp = ret;
+ }
+
+hsi_cnt_rd_bk:
+ return ret;
+}
+
+/*
+ * Split the buffer `buf' into space-separated words.
+ * Return the number of words or <0 on error.
+ */
+static int hsi_debug_tokenize(char *buf, char *words[], int maxwords)
+{
+ int nwords = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* Skip leading whitespace */
+ while (*buf && isspace(*buf))
+ buf++;
+ if (!*buf)
+ break; /* oh, it was trailing whitespace */
+
+ /* Run `end' over a word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+ /* `buf' is the start of the word, `end' is one past the end */
+
+ if (nwords == maxwords)
+ return -EINVAL; /* ran out of words[] before bytes */
+ if (*end)
+ *end++ = '\0'; /* terminate the word */
+ words[nwords++] = buf;
+ buf = end;
+ }
+ return nwords;
+}
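+
+/*
+ * Worked example (illustrative): for the counters write path below, an input
+ * buffer containing "0x5 0x3 0x7\n" is split in place into
+ * words[] = { "0x5", "0x3", "0x7" } and the function returns 3. If more
+ * input remains once maxwords words have been collected, it returns -EINVAL.
+ */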
+
+static ssize_t hsi_port_counters_write(struct file *filep,
+ const char __user *buff, size_t count,
+ loff_t *offp)
+{
+ ssize_t ret;
+ struct hsi_port *hsi_port = filep->private_data;
+ struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = hsi_port->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+#define MAXWORDS 4
+ int nwords;
+ char *words[MAXWORDS];
+ char tmpbuf[256];
+ unsigned long reg, ft, tb, fb;
+
+ if (count == 0)
+ return 0;
+ if (count > sizeof(tmpbuf) - 1)
+ return -E2BIG;
+ if (copy_from_user(tmpbuf, buff, count))
+ return -EFAULT;
+ tmpbuf[count] = '\0';
+ dev_dbg(hsi_ctrl->dev, "%s: read %d bytes from userspace\n",
+ __func__, (int)count);
+
+ nwords = hsi_debug_tokenize(tmpbuf, words, MAXWORDS);
+ if (nwords < 0) {
+ dev_warn(hsi_ctrl->dev,
+ "HSI counters write usage: echo <values> > counters\n");
+ return -EINVAL;
+ }
+
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ if (nwords != 3) {
+ dev_warn(hsi_ctrl->dev, "HSI counters write usage: "
+ "echo \"FT TB FB\" > counters\n");
+ ret = -EINVAL;
+ goto hsi_cnt_w_bk1;
+ }
+ strict_strtoul(words[0], 0, &ft);
+ strict_strtoul(words[1], 0, &tb);
+ strict_strtoul(words[2], 0, &fb);
+ reg = ((ft << HSI_COUNTERS_FT_OFFSET & HSI_COUNTERS_FT_MASK) |
+ (tb << HSI_COUNTERS_TB_OFFSET & HSI_COUNTERS_TB_MASK) |
+ (fb << HSI_COUNTERS_FB_OFFSET & HSI_COUNTERS_FB_MASK));
+ } else {
+ if (nwords != 1) {
+ dev_warn(hsi_ctrl->dev, "HSI counters write usage: "
+ "echo \"timeout\" > counters\n");
+ ret = -EINVAL;
+ goto hsi_cnt_w_bk1;
+ }
+ strict_strtoul(words[0], 0, &reg);
+ }
+ hsi_outl(reg, base, HSI_HSR_COUNTERS_REG(port));
+ ret = count;
+ *offp += count;
+
+hsi_cnt_w_bk1:
+
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+
+ return ret;
+}
+
+static int hsi_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hsi_debug_show, inode->i_private);
+}
+
+static int hsi_port_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hsi_debug_port_show, inode->i_private);
+}
+
+static int hsi_gdd_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hsi_debug_gdd_show, inode->i_private);
+}
+
+static const struct file_operations hsi_regs_fops = {
+ .open = hsi_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations hsi_port_regs_fops = {
+ .open = hsi_port_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations hsi_port_counters_fops = {
+ .open = hsi_port_counters_open,
+ .read = hsi_port_counters_read,
+ .write = hsi_port_counters_write,
+ .llseek = hsi_port_counters_seek,
+ .release = hsi_port_counters_release,
+};
+
+static const struct file_operations hsi_gdd_regs_fops = {
+ .open = hsi_gdd_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init hsi_debug_add_ctrl(struct hsi_dev *hsi_ctrl)
+{
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ unsigned char dir_name[HSI_DIR_NAME_SIZE];
+ struct dentry *dir;
+ unsigned int port;
+
+ if (pdev->id < 0) {
+ hsi_ctrl->dir = debugfs_create_dir(pdev->name, hsi_dir);
+ } else {
+ snprintf(dir_name, sizeof(dir_name), "%s%d", pdev->name,
+ pdev->id);
+ hsi_ctrl->dir = debugfs_create_dir(dir_name, hsi_dir);
+ }
+ if (IS_ERR(hsi_ctrl->dir))
+ return PTR_ERR(hsi_ctrl->dir);
+
+ debugfs_create_file("regs", S_IRUGO, hsi_ctrl->dir, hsi_ctrl,
+ &hsi_regs_fops);
+
+ for (port = 0; port < hsi_ctrl->max_p; port++) {
+ snprintf(dir_name, sizeof(dir_name), "port%d", port + 1);
+ dir = debugfs_create_dir(dir_name, hsi_ctrl->dir);
+ if (IS_ERR(dir))
+ goto rback;
+ debugfs_create_file("regs", S_IRUGO, dir,
+ &hsi_ctrl->hsi_port[port],
+ &hsi_port_regs_fops);
+ debugfs_create_file("counters", S_IRUGO | S_IWUGO, dir,
+ &hsi_ctrl->hsi_port[port],
+ &hsi_port_counters_fops);
+ }
+
+ dir = debugfs_create_dir("gdd", hsi_ctrl->dir);
+ if (IS_ERR(dir))
+ goto rback;
+ debugfs_create_file("regs", S_IRUGO, dir, hsi_ctrl, &hsi_gdd_regs_fops);
+
+ return 0;
+rback:
+ debugfs_remove_recursive(hsi_ctrl->dir);
+ return PTR_ERR(dir);
+}
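+
+/*
+ * Resulting debugfs layout (illustrative; assumes debugfs is mounted at
+ * /sys/kernel/debug and a platform device named "omap_hsi" with id 0; the
+ * actual directory name comes from the platform device registration):
+ *
+ *	/sys/kernel/debug/hsi/omap_hsi0/regs
+ *	/sys/kernel/debug/hsi/omap_hsi0/port1/regs
+ *	/sys/kernel/debug/hsi/omap_hsi0/port1/counters
+ *	/sys/kernel/debug/hsi/omap_hsi0/gdd/regs
+ *
+ * "counters" is also writable: "FT TB FB" on HSI, a single timeout value on
+ * SSI (see hsi_port_counters_write() above).
+ */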
+
+void hsi_debug_remove_ctrl(struct hsi_dev *hsi_ctrl)
+{
+ debugfs_remove_recursive(hsi_ctrl->dir);
+}
+
+int __init hsi_debug_init(void)
+{
+ hsi_dir = debugfs_create_dir("hsi", NULL);
+ if (IS_ERR(hsi_dir))
+ return PTR_ERR(hsi_dir);
+
+ return 0;
+}
+
+void hsi_debug_exit(void)
+{
+ debugfs_remove_recursive(hsi_dir);
+}
diff --git a/drivers/omap_hsi/hsi_driver_dma.c b/drivers/omap_hsi/hsi_driver_dma.c
new file mode 100644
index 0000000..ad819f5
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_dma.c
@@ -0,0 +1,643 @@
+/*
+ * hsi_driver_dma.c
+ *
+ * Implements HSI low level interface driver functionality with DMA support.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "hsi_driver.h"
+
+#define HSI_SYNC_WRITE 0
+#define HSI_SYNC_READ 1
+#define HSI_L3_TPUT 13428 /* 13428 KiB/s => ~110 Mbit/s */
+
+static unsigned char hsi_sync_table[2][2][8] = {
+ {
+ {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ {0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00}
+ }, {
+ {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17},
+ {0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}
+ }
+};
+
+/**
+ * hsi_is_dma_read_int_pending - Indicates if a DMA read interrupt is pending
+ * @hsi_ctrl - HSI controller of the GDD.
+ *
+ * Needs to be called holding the hsi_controller lock
+ *
+ * Returns true if DMA read interrupt is pending, else false
+ */
+bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl)
+{
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int gdd_lch = 0;
+ u32 status_reg = 0;
+ int i, j;
+ status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ if (!status_reg)
+ return false;
+
+ /* Scan all enabled DMA channels */
+ for (gdd_lch = 0; gdd_lch < hsi_ctrl->gdd_chan_count; gdd_lch++) {
+ if (!(status_reg & HSI_GDD_LCH(gdd_lch)))
+ continue;
+ for (i = 0; i < hsi_ctrl->max_p; i++)
+ for (j = 0; j < hsi_ctrl->hsi_port[i].max_ch; j++)
+ if (hsi_ctrl->hsi_port[i].
+ hsi_channel[j].read_data.lch == gdd_lch)
+ return true;
+ }
+ return false;
+}
+/**
+ * hsi_get_free_lch - Get a free GDD(DMA) logical channel
+ * @hsi_ctrl - HSI controller of the GDD.
+ *
+ * Needs to be called holding the hsi_controller lock
+ *
+ * Returns the logical channel number, or -EBUSY if none available
+ */
+static int hsi_get_free_lch(struct hsi_dev *hsi_ctrl)
+{
+ unsigned int enable_reg;
+ int i, lch;
+
+ enable_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ lch = hsi_ctrl->last_gdd_lch;
+ for (i = 0; i < hsi_ctrl->gdd_chan_count; i++) {
+ if (++lch >= hsi_ctrl->gdd_chan_count)
+ lch = 0;
+ if ((enable_reg & HSI_GDD_LCH(lch)) == 0) {
+ hsi_ctrl->last_gdd_lch = lch;
+ return lch;
+ }
+ }
+ return -EBUSY;
+}
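+
+/*
+ * Example (illustrative): with gdd_chan_count == 8 and last_gdd_lch == 6, the
+ * loop above probes logical channels 7, 0, 1, ..., 6 in that order and returns
+ * the first one whose bit in the GDD MPU IRQ enable register is still clear,
+ * so allocations rotate over the DMA channels instead of always reusing
+ * channel 0.
+ */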
+
+/**
+ * hsi_driver_write_dma - Program GDD [DMA] to write data from memory to
+ * the hsi channel buffer.
+ * @hsi_channel - pointer to the hsi_channel to write data to.
+ * @data - 32-bit word pointer to the data.
+ * @size - Number of 32-bit words to be transferred.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int size)
+{
+ struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = hsi_channel->hsi_port->port_number;
+ unsigned int channel = hsi_channel->channel_number;
+ unsigned int sync;
+ int lch;
+ dma_addr_t src_addr;
+ dma_addr_t dest_addr;
+ u16 tmp;
+ int fifo;
+
+ if ((size < 1) || (data == NULL))
+ return -EINVAL;
+
+ lch = hsi_get_free_lch(hsi_ctrl);
+ if (lch < 0) {
+ dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
+ return -EBUSY; /* No free GDD logical channels. */
+ } else {
+ dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for write on"
+ " HSI channel %d.\n", lch,
+ hsi_channel->channel_number);
+ }
+
+ /* NOTE: Getting a free GDD logical channel and
+ * reserving it must be done atomically. */
+ hsi_channel->write_data.lch = lch;
+
+ /* Sync is required for SSI but not for HSI */
+ sync = hsi_sync_table[HSI_SYNC_WRITE][port - 1][channel];
+
+ src_addr = dma_map_single(hsi_ctrl->dev, data, size * 4, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hsi_ctrl->dev, src_addr))) {
+ dev_err(hsi_ctrl->dev, "Failed to create DMA write mapping.\n");
+ return -ENOMEM;
+ }
+
+ tmp = HSI_SRC_SINGLE_ACCESS0 |
+ HSI_SRC_MEMORY_PORT |
+ HSI_DST_SINGLE_ACCESS0 |
+ HSI_DST_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
+ hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));
+
+ tmp = HSI_SRC_AMODE_POSTINC | HSI_DST_AMODE_CONST | sync;
+ hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));
+
+ hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));
+
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
+ "transfer to FIFO.\n");
+ return -EFAULT;
+ }
+ /* HSI CDSA register takes a FIFO ID when copying to FIFO */
+ hsi_outl(fifo, base, HSI_GDD_CDSA_REG(lch));
+ } else {
+ dest_addr = hsi_ctrl->phy_base + HSI_HST_BUFFER_CH_REG(port,
+ channel);
+ /* SSI CDSA register always takes a 32-bit address */
+ hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
+ }
+
+ /* HSI CSSA register takes a 32-bit address when copying from memory */
+ /* SSI CSSA register always takes a 32-bit address */
+ hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
+ hsi_outw(size, base, HSI_GDD_CEN_REG(lch));
+
+ /* TODO : Need to clean interrupt status here to avoid spurious int */
+
+ hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
+
+ return 0;
+}
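+
+/*
+ * Calling sketch (illustrative only; "ch", "buf" and "n_words" are
+ * hypothetical caller-owned objects and error handling is omitted). As stated
+ * above, the controller lock must be held, with the HSI clocks already
+ * enabled by the caller; spin_lock() is shown for brevity, real callers pick
+ * the locking variant suited to their context:
+ *
+ *	spin_lock(&hsi_ctrl->lock);
+ *	err = hsi_driver_write_dma(ch, buf, n_words);
+ *	spin_unlock(&hsi_ctrl->lock);
+ *
+ * Completion is asynchronous: the GDD interrupt fires, do_hsi_gdd_lch() below
+ * unmaps the buffer and invokes the channel's write_done() callback.
+ */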
+
+/**
+ * hsi_driver_read_dma - Program GDD [DMA] to write data to memory from
+ * the hsi channel buffer.
+ * @hsi_channel - pointer to the hsi_channel to read data from.
+ * @data - 32-bit word pointer where to store the incoming data.
+ * @count - Number of 32-bit words to be transferred to the buffer.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 * data,
+ unsigned int count)
+{
+ struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = hsi_channel->hsi_port->port_number;
+ unsigned int channel = hsi_channel->channel_number;
+ unsigned int sync;
+ int lch;
+ dma_addr_t src_addr;
+ dma_addr_t dest_addr;
+ u16 tmp;
+ int fifo;
+
+ lch = hsi_get_free_lch(hsi_ctrl);
+ if (lch < 0) {
+ dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
+ return -EBUSY; /* No free GDD logical channels. */
+ } else {
+ dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for read on"
+ " HSI channel %d.\n", lch,
+ hsi_channel->channel_number);
+ }
+
+ /* When DMA is used for Rx, disable the Rx interrupt
+ * (otherwise a DATAAVAILABLE event would be triggered on the first
+ * received data word).
+ * The Rx interrupt might still be active for the polling feature.
+ */
+#if 0
+ if (omap_readl(0x4A05A810)) {
+ dev_err(hsi_ctrl->dev,
+ "READ INTERRUPT IS PENDING DMA() but still disabling %0x\n",
+ omap_readl(0x4A05A810));
+ }
+#endif
+ hsi_driver_disable_read_interrupt(hsi_channel);
+
+ /*
+ * NOTE: Getting a free GDD logical channel and
+ * reserving it must be done atomically.
+ */
+ hsi_channel->read_data.lch = lch;
+
+ /* Sync is required for SSI but not for HSI */
+ sync = hsi_sync_table[HSI_SYNC_READ][port - 1][channel];
+
+ dest_addr = dma_map_single(hsi_ctrl->dev, data, count * 4,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hsi_ctrl->dev, dest_addr))) {
+ dev_err(hsi_ctrl->dev, "Failed to create DMA read mapping.\n");
+ return -ENOMEM;
+ }
+
+ tmp = HSI_DST_SINGLE_ACCESS0 |
+ HSI_DST_MEMORY_PORT |
+ HSI_SRC_SINGLE_ACCESS0 |
+ HSI_SRC_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
+ hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));
+
+ tmp = HSI_DST_AMODE_POSTINC | HSI_SRC_AMODE_CONST | sync;
+ hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));
+
+ hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));
+
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
+ "transfer from FIFO.\n");
+ return -EFAULT;
+ }
+ /* HSI CSSA register takes a FIFO ID when copying from FIFO */
+ hsi_outl(fifo, base, HSI_GDD_CSSA_REG(lch));
+ } else{
+ src_addr = hsi_ctrl->phy_base + HSI_HSR_BUFFER_CH_REG(port,
+ channel);
+ /* SSI CSSA register always takes a 32-bit address */
+ hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
+ }
+
+ /* HSI CDSA register takes a 32-bit address when copying to memory */
+ /* SSI CDSA register always takes a 32-bit address */
+ hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
+ hsi_outw(count, base, HSI_GDD_CEN_REG(lch));
+
+ /* TODO : Need to clean interrupt status here to avoid spurious int */
+
+ hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));
+
+ return 0;
+}
+
+/**
+ * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : DMA cancel success, data not transferred to TX FIFO
+ * 0 : DMA transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever returned value, write callback will not be called after
+ * write cancel.
+ */
+int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
+{
+ int lch = hsi_ch->write_data.lch;
+ unsigned int port = hsi_ch->hsi_port->port_number;
+ unsigned int channel = hsi_ch->channel_number;
+ struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
+ u16 ccr, gdd_csr;
+ long buff_offset;
+ u32 status_reg;
+ dma_addr_t dma_h;
+ size_t size;
+
+ dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_write_dma "
+ "channel %d\n", hsi_ch->channel_number);
+
+ if (lch < 0) {
+ dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
+ "channel %d\n", hsi_ch->channel_number);
+ return -ENXIO;
+ }
+ ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+ if (!(ccr & HSI_CCR_ENABLE)) {
+ dev_dbg(&hsi_ch->dev->device, "Write cancel on not "
+ "enabled logical channel %d CCR REG 0x%04X\n",
+ lch, ccr);
+ }
+ status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+ /* Clear CSR register by reading it, as it is cleared automatically */
+ /* by HW after SW read. */
+ gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
+ hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
+ HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
+ HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+ /* Unmap DMA region */
+ dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
+ size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);
+
+ buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
+ if (buff_offset >= 0)
+ hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
+ buff_offset);
+
+ hsi_reset_ch_write(hsi_ch);
+ return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
+ * specified hsi channel.
+ * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
+ *
+ * hsi_controller lock must be held before calling this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : DMA cancel success, data not available at expected
+ * address.
+ * 0 : DMA transfer is already over, data already available at
+ * expected address.
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
+{
+ int lch = hsi_ch->read_data.lch;
+ struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
+ u16 ccr, gdd_csr;
+ u32 status_reg;
+ dma_addr_t dma_h;
+ size_t size;
+
+ dev_err(&hsi_ch->dev->device, "hsi_driver_cancel_read_dma "
+ "channel %d\n", hsi_ch->channel_number);
+
+ /* Re-enable interrupts for polling if needed */
+ if (hsi_ch->flags & HSI_CH_RX_POLL)
+ hsi_driver_enable_read_interrupt(hsi_ch, NULL);
+
+ if (lch < 0) {
+ dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
+ "channel %d\n", hsi_ch->channel_number);
+ return -ENXIO;
+ }
+
+ ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+ if (!(ccr & HSI_CCR_ENABLE)) {
+ dev_dbg(&hsi_ch->dev->device, "Read cancel on not "
+ "enabled logical channel %d CCR REG 0x%04X\n",
+ lch, ccr);
+ }
+
+ status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
+
+ /* Clear CSR register by reading it, as it is cleared automatically */
+ /* by HW after SW read */
+ gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
+ hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
+ HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
+ HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+ /* Unmap DMA region - Access to the buffer is now safe */
+ dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
+ size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);
+
+ hsi_reset_ch_read(hsi_ch);
+ return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_get_info_from_gdd_lch - Retrieve channel information from a DMA channel
+ * @hsi_ctrl - HSI device control structure
+ * @lch - DMA logical channel
+ * @port - HSI port
+ * @channel - HSI channel
+ * @is_read_path - channel is used for reading
+ *
+ * Updates the port, channel and is_read_path parameters depending on the
+ * lch DMA channel status.
+ *
+ * Return 0 on success and < 0 on error.
+ */
+int hsi_get_info_from_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int lch,
+ unsigned int *port, unsigned int *channel,
+ unsigned int *is_read_path)
+{
+ int i_ports;
+ int i_chans;
+ int err = -1;
+
+ for (i_ports = 0; i_ports < HSI_MAX_PORTS; i_ports++)
+ for (i_chans = 0; i_chans < HSI_PORT_MAX_CH; i_chans++)
+ if (hsi_ctrl->hsi_port[i_ports].
+ hsi_channel[i_chans].read_data.lch == lch) {
+ *is_read_path = 1;
+ *port = i_ports + 1;
+ *channel = i_chans;
+ err = 0;
+ goto get_info_bk;
+ } else if (hsi_ctrl->hsi_port[i_ports].
+ hsi_channel[i_chans].write_data.lch == lch) {
+ *is_read_path = 0;
+ *port = i_ports + 1;
+ *channel = i_chans;
+ err = 0;
+ goto get_info_bk;
+ }
+get_info_bk:
+ return err;
+}
+
+static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
+{
+ void __iomem *base = hsi_ctrl->base;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ struct hsi_channel *ch;
+ unsigned int port;
+ unsigned int channel;
+ unsigned int is_read_path;
+ u32 gdd_csr;
+ dma_addr_t dma_h;
+ size_t size;
+ int fifo, fifo_words_avail;
+
+ if (hsi_get_info_from_gdd_lch(hsi_ctrl, gdd_lch, &port, &channel,
+ &is_read_path) < 0) {
+ dev_err(hsi_ctrl->dev, "Unable to match the DMA channel %d with"
+ " an HSI channel\n", gdd_lch);
+ return;
+ } else {
+ dev_dbg(hsi_ctrl->dev, "DMA event on gdd_lch=%d => port=%d, "
+ "channel=%d, read=%d\n", gdd_lch, port, channel,
+ is_read_path);
+ }
+
+ hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
+ HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ /* Warning : CSR register is cleared automatically by HW after SW read */
+ gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));
+
+ if (!(gdd_csr & HSI_CSR_TOUT)) {
+ if (is_read_path) { /* Read path */
+ dma_h = hsi_inl(base, HSI_GDD_CDSA_REG(gdd_lch));
+ size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
+ dma_sync_single_for_cpu(hsi_ctrl->dev, dma_h, size,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size,
+ DMA_FROM_DEVICE);
+ ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
+ hsi_reset_ch_read(ch);
+
+ dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
+ "(size %d).\n", channel, size/4);
+ spin_unlock(&hsi_ctrl->lock);
+ ch->read_done(ch->dev, size / 4);
+ spin_lock(&hsi_ctrl->lock);
+
+ /* Check if FIFO is correctly emptied */
+ if (hsi_driver_device_is_hsi(pdev)) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO "
+ "id found for channel %d.\n",
+ channel);
+ return;
+ }
+ fifo_words_avail =
+ hsi_get_rx_fifo_occupancy(hsi_ctrl,
+ fifo);
+ if (fifo_words_avail)
+ dev_dbg(hsi_ctrl->dev,
+ "WARNING: FIFO %d not empty "
+ "after DMA copy, remaining "
+ "%d/%d frames\n",
+ fifo, fifo_words_avail,
+ HSI_HSR_FIFO_SIZE);
+ }
+ /* Re-enable interrupts for polling if needed */
+ if (ch->flags & HSI_CH_RX_POLL)
+ hsi_driver_enable_read_interrupt(ch, NULL);
+ } else { /* Write path */
+ dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
+ size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
+ dma_unmap_single(hsi_ctrl->dev, dma_h, size,
+ DMA_TO_DEVICE);
+ ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
+ hsi_reset_ch_write(ch);
+
+ dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback "
+ "(size %d).\n", channel, size/4);
+ spin_unlock(&hsi_ctrl->lock);
+ ch->write_done(ch->dev, size / 4);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ } else {
+ dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
+ " on gdd channel %d\n", gdd_lch);
+ spin_unlock(&hsi_ctrl->lock);
+ /* TODO : need to perform a DMA soft reset */
+ hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
+ HSI_EVENT_ERROR, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+}
+
+static u32 hsi_process_dma_event(struct hsi_dev *hsi_ctrl)
+{
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int gdd_lch = 0;
+ u32 status_reg = 0;
+ u32 lch_served = 0;
+ unsigned int gdd_max_count = hsi_ctrl->gdd_chan_count;
+
+ status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+ status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
+ if (!status_reg) {
+ dev_dbg(hsi_ctrl->dev, "DMA : no event, exit.\n");
+ return 0;
+ }
+
+ for (gdd_lch = 0; gdd_lch < gdd_max_count; gdd_lch++) {
+ if (status_reg & HSI_GDD_LCH(gdd_lch)) {
+ do_hsi_gdd_lch(hsi_ctrl, gdd_lch);
+ lch_served |= HSI_GDD_LCH(gdd_lch);
+ }
+ }
+
+ /* Acknowledge interrupt for DMA channel */
+ hsi_outl(lch_served, base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
+
+
+ return status_reg;
+}
+
+static void do_hsi_gdd_tasklet(unsigned long device)
+{
+ struct hsi_dev *hsi_ctrl = (struct hsi_dev *)device;
+
+ dev_dbg(hsi_ctrl->dev, "DMA Tasklet : clock_enabled=%d\n",
+ hsi_ctrl->clock_enabled);
+
+ spin_lock(&hsi_ctrl->lock);
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+ hsi_ctrl->in_dma_tasklet = true;
+
+ hsi_process_dma_event(hsi_ctrl);
+
+ hsi_ctrl->in_dma_tasklet = false;
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+ spin_unlock(&hsi_ctrl->lock);
+
+ enable_irq(hsi_ctrl->gdd_irq);
+}
+
+static irqreturn_t hsi_gdd_mpu_handler(int irq, void *p)
+{
+ struct hsi_dev *hsi_ctrl = p;
+
+ tasklet_hi_schedule(&hsi_ctrl->hsi_gdd_tasklet);
+
+ /* Disable interrupt until Bottom Half has cleared the IRQ status */
+ /* register */
+ disable_irq_nosync(hsi_ctrl->gdd_irq);
+
+ return IRQ_HANDLED;
+}
+
+int __init hsi_gdd_init(struct hsi_dev *hsi_ctrl, const char *irq_name)
+{
+ tasklet_init(&hsi_ctrl->hsi_gdd_tasklet, do_hsi_gdd_tasklet,
+ (unsigned long)hsi_ctrl);
+
+ dev_info(hsi_ctrl->dev, "Registering IRQ %s (%d)\n",
+ irq_name, hsi_ctrl->gdd_irq);
+
+ if (request_irq(hsi_ctrl->gdd_irq, hsi_gdd_mpu_handler,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH,
+ irq_name, hsi_ctrl) < 0) {
+ dev_err(hsi_ctrl->dev, "FAILED to request GDD IRQ %d\n",
+ hsi_ctrl->gdd_irq);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void hsi_gdd_exit(struct hsi_dev *hsi_ctrl)
+{
+ tasklet_kill(&hsi_ctrl->hsi_gdd_tasklet);
+ free_irq(hsi_ctrl->gdd_irq, hsi_ctrl);
+}
diff --git a/drivers/omap_hsi/hsi_driver_fifo.c b/drivers/omap_hsi/hsi_driver_fifo.c
new file mode 100644
index 0000000..aa33a1a
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_fifo.c
@@ -0,0 +1,325 @@
+/*
+ * hsi_driver_fifo.c
+ *
+ * Implements HSI module fifo management.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include "hsi_driver.h"
+
+/**
+ * hsi_fifo_get_id - Get fifo index corresponding to (port, channel)
+ * @hsi_ctrl - HSI controller data
+ * @channel - channel used
+ * @port - HSI port used
+ *
+ * Returns the fifo index associated to the provided (port, channel).
+ * Notes: 1) The fifo <=> (port, channel) correspondence depends on the selected
+ *           SW strategy for channel mapping (fifo management).
+ *        2) The mapping is identical for the Read and Write paths.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_get_id(struct hsi_dev *hsi_ctrl, unsigned int channel,
+ unsigned int port)
+{
+ int fifo_index = 0;
+ int err = 0;
+
+ if (unlikely((channel >= HSI_CHANNELS_MAX) || (port < 1) ||
+ (port > 2))) {
+ err = -EINVAL;
+ goto fifo_id_bk;
+ }
+
+ if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_ALL_PORT1) {
+ if (unlikely(port != 1)) {
+ err = -EINVAL;
+ goto fifo_id_bk;
+ } else {
+ fifo_index = channel;
+ }
+ } else if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_SSI) {
+ if (unlikely(channel >= 8)) {
+ err = -EINVAL;
+ goto fifo_id_bk;
+ } else {
+ fifo_index = channel + 8 * (port - 1);
+ }
+ } else {
+ err = -EPERM;
+ goto fifo_id_bk;
+ }
+
+fifo_id_bk:
+ if (unlikely(err < 0)) {
+ fifo_index = err;
+ dev_err(hsi_ctrl->dev, "Cannot map a FIFO to the requested "
+ "params: channel:%d, port:%d; ERR=%d\n", channel, port,
+ err);
+ }
+
+ return fifo_index;
+}
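+
+/*
+ * Worked examples of the translation above (HSI devices only):
+ *
+ *	HSI_FIFO_MAPPING_ALL_PORT1: (port 1, channel 5) -> fifo 5
+ *	HSI_FIFO_MAPPING_SSI:       (port 2, channel 3) -> fifo 3 + 8 * (2 - 1) = 11
+ *
+ * hsi_fifo_get_chan() below performs the inverse translation, from a fifo
+ * index back to a (port, channel) pair.
+ */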
+
+/**
+ * hsi_fifo_get_chan - Get (port, channel) from a fifo index
+ * @hsi_ctrl - HSI controller data
+ * @fifo - HSI fifo used (0..HSI_HST_FIFO_COUNT)
+ * @channel - related channel if any (0..)
+ * @port - related port if any (1..2)
+ *
+ * Returns 0 on success, or an error code (< 0) otherwise.
+ * Notes: 1) The fifo <=> (port, channel) correspondence depends on the selected
+ *           SW strategy for channel mapping (fifo management).
+ *        2) The mapping is identical for the Read and Write paths.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_get_chan(struct hsi_dev *hsi_ctrl, unsigned int fifo,
+ unsigned int *channel, unsigned int *port)
+{
+ int err = 0;
+
+ if (unlikely(fifo >= HSI_HST_FIFO_COUNT)) {
+ err = -EINVAL;
+ goto fifo_id_bk;
+ }
+
+ if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_ALL_PORT1) {
+ *channel = fifo;
+ *port = 1;
+ } else if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_SSI) {
+ if (fifo < 8) {
+ *channel = fifo;
+ *port = 1;
+ } else {
+ *channel = fifo - 8;
+ *port = 2;
+ }
+ } else {
+ err = -EPERM;
+ goto fifo_id_bk;
+ }
+
+fifo_id_bk:
+ if (unlikely(err < 0))
+ dev_err(hsi_ctrl->dev, "Cannot map a channel / port to the "
+ "requested params: fifo:%d; ERR=%d\n", fifo, err);
+
+ return err;
+}
+
+/**
+ * hsi_fifo_mapping - Configures the HSI FIFO mapping registers.
+ * @hsi_ctrl - HSI controller data
+ * @mtype - mapping strategy
+ *
+ * Returns 0 on success, or an error code (< 0) otherwise.
+ * Configures the HSI FIFO mapping registers. Several mapping strategies are
+ * supported.
+ * Note: The mapping is identical for the Read and Write paths.
+ * This exclusively applies to HSI devices.
+ */
+int hsi_fifo_mapping(struct hsi_dev *hsi_ctrl, unsigned int mtype)
+{
+ int err = 0;
+ void __iomem *base = hsi_ctrl->base;
+ int i;
+ unsigned int channel, port;
+
+ if (mtype == HSI_FIFO_MAPPING_ALL_PORT1) {
+ channel = 0;
+ for (i = 0; i < HSI_HST_FIFO_COUNT; i++) {
+ hsi_outl(HSI_MAPPING_ENABLE |
+ (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+ (0 << HSI_MAPPING_PORT_NUMBER_OFFSET) |
+ HSI_HST_MAPPING_THRESH_VALUE,
+ base, HSI_HST_MAPPING_FIFO_REG(i));
+ hsi_outl(HSI_MAPPING_ENABLE |
+ (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+ (0 << HSI_MAPPING_PORT_NUMBER_OFFSET),
+ base, HSI_HSR_MAPPING_FIFO_REG(i));
+ channel++;
+ }
+ if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_UNDEF)
+ dev_dbg(hsi_ctrl->dev, "Fifo mapping : All FIFOs for "
+ "Port1\n");
+ hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_ALL_PORT1;
+ } else if (mtype == HSI_FIFO_MAPPING_SSI) {
+ channel = 0;
+ port = 0;
+ for (i = 0; i < HSI_HST_FIFO_COUNT; i++) {
+ hsi_outl(HSI_MAPPING_ENABLE |
+ (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+ (port << HSI_MAPPING_PORT_NUMBER_OFFSET) |
+ HSI_HST_MAPPING_THRESH_VALUE,
+ base, HSI_HST_MAPPING_FIFO_REG(i));
+ hsi_outl(HSI_MAPPING_ENABLE |
+ (channel << HSI_MAPPING_CH_NUMBER_OFFSET) |
+ (port << HSI_MAPPING_PORT_NUMBER_OFFSET),
+ base, HSI_HSR_MAPPING_FIFO_REG(i));
+ channel++;
+ if (channel == 8) {
+ channel = 0;
+ port = 1;
+ }
+ }
+
+ if (hsi_ctrl->fifo_mapping_strategy == HSI_FIFO_MAPPING_UNDEF)
+ dev_dbg(hsi_ctrl->dev, "Fifo mapping : 8 FIFOs per Port"
+ " (SSI compatible mode)\n");
+ hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_SSI;
+ } else {
+ hsi_ctrl->fifo_mapping_strategy = HSI_FIFO_MAPPING_UNDEF;
+ dev_err(hsi_ctrl->dev, "Bad Fifo strategy request : %d\n",
+ mtype);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/**
+ * hsi_hst_bufstate_f_reg - Return the proper HSI_HST_BUFSTATE register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HST_BUFSTATE register offset
+ * Note: indexing of BUFSTATE registers is different on SSI and HSI:
+ * On SSI: it is linked to the ports
+ * On HSI: it is linked to the FIFOs (and depends on the SW strategy)
+ */
+long hsi_hst_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel)
+{
+ int fifo;
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev,
+ "hsi_hst_bufstate_f_reg ERROR : %d\n", fifo);
+ return fifo;
+ } else
+ return HSI_HST_BUFSTATE_FIFO_REG(fifo);
+ } else {
+ return HSI_HST_BUFSTATE_REG(port);
+ }
+}
+
+/**
+ * hsi_hsr_bufstate_f_reg - Return the proper HSI_HSR_BUFSTATE register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HSR_BUFSTATE register offset
+ * Note: indexing of BUFSTATE registers is different on SSI and HSI:
+ * On SSI: it is linked to the ports
+ * On HSI: it is linked to the FIFOs (and depends on the SW strategy)
+ */
+long hsi_hsr_bufstate_f_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel)
+{
+ int fifo;
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev,
+ "hsi_hsr_bufstate_f_reg ERROR : %d\n", fifo);
+ return fifo;
+ } else
+ return HSI_HSR_BUFSTATE_FIFO_REG(fifo);
+ } else {
+ return HSI_HSR_BUFSTATE_REG(port);
+ }
+}
+
+/**
+ * hsi_hst_buffer_reg - Return the proper HSI_HST_BUFFER register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HST_BUFFER register offset
+ * Note: indexing of BUFFER registers is different on SSI and HSI:
+ * On SSI: it is linked to the ports
+ * On HSI: it is linked to the FIFOs (and depends on the SW strategy)
+ */
+long hsi_hst_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel)
+{
+ int fifo;
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev,
+ "hsi_hst_buffer_reg ERROR : %d\n", fifo);
+ return fifo;
+ } else
+ return HSI_HST_BUFFER_FIFO_REG(fifo);
+ } else {
+ return HSI_HST_BUFFER_CH_REG(port, channel);
+ }
+}
+
+/**
+ * hsi_hsr_buffer_reg - Return the proper HSI_HSR_BUFFER register offset
+ * @hsi_ctrl - HSI controller data
+ * @port - HSI port used
+ * @channel - channel used
+ *
+ * Returns the HSI_HSR_BUFFER register offset
+ * Note: indexing of BUFFER registers is different on SSI and HSI:
+ * On SSI: it is linked to the ports
+ * On HSI: it is linked to the FIFOs (and depends on the SW strategy)
+ */
+long hsi_hsr_buffer_reg(struct hsi_dev *hsi_ctrl,
+ unsigned int port, unsigned int channel)
+{
+ int fifo;
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev,
+ "hsi_hsr_buffer_reg ERROR : %d\n", fifo);
+ return fifo;
+ } else
+ return HSI_HSR_BUFFER_FIFO_REG(fifo);
+ } else {
+ return HSI_HSR_BUFFER_CH_REG(port, channel);
+ }
+}
+
+/**
+ * hsi_get_rx_fifo_occupancy - Return the size of data remaining
+ * in the given FIFO
+ * @hsi_ctrl - HSI controller data
+ * @fifo - FIFO to look at
+ *
+ * Returns the number of frames (32bits) remaining in the FIFO
+ */
+u8 hsi_get_rx_fifo_occupancy(struct hsi_dev *hsi_ctrl, u8 fifo)
+{
+ void __iomem *base = hsi_ctrl->base;
+ int hsr_mapping, mapping_words;
+
+ hsr_mapping = hsi_inl(base, HSI_HSR_MAPPING_FIFO_REG(fifo));
+ mapping_words = (hsr_mapping >> HSI_HST_MAPPING_THRESH_OFFSET) & 0xF;
+ return mapping_words;
+}
+
diff --git a/drivers/omap_hsi/hsi_driver_gpio.c b/drivers/omap_hsi/hsi_driver_gpio.c
new file mode 100644
index 0000000..4c8810b
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_gpio.c
@@ -0,0 +1,75 @@
+/*
+ * hsi_driver_gpio.c
+ *
+ * Implements HSI GPIO-related functionality (i.e. wake line management).
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/gpio.h>
+#include "hsi_driver.h"
+
+static void do_hsi_cawake_tasklet(unsigned long hsi_p)
+{
+ struct hsi_port *port = (struct hsi_port *)hsi_p;
+ struct hsi_dev *hsi_ctrl = port->hsi_controller;
+
+ spin_lock(&hsi_ctrl->lock);
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+ port->in_cawake_tasklet = true;
+
+ port->cawake_status = hsi_get_cawake(port);
+ hsi_do_cawake_process(port);
+
+ port->in_cawake_tasklet = false;
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+ spin_unlock(&hsi_ctrl->lock);
+}
+
+static irqreturn_t hsi_cawake_isr(int irq, void *hsi_p)
+{
+ struct hsi_port *port = hsi_p;
+
+ tasklet_hi_schedule(&port->cawake_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+int __init hsi_cawake_init(struct hsi_port *port, const char *irq_name)
+{
+ tasklet_init(&port->cawake_tasklet, do_hsi_cawake_tasklet,
+ (unsigned long)port);
+
+ if (request_irq(port->cawake_gpio_irq, hsi_cawake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, irq_name, port) < 0) {
+ dev_err(port->hsi_controller->dev,
+ "FAILED to request %s GPIO IRQ %d on port %d\n",
+ irq_name, port->cawake_gpio_irq, port->port_number);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void hsi_cawake_exit(struct hsi_port *port)
+{
+ if (port->cawake_gpio < 0)
+ return; /* Nothing to do (case SSI without a cawake GPIO, or */
+ /* HSI with IO ring wakeup) */
+
+ tasklet_kill(&port->cawake_tasklet);
+ free_irq(port->cawake_gpio_irq, port);
+}
diff --git a/drivers/omap_hsi/hsi_driver_if.c b/drivers/omap_hsi/hsi_driver_if.c
new file mode 100644
index 0000000..19012e5
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_if.c
@@ -0,0 +1,965 @@
+/*
+ * hsi_driver_if.c
+ *
+ * Implements HSI hardware driver interfaces for the upper layers.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "hsi_driver.h"
+
+#define NOT_SET (-1)
+
+/* Manage HSR divisor update
+ * A special divisor value allows switching to auto-divisor mode in Rx
+ * (but with error counters deactivated). This function implements the
+ * transitions to/from this mode.
+ */
+int hsi_set_rx_divisor(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+ struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ int port = sport->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ if (cfg->divisor == NOT_SET)
+ return 0;
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
+ sport->counters_on) {
+ /* auto mode: deactivate counters + set divisor = 0 */
+ sport->reg_counters = hsi_inl(base, HSI_HSR_COUNTERS_REG
+ (port));
+ sport->counters_on = 0;
+ hsi_outl(0, base, HSI_HSR_COUNTERS_REG(port));
+ hsi_outl(0, base, HSI_HSR_DIVISOR_REG(port));
+ dev_dbg(hsi_ctrl->dev, "Switched to HSR auto mode\n");
+ } else if (cfg->divisor != HSI_HSR_DIVISOR_AUTO) {
+ /* Divisor set mode: use counters */
+ /* Leave auto mode: use new counters values */
+ cfg->counters = 0xFFFFF;
+ sport->reg_counters = cfg->counters;
+ sport->counters_on = 1;
+ hsi_outl(cfg->counters, base,
+ HSI_HSR_COUNTERS_REG(port));
+ hsi_outl(cfg->divisor, base, HSI_HSR_DIVISOR_REG(port));
+ dev_dbg(hsi_ctrl->dev, "Left HSR auto mode. "
+ "Counters=0x%08x, Divisor=0x%08x\n",
+ cfg->counters, cfg->divisor);
+ }
+ } else {
+ if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
+ sport->counters_on) {
+ /* auto mode: deactivate timeout */
+ sport->reg_counters = hsi_inl(base,
+ SSI_TIMEOUT_REG(port));
+ sport->counters_on = 0;
+ hsi_outl(0, base, SSI_TIMEOUT_REG(port));
+ dev_dbg(hsi_ctrl->dev, "Deactivated SSR timeout\n");
+ } else if (cfg->divisor == HSI_SSR_DIVISOR_USE_TIMEOUT) {
+ /* Leave auto mode: use new counters values */
+ sport->reg_counters = cfg->counters;
+ sport->counters_on = 1;
+ hsi_outl(cfg->counters, base, SSI_TIMEOUT_REG(port));
+ dev_dbg(hsi_ctrl->dev, "Left SSR auto mode. "
+ "Timeout=0x%08x\n", cfg->counters);
+ }
+ }
+
+ return 0;
+}
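+
+/*
+ * Illustrative sketch (not part of this driver): a protocol client normally
+ * reaches hsi_set_rx_divisor() through HSI_IOCTL_SET_RX, e.g. to request the
+ * auto-divisor mode described above. The local variable name is hypothetical.
+ *
+ *	struct hsr_ctx rx_cfg;
+ *
+ *	hsi_ioctl(dev, HSI_IOCTL_GET_RX, &rx_cfg);
+ *	rx_cfg.divisor = HSI_HSR_DIVISOR_AUTO;
+ *	hsi_ioctl(dev, HSI_IOCTL_SET_RX, &rx_cfg);
+ */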
+
+int hsi_set_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+ struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ int port = sport->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
+ ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
+ ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_SLEEP) &&
+ (cfg->mode != NOT_SET))
+ return -EINVAL;
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+ && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
+ && (cfg->flow != NOT_SET))
+ return -EINVAL;
+ /* HSI only supports payload size of 32bits */
+ if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+ (cfg->frame_size != NOT_SET))
+ return -EINVAL;
+ } else {
+ if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+ && (cfg->flow != NOT_SET))
+ return -EINVAL;
+ /* HSI only supports payload size of 32bits */
+ if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+ (cfg->frame_size != NOT_SET))
+ return -EINVAL;
+ }
+
+ if ((cfg->channels == 0) ||
+ ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
+ return -EINVAL;
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ if ((cfg->divisor > HSI_MAX_RX_DIVISOR) &&
+ (cfg->divisor != HSI_HSR_DIVISOR_AUTO) &&
+ (cfg->divisor != NOT_SET))
+ return -EINVAL;
+ }
+
+ if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
+ hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK)
+ << HSI_FLOW_OFFSET), base,
+ HSI_HSR_MODE_REG(port));
+
+ if (cfg->frame_size != NOT_SET)
+ hsi_outl(cfg->frame_size, base, HSI_HSR_FRAMESIZE_REG(port));
+
+ if (cfg->channels != NOT_SET) {
+ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
+ return -EINVAL;
+ else
+ hsi_outl(cfg->channels, base,
+ HSI_HSR_CHANNELS_REG(port));
+ }
+
+ return hsi_set_rx_divisor(sport, cfg);
+}
+
+void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
+{
+ struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ int port = sport->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+
+ cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK;
+ cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
+ >> HSI_FLOW_OFFSET;
+ cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port));
+ cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
+ if (hsi_driver_device_is_hsi(pdev)) {
+ cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
+ cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
+ } else {
+ cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port));
+ }
+}
+
+int hsi_set_tx(struct hsi_port *sport, struct hst_ctx *cfg)
+{
+ struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ int port = sport->port_number;
+ struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
+ unsigned int max_divisor = hsi_driver_device_is_hsi(pdev) ?
+ HSI_MAX_TX_DIVISOR : HSI_SSI_MAX_TX_DIVISOR;
+
+ if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
+ ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
+ (cfg->mode != NOT_SET))
+ return -EINVAL;
+
+ if (hsi_driver_device_is_hsi(pdev)) {
+ if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+ && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
+ && (cfg->flow != NOT_SET))
+ return -EINVAL;
+ /* HSI only supports payload size of 32bits */
+ if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
+ (cfg->frame_size != NOT_SET))
+ return -EINVAL;
+ } else {
+ if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
+ && (cfg->flow != NOT_SET))
+ return -EINVAL;
+
+ if ((cfg->frame_size > HSI_FRAMESIZE_MAX) &&
+ (cfg->frame_size != NOT_SET))
+ return -EINVAL;
+ }
+
+ if ((cfg->channels == 0) ||
+ ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
+ return -EINVAL;
+
+ if ((cfg->divisor > max_divisor) && (cfg->divisor != NOT_SET))
+ return -EINVAL;
+
+ if ((cfg->arb_mode != HSI_ARBMODE_ROUNDROBIN) &&
+ (cfg->arb_mode != HSI_ARBMODE_PRIORITY) && (cfg->mode != NOT_SET))
+ return -EINVAL;
+
+ if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
+ hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK) <<
+ HSI_FLOW_OFFSET) |
+ HSI_HST_MODE_WAKE_CTRL_SW, base,
+ HSI_HST_MODE_REG(port));
+
+ if (cfg->frame_size != NOT_SET)
+ hsi_outl(cfg->frame_size, base, HSI_HST_FRAMESIZE_REG(port));
+
+ if (cfg->channels != NOT_SET) {
+ if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
+ return -EINVAL;
+ else
+ hsi_outl(cfg->channels, base,
+ HSI_HST_CHANNELS_REG(port));
+ }
+
+ if (cfg->divisor != NOT_SET)
+ hsi_outl(cfg->divisor, base, HSI_HST_DIVISOR_REG(port));
+
+ if (cfg->arb_mode != NOT_SET)
+ hsi_outl(cfg->arb_mode, base, HSI_HST_ARBMODE_REG(port));
+
+ return 0;
+}
+
+void hsi_get_tx(struct hsi_port *sport, struct hst_ctx *cfg)
+{
+ struct hsi_dev *hsi_ctrl = sport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ int port = sport->port_number;
+
+ cfg->mode = hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_MODE_VAL_MASK;
+ cfg->flow = (hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
+ >> HSI_FLOW_OFFSET;
+ cfg->frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port));
+ cfg->channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
+ cfg->divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
+ cfg->arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
+}
+
+/**
+ * hsi_open - open a hsi device channel.
+ * @dev - Reference to the hsi device channel to be opened.
+ *
+ * Returns 0 on success, -EINVAL on bad parameters, -EBUSY if it is already
+ * opened.
+ */
+int hsi_open(struct hsi_device *dev)
+{
+ struct hsi_channel *ch;
+ struct hsi_port *port;
+ struct hsi_dev *hsi_ctrl;
+
+ if (!dev || !dev->ch) {
+ pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+ return -EINVAL;
+ }
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ ch = dev->ch;
+ if (!ch->read_done || !ch->write_done) {
+ dev_err(dev->device.parent,
+ "Trying to open with no (read/write) callbacks "
+ "registered\n");
+ return -EINVAL;
+ }
+ port = ch->hsi_port;
+ hsi_ctrl = port->hsi_controller;
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
+ __func__);
+
+ if (ch->flags & HSI_CH_OPEN) {
+ dev_err(dev->device.parent,
+ "Port %d Channel %d already OPENED\n",
+ dev->n_p, dev->n_ch);
+ spin_unlock_bh(&hsi_ctrl->lock);
+ return -EBUSY;
+ }
+
+ /* Restart with flags cleaned up */
+ ch->flags = HSI_CH_OPEN;
+
+ hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED | HSI_ERROROCCURED
+ | HSI_BREAKDETECTED);
+
+ /* NOTE: error and break are port events and do not need to be
+ * enabled for HSI extended enable register */
+
+ hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hsi_open);
+
+/**
+ * hsi_write - write data into the hsi device channel
+ * @dev - reference to the hsi device channel to write into.
+ * @addr - pointer to the 32-bit word data to be written.
+ * @size - number of 32-bit words to be written.
+ *
+ * Return 0 on success, a negative value on failure.
+ * A success value only indicates that the request has been accepted.
+ * Transfer is only completed when the write_done callback is called.
+ *
+ */
+int hsi_write(struct hsi_device *dev, u32 *addr, unsigned int size)
+{
+ struct hsi_channel *ch;
+ int err;
+
+ if (unlikely(!dev)) {
+ pr_err(LOG_NAME "Null dev pointer in hsi_write\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(!dev->ch || !addr || (size <= 0))) {
+ dev_err(dev->device.parent,
+ "Wrong parameters hsi_device %p data %p count %d",
+ dev, addr, size);
+ return -EINVAL;
+ }
+ dev_dbg(dev->device.parent, "%s ch %d, @%x, size %d u32\n", __func__,
+ dev->n_ch, (u32) addr, size);
+
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+
+ spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
+ if (pm_runtime_suspended(dev->device.parent) ||
+ !ch->hsi_port->hsi_controller->clock_enabled)
+ dev_dbg(dev->device.parent,
+ "hsi_write with HSI clocks OFF, clock_enabled = %d\n",
+ ch->hsi_port->hsi_controller->clock_enabled);
+
+ hsi_clocks_enable_channel(dev->device.parent,
+ ch->channel_number, __func__);
+
+ if (ch->write_data.addr != NULL) {
+ dev_err(dev->device.parent, "# Invalid request - Write "
+ "operation pending port %d channel %d\n",
+ ch->hsi_port->port_number,
+ ch->channel_number);
+
+ hsi_clocks_disable_channel(dev->device.parent,
+ ch->channel_number, __func__);
+ spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+ return -EINVAL;
+ }
+
+ ch->write_data.addr = addr;
+ ch->write_data.size = size;
+ ch->write_data.lch = -1;
+
+ if (size == 1)
+ err = hsi_driver_enable_write_interrupt(ch, addr);
+ else
+ err = hsi_driver_write_dma(ch, addr, size);
+
+ if (unlikely(err < 0)) {
+ ch->write_data.addr = NULL;
+ ch->write_data.size = 0;
+ dev_err(dev->device.parent, "Failed to program write\n");
+ }
+
+ spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+
+	/* Leave clocks enabled until transfer is complete (write callback */
+	/* is called) */
+ return err;
+}
+EXPORT_SYMBOL(hsi_write);
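+
+/*
+ * Illustrative usage (not part of this driver): hsi_write() only queues the
+ * transfer; completion is signalled through the registered write_done()
+ * callback. A minimal client, with hypothetical names, could look like:
+ *
+ *	static void client_write_done(struct hsi_device *dev, unsigned int size)
+ *	{
+ *		complete(&client_tx_done);
+ *	}
+ *
+ *	hsi_set_write_cb(dev, client_write_done);
+ *	hsi_set_read_cb(dev, client_read_done);
+ *	if (!hsi_open(dev) && !hsi_write(dev, tx_buf, n_words))
+ *		wait_for_completion(&client_tx_done);
+ */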
+
+/**
+ * hsi_read - read data from the hsi device channel
+ * @dev - hsi device channel reference to read data from.
+ * @addr - pointer to a 32-bit word buffer to store the data.
+ * @size - number of 32-bit words to be stored.
+ *
+ * Return 0 on success, a negative value on failure.
+ * A success value only indicates that the request has been accepted.
+ * Data is only available in the buffer when the read_done callback is called.
+ *
+ */
+int hsi_read(struct hsi_device *dev, u32 *addr, unsigned int size)
+{
+ struct hsi_channel *ch;
+ int err;
+
+ if (unlikely(!dev)) {
+ pr_err(LOG_NAME "Null dev pointer in hsi_read\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(!dev->ch || !addr || (size <= 0))) {
+ dev_err(dev->device.parent, "Wrong parameters "
+ "hsi_device %p data %p count %d", dev, addr, size);
+ return -EINVAL;
+ }
+#if 0
+ if (dev->n_ch == 0)
+ dev_info(dev->device.parent, "%s ch %d, @%x, size %d u32\n",
+ __func__, dev->n_ch, (u32) addr, size);
+#endif
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+
+ spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
+ if (pm_runtime_suspended(dev->device.parent) ||
+ !ch->hsi_port->hsi_controller->clock_enabled)
+ dev_dbg(dev->device.parent,
+ "hsi_read with HSI clocks OFF, clock_enabled = %d\n",
+ ch->hsi_port->hsi_controller->clock_enabled);
+
+ hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
+ __func__);
+
+ if (ch->read_data.addr != NULL) {
+ dev_err(dev->device.parent, "# Invalid request - Read "
+ "operation pending port %d channel %d\n",
+ ch->hsi_port->port_number,
+ ch->channel_number);
+ err = -EINVAL;
+ goto done;
+ }
+
+ ch->read_data.addr = addr;
+ ch->read_data.size = size;
+ ch->read_data.lch = -1;
+
+ if (size == 1)
+ err = hsi_driver_enable_read_interrupt(ch, addr);
+ else
+ err = hsi_driver_read_dma(ch, addr, size);
+
+ if (unlikely(err < 0)) {
+ ch->read_data.addr = NULL;
+ ch->read_data.size = 0;
+ dev_err(dev->device.parent, "Failed to program read\n");
+ }
+
+done:
+ hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
+ __func__);
+ spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hsi_read);
+
+int __hsi_write_cancel(struct hsi_channel *ch)
+{
+ int err = -ENODATA;
+ if (ch->write_data.size == 1)
+ err = hsi_driver_cancel_write_interrupt(ch);
+ else if (ch->write_data.size > 1)
+ err = hsi_driver_cancel_write_dma(ch);
+ else
+ dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
+ __func__, ch->write_data.size);
+ dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * hsi_write_cancel - Cancel pending write request.
+ * @dev - hsi device channel where to cancel the pending write.
+ *
+ * write_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ *         -ECANCELED : write cancel success, data not transferred to TX FIFO
+ *         0 : transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever returned value, write callback will not be called after
+ * write cancel.
+ */
+int hsi_write_cancel(struct hsi_device *dev)
+{
+ int err;
+ if (unlikely(!dev || !dev->ch)) {
+ pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+ return -ENODEV;
+ }
+ dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -ENODEV;
+ }
+
+ spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ err = __hsi_write_cancel(dev->ch);
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+ return err;
+}
+EXPORT_SYMBOL(hsi_write_cancel);
+
+int __hsi_read_cancel(struct hsi_channel *ch)
+{
+ int err = -ENODATA;
+ if (ch->read_data.size == 1)
+ err = hsi_driver_cancel_read_interrupt(ch);
+ else if (ch->read_data.size > 1)
+ err = hsi_driver_cancel_read_dma(ch);
+ else
+ dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
+ __func__, ch->read_data.size);
+
+ dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * hsi_read_cancel - Cancel pending read request.
+ * @dev - hsi device channel where to cancel the pending read.
+ *
+ * read_done() callback will not be called after success of this function.
+ *
+ * Return: -ENXIO : No DMA channel found for specified HSI channel
+ * -ECANCELED : read cancel success, data not available at expected
+ * address.
+ * 0 : transfer is already over, data already available at expected
+ * address.
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_read_cancel(struct hsi_device *dev)
+{
+ int err;
+ if (unlikely(!dev || !dev->ch)) {
+ pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
+ return -ENODEV;
+ }
+ dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -ENODEV;
+ }
+
+ spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ err = __hsi_read_cancel(dev->ch);
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
+ return err;
+}
+EXPORT_SYMBOL(hsi_read_cancel);
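+
+/*
+ * Illustrative sketch (not part of this driver): tearing down a transfer on
+ * timeout. As noted above, read_done() will not be called after the cancel,
+ * whatever the return value. Names are hypothetical.
+ *
+ *	if (!wait_for_completion_timeout(&client_rx_done, HZ))
+ *		hsi_read_cancel(dev);
+ */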
+
+/**
+ * hsi_poll - HSI poll feature, enables data interrupt on frame reception
+ * @dev - hsi device channel reference to apply the I/O control
+ * (or port associated to it)
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ */
+int hsi_poll(struct hsi_device *dev)
+{
+ struct hsi_channel *ch;
+ struct hsi_dev *hsi_ctrl;
+ int err;
+
+ if (unlikely(!dev || !dev->ch))
+ return -EINVAL;
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+ hsi_ctrl = ch->hsi_port->hsi_controller;
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ ch->flags |= HSI_CH_RX_POLL;
+
+ err = hsi_driver_enable_read_interrupt(ch, NULL);
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hsi_poll);
+
+/**
+ * hsi_unpoll - HSI poll feature, disables data interrupt on frame reception
+ * @dev - hsi device channel reference to apply the I/O control
+ * (or port associated to it)
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ */
+int hsi_unpoll(struct hsi_device *dev)
+{
+ struct hsi_channel *ch;
+ struct hsi_dev *hsi_ctrl;
+
+ if (unlikely(!dev || !dev->ch))
+ return -EINVAL;
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
+ dev_err(dev->device.parent, "HSI device NOT open\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+ hsi_ctrl = ch->hsi_port->hsi_controller;
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ ch->flags &= ~HSI_CH_RX_POLL;
+
+ hsi_driver_disable_read_interrupt(ch);
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hsi_unpoll);
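+
+/*
+ * Illustrative sketch (not part of this driver): with polling enabled, an
+ * incoming frame is reported through the port event callback (see
+ * HSI_EVENT_HSR_DATAAVAILABLE in hsi_do_channel_rx()), and the client then
+ * issues the actual read from that callback. Names are hypothetical.
+ *
+ *	hsi_poll(dev);
+ *	...
+ *	case HSI_EVENT_HSR_DATAAVAILABLE:
+ *		hsi_read(dev, &rx_word, 1);
+ *		break;
+ */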
+
+/**
+ * hsi_ioctl - HSI I/O control
+ * @dev - hsi device channel reference to apply the I/O control
+ * (or port associated to it)
+ * @command - HSI I/O control command
+ * @arg - parameter associated with the control command. NULL, if no parameter.
+ *
+ * Return 0 on success, a negative value on failure.
+ *
+ */
+int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
+{
+ struct hsi_channel *ch;
+ struct hsi_dev *hsi_ctrl;
+ struct hsi_port *pport;
+ void __iomem *base;
+ unsigned int port, channel;
+ u32 acwake;
+ int err = 0;
+ int fifo = 0;
+
+ if (unlikely((!dev) ||
+ (!dev->ch) ||
+ (!dev->ch->hsi_port) ||
+ (!dev->ch->hsi_port->hsi_controller)) ||
+ (!(dev->ch->flags & HSI_CH_OPEN))) {
+ pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ ch = dev->ch;
+ pport = ch->hsi_port;
+ hsi_ctrl = ch->hsi_port->hsi_controller;
+ port = ch->hsi_port->port_number;
+ channel = ch->channel_number;
+ base = hsi_ctrl->base;
+
+ dev_dbg(dev->device.parent, "IOCTL: ch %d, command %d\n",
+ channel, command);
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, channel, __func__);
+
+ switch (command) {
+ case HSI_IOCTL_ACWAKE_UP:
+ if (ch->flags & HSI_CH_ACWAKE) {
+ dev_dbg(dev->device.parent, "Duplicate ACWAKE UP\n");
+ err = -EPERM;
+ goto out;
+ }
+
+ /* Wake up request to Modem (typically OMAP initiated) */
+	/* Symmetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
+
+ ch->flags |= HSI_CH_ACWAKE;
+ pport->acwake_status |= BIT(channel);
+
+		/* We only claim the wake line once per channel */
+ acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ if (!(acwake & HSI_WAKE(channel))) {
+ hsi_outl(HSI_SET_WAKE(channel), base,
+ HSI_SYS_SET_WAKE_REG(port));
+ }
+
+ goto out;
+ break;
+ case HSI_IOCTL_ACWAKE_DOWN:
+ /* Low power request initiation (OMAP initiated, typically */
+ /* following inactivity timeout) */
+ /* ACPU HSI block shall still be capable of receiving */
+ if (!(ch->flags & HSI_CH_ACWAKE)) {
+ dev_dbg(dev->device.parent, "Duplicate ACWAKE DOWN\n");
+ err = -EPERM;
+ goto out;
+ }
+
+ acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ if (unlikely(pport->acwake_status !=
+ (acwake & HSI_WAKE_MASK))) {
+ dev_warn(dev->device.parent,
+ "ACWAKE shadow register mismatch"
+ " acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
+ pport->acwake_status, acwake);
+ pport->acwake_status = acwake & HSI_WAKE_MASK;
+ }
+ /* SSI_TODO: add safety check for SSI also */
+
+ ch->flags &= ~HSI_CH_ACWAKE;
+ pport->acwake_status &= ~BIT(channel);
+
+ /* Release the wake line per channel */
+ if ((acwake & HSI_WAKE(channel))) {
+ hsi_outl(HSI_CLEAR_WAKE(channel), base,
+ HSI_SYS_CLEAR_WAKE_REG(port));
+ }
+
+ goto out;
+ break;
+ case HSI_IOCTL_SEND_BREAK:
+ hsi_outl(1, base, HSI_HST_BREAK_REG(port));
+ /*HSI_TODO : need to deactivate clock after BREAK frames sent*/
+ /*Use interrupt ? (if TX BREAK INT exists)*/
+ break;
+ case HSI_IOCTL_GET_ACWAKE:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ *(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
+ break;
+ case HSI_IOCTL_FLUSH_RX:
+ hsi_outl(0, base, HSI_HSR_RXSTATE_REG(port));
+ break;
+ case HSI_IOCTL_FLUSH_TX:
+ hsi_outl(0, base, HSI_HST_TXSTATE_REG(port));
+ break;
+ case HSI_IOCTL_GET_CAWAKE:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_get_cawake(dev->ch->hsi_port);
+ if (err < 0) {
+ err = -ENODEV;
+ goto out;
+ }
+ *(u32 *)arg = err;
+ break;
+ case HSI_IOCTL_SET_RX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
+ break;
+ case HSI_IOCTL_GET_RX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
+ break;
+ case HSI_IOCTL_SET_TX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
+ break;
+ case HSI_IOCTL_GET_TX:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
+ break;
+ case HSI_IOCTL_SW_RESET:
+ dev_info(dev->device.parent, "SW Reset\n");
+ err = hsi_softreset(hsi_ctrl);
+
+ /* Reset HSI config to default */
+ hsi_softreset_driver(hsi_ctrl);
+ break;
+ case HSI_IOCTL_GET_FIFO_OCCUPANCY:
+ if (!arg) {
+ err = -EINVAL;
+ goto out;
+ }
+ fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
+ "channel %d.\n", channel);
+ err = -EFAULT;
+ goto out;
+ }
+ *(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
+ break;
+ case HSI_IOCTL_SET_ACREADY_SAFEMODE:
+ omap_writel(omap_readl(0x4A1000C8) | 0x7, 0x4A1000C8);
+ break;
+ case HSI_IOCTL_SET_ACREADY_NORMAL:
+		omap_writel(omap_readl(0x4A1000C8) & 0xFFFFFFF9, 0x4A1000C8);
+		break;
+ case HSI_IOCTL_SET_3WIRE_MODE:
+ omap_writel(0x30000, 0x4A058C08);
+ break;
+ case HSI_IOCTL_SET_4WIRE_MODE:
+ omap_writel((omap_readl(0x4A058C08) & 0xFFFF), 0x4A058C08);
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ /* All IOCTL end by disabling the clocks, except ACWAKE high. */
+ hsi_clocks_disable_channel(dev->device.parent, channel, __func__);
+
+ spin_unlock_bh(&hsi_ctrl->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(hsi_ioctl);
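+
+/*
+ * Illustrative sketch (not part of this driver): the read-modify-write
+ * pattern expected with HSI_IOCTL_GET_TX / HSI_IOCTL_SET_TX. Variable names
+ * are hypothetical.
+ *
+ *	struct hst_ctx tx_cfg;
+ *	int err;
+ *
+ *	err = hsi_ioctl(dev, HSI_IOCTL_GET_TX, &tx_cfg);
+ *	if (!err) {
+ *		tx_cfg.mode = HSI_MODE_FRAME;
+ *		err = hsi_ioctl(dev, HSI_IOCTL_SET_TX, &tx_cfg);
+ *	}
+ */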
+
+/**
+ * hsi_close - close given hsi device channel
+ * @dev - reference to hsi device channel.
+ */
+void hsi_close(struct hsi_device *dev)
+{
+ struct hsi_dev *hsi_ctrl;
+
+ if (!dev || !dev->ch) {
+ pr_err(LOG_NAME "Trying to close wrong HSI device %p\n", dev);
+ return;
+ }
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ hsi_ctrl = dev->ch->hsi_port->hsi_controller;
+
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+
+ if (dev->ch->flags & HSI_CH_OPEN) {
+ dev->ch->flags &= ~HSI_CH_OPEN;
+ __hsi_write_cancel(dev->ch);
+ __hsi_read_cancel(dev->ch);
+ }
+
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+}
+EXPORT_SYMBOL(hsi_close);
+
+/**
+ * hsi_set_read_cb - register read_done() callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @read_cb - callback to signal read transfer completed.
+ * size is expressed in number of 32-bit words.
+ *
+ * NOTE: Read callback must only be set when the channel is not open!
+ */
+void hsi_set_read_cb(struct hsi_device *dev,
+ void (*read_cb) (struct hsi_device *dev,
+ unsigned int size))
+{
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ dev->ch->read_done = read_cb;
+}
+EXPORT_SYMBOL(hsi_set_read_cb);
+
+/**
+ * hsi_set_write_cb - register write_done() callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @write_cb - callback to signal write transfer completed.
+ *             size is expressed in number of 32-bit words.
+ *
+ * NOTE: Write callback must only be set when the channel is not open!
+ */
+void hsi_set_write_cb(struct hsi_device *dev,
+ void (*write_cb) (struct hsi_device *dev,
+ unsigned int size))
+{
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ dev->ch->write_done = write_cb;
+}
+EXPORT_SYMBOL(hsi_set_write_cb);
+
+/**
+ * hsi_set_port_event_cb - register port_event callback.
+ * @dev - reference to hsi device channel where the callback is associated to.
+ * @port_event_cb - callback to signal events from the channel port.
+ */
+void hsi_set_port_event_cb(struct hsi_device *dev,
+ void (*port_event_cb) (struct hsi_device *dev,
+ unsigned int event,
+ void *arg))
+{
+ struct hsi_port *port = dev->ch->hsi_port;
+ struct hsi_dev *hsi_ctrl = port->hsi_controller;
+
+ dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);
+
+ write_lock_bh(&dev->ch->rw_lock);
+ dev->ch->port_event = port_event_cb;
+ write_unlock_bh(&dev->ch->rw_lock);
+
+ /* Since we now have a callback registered for events, we can now */
+ /* enable the CAWAKE, ERROR and BREAK interrupts */
+ spin_lock_bh(&hsi_ctrl->lock);
+ hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED | HSI_ERROROCCURED
+ | HSI_BREAKDETECTED);
+ hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
+ __func__);
+ spin_unlock_bh(&hsi_ctrl->lock);
+}
+EXPORT_SYMBOL(hsi_set_port_event_cb);
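+
+/*
+ * Illustrative sketch (not part of this driver): a client interested in
+ * CAWAKE transitions registers a port event callback. The event codes are
+ * the ones raised by this driver; the handler name is hypothetical.
+ *
+ *	static void client_port_event(struct hsi_device *dev,
+ *				      unsigned int event, void *arg)
+ *	{
+ *		switch (event) {
+ *		case HSI_EVENT_CAWAKE_UP:
+ *		case HSI_EVENT_CAWAKE_DOWN:
+ *		case HSI_EVENT_ERROR:
+ *		case HSI_EVENT_BREAK_DETECTED:
+ *			break;
+ *		}
+ *	}
+ *
+ *	hsi_set_port_event_cb(dev, client_port_event);
+ */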
diff --git a/drivers/omap_hsi/hsi_driver_int.c b/drivers/omap_hsi/hsi_driver_int.c
new file mode 100644
index 0000000..52bbba1
--- /dev/null
+++ b/drivers/omap_hsi/hsi_driver_int.c
@@ -0,0 +1,684 @@
+/*
+ * hsi_driver_int.c
+ *
+ * Implements HSI interrupt functionality.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "hsi_driver.h"
+#include <linux/delay.h>
+int shceduled_already_flag = 0;
+void hsi_reset_ch_read(struct hsi_channel *ch)
+{
+ ch->read_data.addr = NULL;
+ ch->read_data.size = 0;
+ ch->read_data.lch = -1;
+}
+
+void hsi_reset_ch_write(struct hsi_channel *ch)
+{
+ ch->write_data.addr = NULL;
+ ch->write_data.size = 0;
+ ch->write_data.lch = -1;
+}
+
+/* Check if a Write (data transfer from AP to CP) is
+ * ongoing for a given HSI channel
+ */
+bool hsi_is_channel_busy(struct hsi_channel *ch)
+{
+ if (ch->write_data.addr == NULL)
+ return false;
+
+ /*
+ * Note: we do not check if there is a read pending, because incoming
+ * data will trigger an interrupt (FIFO or DMA), and wake up the
+ * platform, so no need to keep the clocks ON.
+ */
+ return true;
+}
+
+/* Check if an HSI port is busy:
+ * - data transfer (Write) is ongoing for a given HSI channel
+ * - CAWAKE is high
+ * - Currently in HSI interrupt tasklet
+ * - Currently in HSI CAWAKE tasklet (for SSI)
+ */
+bool hsi_is_hsi_port_busy(struct hsi_port *pport)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ bool cur_cawake = hsi_get_cawake(pport);
+ int ch;
+
+ if (pport->in_int_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "Interrupt tasklet running\n");
+ return true;
+ }
+
+ if (pport->in_cawake_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "SSI Cawake tasklet running\n");
+ return true;
+ }
+
+ if (cur_cawake) {
+ dev_dbg(hsi_ctrl->dev, "Port %d: WAKE status: acwake_status %d,"
+ "cur_cawake %d", pport->port_number,
+ pport->acwake_status, cur_cawake);
+ return true;
+ }
+
+ for (ch = 0; ch < pport->max_ch; ch++)
+ if (hsi_is_channel_busy(&pport->hsi_channel[ch])) {
+ dev_dbg(hsi_ctrl->dev, "Port %d; channel %d "
+ "busy\n", pport->port_number, ch);
+ return true;
+ }
+
+ return false;
+}
+
+/* Check if HSI controller is busy :
+ * - One of the HSI port is busy
+ * - Currently in HSI DMA tasklet
+ */
+bool hsi_is_hsi_controller_busy(struct hsi_dev *hsi_ctrl)
+{
+ int port;
+
+ if (hsi_ctrl->in_dma_tasklet) {
+ dev_dbg(hsi_ctrl->dev, "DMA tasklet running\n");
+ return true;
+ }
+
+ for (port = 0; port < hsi_ctrl->max_p; port++)
+ if (hsi_is_hsi_port_busy(&hsi_ctrl->hsi_port[port])) {
+ dev_dbg(hsi_ctrl->dev, "Port %d busy\n", port + 1);
+ return true;
+ }
+
+ dev_dbg(hsi_ctrl->dev, "No activity on HSI controller\n");
+ return false;
+}
+
+bool hsi_is_hst_port_busy(struct hsi_port *pport)
+{
+ unsigned int port = pport->port_number;
+ void __iomem *base = pport->hsi_controller->base;
+ u32 txstateval;
+
+ txstateval = hsi_inl(base, HSI_HST_TXSTATE_REG(port)) &
+ HSI_HST_TXSTATE_VAL_MASK;
+
+ if (txstateval != HSI_HST_TXSTATE_IDLE) {
+ dev_dbg(pport->hsi_controller->dev, "HST port %d busy, "
+ "TXSTATE=%d\n", port, txstateval);
+ return true;
+ }
+
+ return false;
+}
+
+bool hsi_is_hst_controller_busy(struct hsi_dev *hsi_ctrl)
+{
+ int port;
+
+ for (port = 0; port < hsi_ctrl->max_p; port++)
+ if (hsi_is_hst_port_busy(&hsi_ctrl->hsi_port[port]))
+ return true;
+
+ return false;
+}
+
+
+/* Enables the CAWAKE, BREAK, or ERROR interrupt for the given port */
+int hsi_driver_enable_interrupt(struct hsi_port *pport, u32 flag)
+{
+ hsi_outl_or(flag, pport->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_REG(pport->port_number, pport->n_irq));
+
+ return 0;
+}
+
+/* Enables the Data Accepted Interrupt of HST for the given channel */
+int hsi_driver_enable_write_interrupt(struct hsi_channel *ch, u32 *data)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+
+ hsi_outl_or(HSI_HST_DATAACCEPT(channel), p->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ return 0;
+}
+
+/* Enables the Data Available Interrupt of HSR for the given channel */
+int hsi_driver_enable_read_interrupt(struct hsi_channel *ch, u32 *data)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+
+ hsi_outl_or(HSI_HSR_DATAAVAILABLE(channel), p->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ return 0;
+}
+
+/**
+ * hsi_driver_cancel_write_interrupt - Cancel pending write interrupt.
+ * @ch - HSI channel on which to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : write cancel success, data not transferred to TX FIFO
+ *         0 : transfer is already over, data already transferred to TX FIFO
+ *
+ * Note: whatever returned value, write callback will not be called after
+ * write cancel.
+ */
+int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+ u32 status_reg;
+ long buff_offset;
+
+ status_reg = hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ if (!(status_reg & HSI_HST_DATAACCEPT(channel))) {
+ dev_dbg(&ch->dev->device, "Write cancel on not "
+ "enabled channel %d ENABLE REG 0x%08X", channel,
+ status_reg);
+ }
+ status_reg &= hsi_inl(base,
+ HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));
+
+ hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+
+ buff_offset = hsi_hst_bufstate_f_reg(p->hsi_controller, port, channel);
+ if (buff_offset >= 0)
+ hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), base, buff_offset);
+ hsi_reset_ch_write(ch);
+ return status_reg & HSI_HST_DATAACCEPT(channel) ? 0 : -ECANCELED;
+}
+
+/**
+ * hsi_driver_cancel_read_interrupt - Cancel pending read interrupt.
+ * @ch - HSI channel on which to cancel the pending interrupt.
+ *
+ * Return: -ECANCELED : read cancel success, data not available at expected
+ *         address.
+ *         0 : transfer is already over, data already available at expected
+ *         address.
+ *
+ * Note: whatever returned value, read callback will not be called after cancel.
+ */
+int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+ u32 status_reg;
+
+ status_reg = hsi_inl(base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+ if (!(status_reg & HSI_HSR_DATAAVAILABLE(channel))) {
+ dev_dbg(&ch->dev->device, "Read cancel on not "
+ "enabled channel %d ENABLE REG 0x%08X", channel,
+ status_reg);
+ }
+ status_reg &= hsi_inl(base,
+ HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));
+ hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+ hsi_reset_ch_read(ch);
+ return status_reg & HSI_HSR_DATAAVAILABLE(channel) ? 0 : -ECANCELED;
+}
+
+void hsi_driver_disable_write_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+
+ hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+}
+
+void hsi_driver_disable_read_interrupt(struct hsi_channel *ch)
+{
+ struct hsi_port *p = ch->hsi_port;
+ unsigned int port = p->port_number;
+ unsigned int channel = ch->channel_number;
+ void __iomem *base = p->hsi_controller->base;
+
+ hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
+ HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
+}
+
+/* HST_ACCEPTED interrupt processing */
+static void hsi_do_channel_tx(struct hsi_channel *ch)
+{
+ struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int n_ch;
+ unsigned int n_p;
+ unsigned int irq;
+ long buff_offset;
+
+ n_ch = ch->channel_number;
+ n_p = ch->hsi_port->port_number;
+ irq = ch->hsi_port->n_irq;
+
+ dev_dbg(hsi_ctrl->dev,
+ "Data Accepted interrupt for channel %d.\n", n_ch);
+
+ hsi_driver_disable_write_interrupt(ch);
+
+ if (ch->write_data.addr == NULL) {
+ dev_err(hsi_ctrl->dev, "Error, NULL Write address.\n");
+ hsi_reset_ch_write(ch);
+
+ } else {
+ buff_offset = hsi_hst_buffer_reg(hsi_ctrl, n_p, n_ch);
+ if (buff_offset >= 0) {
+ hsi_outl(*(ch->write_data.addr), base, buff_offset);
+ ch->write_data.addr = NULL;
+ }
+ }
+
+ spin_unlock(&hsi_ctrl->lock);
+ dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback.\n", n_ch);
+ (*ch->write_done) (ch->dev, 1);
+ spin_lock(&hsi_ctrl->lock);
+}
+
+/* HSR_AVAILABLE interrupt processing */
+static void hsi_do_channel_rx(struct hsi_channel *ch)
+{
+ struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
+ void __iomem *base = ch->hsi_port->hsi_controller->base;
+ unsigned int n_ch;
+ unsigned int n_p;
+ unsigned int irq;
+ long buff_offset;
+ int rx_poll = 0;
+ int data_read = 0;
+ int fifo, fifo_words_avail;
+ unsigned int data;
+
+ n_ch = ch->channel_number;
+ n_p = ch->hsi_port->port_number;
+ irq = ch->hsi_port->n_irq;
+
+ dev_dbg(hsi_ctrl->dev,
+ "Data Available interrupt for channel %d.\n", n_ch);
+
+ /* Check if there is data in FIFO available for reading */
+ if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
+ fifo = hsi_fifo_get_id(hsi_ctrl, n_ch, n_p);
+ if (unlikely(fifo < 0)) {
+ dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
+ "channel %d.\n", n_ch);
+ return;
+ }
+ fifo_words_avail = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
+ if (!fifo_words_avail) {
+ dev_dbg(hsi_ctrl->dev,
+ "WARNING: RX FIFO %d empty before CPU copy\n",
+ fifo);
+
+			/* Do not disable interrupt because another interrupt */
+ /* can still come, this time with a real frame. */
+ return;
+ }
+ }
+
+ /*
+ * Check race condition: RX transmission initiated but DMA transmission
+	 * already started - acknowledge then ignore the interrupt occurrence
+ */
+ if (ch->read_data.lch != -1) {
+ dev_err(hsi_ctrl->dev,
+ "race condition between rx txmn and DMA txmn %0x\n",
+ ch->read_data.lch);
+ hsi_driver_disable_read_interrupt(ch);
+ goto done;
+ }
+
+ if (ch->flags & HSI_CH_RX_POLL)
+ rx_poll = 1;
+
+ if (ch->read_data.addr) {
+ buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
+ if (buff_offset >= 0) {
+ data_read = 1;
+ data = *(ch->read_data.addr) = hsi_inl(base,
+ buff_offset);
+ }
+ }
+ hsi_driver_disable_read_interrupt(ch);
+ hsi_reset_ch_read(ch);
+
+done:
+ if (rx_poll) {
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(ch->hsi_port,
+ HSI_EVENT_HSR_DATAAVAILABLE,
+ (void *)n_ch);
+ spin_lock(&hsi_ctrl->lock);
+ }
+
+ if (data_read) {
+ spin_unlock(&hsi_ctrl->lock);
+ (*ch->read_done) (ch->dev, 1);
+ spin_lock(&hsi_ctrl->lock);
+ }
+}
+
+/**
+ * hsi_do_cawake_process - CAWAKE line management
+ * @pport - HSI port to process
+ *
+ * This function handles the CAWAKE L/H transitions and call the event callback
+ * accordingly.
+ *
+ * Returns 0 if the CAWAKE event was processed, -EAGAIN if CAWAKE event
+ * processing is delayed due to a pending DMA interrupt.
+ * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once the
+ * DMA tasklet has been executed. This should be done automatically by the
+ * driver.
+ *
+*/
+int hsi_do_cawake_process(struct hsi_port *pport)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ bool cawake_status = hsi_get_cawake(pport);
+
+ /* Deal with init condition */
+ if (unlikely(pport->cawake_status < 0))
+ pport->cawake_status = !cawake_status;
+ dev_dbg(hsi_ctrl->dev, "%s: Interrupts are not enabled but CAWAKE came."
+		" hsi: port[%d] irq[%d] irq_en=0x%08x dma_irq_en=0x%08x\n",
+ __func__, pport->port_number, pport->n_irq,
+ hsi_inl(pport->hsi_controller->base,
+ HSI_SYS_MPU_ENABLE_REG(pport->port_number,
+ pport->n_irq)),
+ hsi_inl(pport->hsi_controller->base,
+ HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));
+
+ /* Check CAWAKE line status */
+ if (cawake_status) {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");
+
+ /* Check for possible mismatch (race condition) */
+ if (unlikely(pport->cawake_status)) {
+ dev_warn(hsi_ctrl->dev,
+ "CAWAKE race is detected: %s.\n",
+ "HI -> LOW -> HI");
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN,
+ NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ pport->cawake_status = 1;
+
+ /* Force HSI to ON_ACTIVE when CAWAKE is high */
+ hsi_set_pm_force_hsi_on(hsi_ctrl);
+ /*
+ * TODO: Use pm_qos() to set latency constraint to prevent
+ * L3INIT to enter RET/OFF when CAWAKE is high.
+ */
+
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ } else {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");
+
+ /* Check for pending DMA interrupt */
+ if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
+ dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
+ "before CAWAKE->L, exiting "
+ "Interrupt tasklet.\n");
+ return -EAGAIN;
+ }
+ if (unlikely(!pport->cawake_status)) {
+ dev_warn(hsi_ctrl->dev,
+ "CAWAKE race is detected: %s.\n",
+ "LOW -> HI -> LOW");
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP,
+ NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ pport->cawake_status = 0;
+
+ /* Allow HSI HW to enter IDLE when CAWAKE is low */
+ hsi_set_pm_default(hsi_ctrl);
+ /*
+ * TODO: Use pm_qos() to release latency constraint to allow
+ * L3INIT to enter RET/OFF when CAWAKE is low
+ */
+
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ }
+ return 0;
+}
+
+/**
+ * hsi_driver_int_proc - check all channels / ports for interrupts events
+ * @pport - HSI port to process
+ * @status_offset: interrupt status register offset
+ * @enable_offset: interrupt enable register offset
+ * @start: interrupt index to start on
+ * @stop: interrupt index to stop on
+ *
+ * returns the bitmap of processed events
+ *
+ * This function calls the processing functions related to the triggered
+ * events. Events are cleared after the corresponding function has been
+ * called.
+*/
+static u32 hsi_driver_int_proc(struct hsi_port *pport,
+ unsigned long status_offset,
+ unsigned long enable_offset, unsigned int start,
+ unsigned int stop)
+{
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ void __iomem *base = hsi_ctrl->base;
+ unsigned int port = pport->port_number;
+ unsigned int channel;
+ u32 status_reg;
+ u32 hsr_err_reg;
+ u32 channels_served = 0;
+
+ /* Get events status */
+ status_reg = hsi_inl(base, status_offset);
+ status_reg &= hsi_inl(base, enable_offset);
+
+ if (pport->cawake_off_event) {
+ dev_dbg(hsi_ctrl->dev, "CAWAKE detected from OFF mode.\n");
+ } else if (!status_reg) {
+ dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : no event, exit.\n",
+ start, stop);
+ return 0;
+ } else {
+ dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : Events 0x%08x\n",
+ start, stop, status_reg);
+ }
+
+ if (status_reg & HSI_BREAKDETECTED) {
+ dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
+ hsi_outl(0, base, HSI_HSR_BREAK_REG(port));
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
+ spin_lock(&hsi_ctrl->lock);
+
+ channels_served |= HSI_BREAKDETECTED;
+ }
+
+ if (status_reg & HSI_ERROROCCURED) {
+ hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
+ if (hsr_err_reg & HSI_HSR_ERROR_SIG)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Signal Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_FTE)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Frame Timeout Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_TBE)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "Tailing Bit Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_RME)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "RX Mapping Error");
+ if (hsr_err_reg & HSI_HSR_ERROR_TME)
+ dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
+ port, hsr_err_reg, "TX Mapping Error");
+ /* Clear error event bit */
+ hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
+ if (hsr_err_reg) { /* ignore spurious errors */
+ spin_unlock(&hsi_ctrl->lock);
+ hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
+ spin_lock(&hsi_ctrl->lock);
+ } else
+ dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");
+
+ channels_served |= HSI_ERROROCCURED;
+ }
+
+ for (channel = start; channel <= stop; channel++) {
+ if (status_reg & HSI_HST_DATAACCEPT(channel)) {
+ hsi_do_channel_tx(&pport->hsi_channel[channel]);
+ channels_served |= HSI_HST_DATAACCEPT(channel);
+ }
+
+ if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
+ hsi_do_channel_rx(&pport->hsi_channel[channel]);
+ channels_served |= HSI_HSR_DATAAVAILABLE(channel);
+ }
+
+ if (status_reg & HSI_HSR_DATAOVERRUN(channel)) {
+ /*HSI_TODO : Data overrun handling*/
+ dev_err(hsi_ctrl->dev,
+ "Data overrun in real time mode !\n");
+ }
+ }
+
+ /* CAWAKE falling or rising edge detected */
+ if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
+ if (hsi_do_cawake_process(pport) == -EAGAIN)
+ goto proc_done;
+
+ channels_served |= HSI_CAWAKEDETECTED;
+ pport->cawake_off_event = false;
+ }
+proc_done:
+ /* Reset status bits */
+ hsi_outl(channels_served, base, status_offset);
+
+ return channels_served;
+}
+
+static u32 hsi_process_int_event(struct hsi_port *pport)
+{
+ unsigned int port = pport->port_number;
+ unsigned int irq = pport->n_irq;
+ u32 status_reg;
+
+ /* Process events for channels 0..7 */
+ status_reg = hsi_driver_int_proc(pport,
+ HSI_SYS_MPU_STATUS_REG(port, irq),
+ HSI_SYS_MPU_ENABLE_REG(port, irq),
+ 0,
+ min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX) - 1);
+
+ /* Process events for channels 8..15 */
+ if (pport->max_ch > HSI_SSI_CHANNELS_MAX)
+ status_reg |= hsi_driver_int_proc(pport,
+ HSI_SYS_MPU_U_STATUS_REG(port, irq),
+ HSI_SYS_MPU_U_ENABLE_REG(port, irq),
+ HSI_SSI_CHANNELS_MAX, pport->max_ch - 1);
+
+ return status_reg;
+}
+
+static void do_hsi_tasklet(unsigned long hsi_port)
+{
+ struct hsi_port *pport = (struct hsi_port *)hsi_port;
+ struct hsi_dev *hsi_ctrl = pport->hsi_controller;
+ u32 status_reg;
+
+ dev_dbg(hsi_ctrl->dev, "Int Tasklet : clock_enabled=%d\n",
+ hsi_ctrl->clock_enabled);
+ spin_lock(&hsi_ctrl->lock);
+ hsi_clocks_enable(hsi_ctrl->dev, __func__);
+ pport->in_int_tasklet = true;
+
+ status_reg = hsi_process_int_event(pport);
+
+ pport->in_int_tasklet = false;
+ hsi_clocks_disable(hsi_ctrl->dev, __func__);
+ spin_unlock(&hsi_ctrl->lock);
+ shceduled_already_flag = 0;
+ enable_irq(pport->irq);
+}
+
+static irqreturn_t hsi_mpu_handler(int irq, void *p)
+{
+ struct hsi_port *pport = p;
+ if (shceduled_already_flag == 0) {
+ shceduled_already_flag = 1;
+ tasklet_hi_schedule(&pport->hsi_tasklet);
+ /*
+ * Disable interrupt until Bottom Half has cleared the
+ * IRQ status register
+ */
+ disable_irq_nosync(pport->irq);
+ }
+ return IRQ_HANDLED;
+}
+
+int __init hsi_mpu_init(struct hsi_port *hsi_p, const char *irq_name)
+{
+ int err;
+
+ tasklet_init(&hsi_p->hsi_tasklet, do_hsi_tasklet, (unsigned long)hsi_p);
+
+ dev_info(hsi_p->hsi_controller->dev, "Registering IRQ %s (%d)\n",
+ irq_name, hsi_p->irq);
+ err = request_irq(hsi_p->irq, hsi_mpu_handler,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH,
+ irq_name, hsi_p);
+ if (err < 0) {
+		dev_err(hsi_p->hsi_controller->dev, "FAILED to request MPU"
+			" IRQ (%d) on port %d", hsi_p->irq, hsi_p->port_number);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void hsi_mpu_exit(struct hsi_port *hsi_p)
+{
+ tasklet_kill(&hsi_p->hsi_tasklet);
+ free_irq(hsi_p->irq, hsi_p);
+}
diff --git a/drivers/omap_hsi/hsi_protocol.c b/drivers/omap_hsi/hsi_protocol.c
new file mode 100644
index 0000000..e1451e7
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol.c
@@ -0,0 +1,308 @@
+/*
+ * File - hsi_protocol.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#if 0
+#define DEBUG 1
+#endif
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include "hsi-protocol-if.h"
+#include <linux/hsi_driver_if.h>
+
+#define DRIVER_VERSION "1.0"
+
+char test_data[10] = "abcdefghij";
+
+dev_t hsi_protocol_dev;
+
+struct protocol_queue {
+ struct list_head list;
+ u32 *data;
+ unsigned int count;
+};
+
+struct hsi_protocol {
+ unsigned int opened;
+ int poll_event;
+ struct list_head rx_queue;
+ struct list_head tx_queue;
+ spinlock_t lock; /* Serialize access to driver data and API */
+ struct fasync_struct *async_queue;
+ wait_queue_head_t rx_wait;
+ wait_queue_head_t tx_wait;
+ wait_queue_head_t poll_wait;
+};
+
+static struct hsi_protocol hsi_protocol_data[HSI_MAX_CHANNELS];
+
+void if_notify(int ch, struct hsi_event *ev)
+{
+ struct protocol_queue *entry;
+
+ pr_debug("%s, ev = {0x%x, 0x%p, %u}\n",
+ __func__, ev->event, ev->data, ev->count);
+
+ spin_lock(&hsi_protocol_data[ch].lock);
+
+/* Not Required */
+ /*if (!hsi_protocol_data[ch].opened) {
+ pr_debug("%s, device not opened\n!", __func__);
+ printk(KERN_INFO "%s, device not opened\n!", __func__);
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ return;
+ }*/
+
+ switch (HSI_EV_TYPE(ev->event)) {
+ case HSI_EV_IN:
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ pr_err("HSI-CHAR: entry allocation failed.\n");
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ return;
+ }
+ entry->data = ev->data;
+ entry->count = ev->count;
+ list_add_tail(&entry->list, &hsi_protocol_data[ch].rx_queue);
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ pr_debug("%s, HSI_EV_IN\n", __func__);
+ wake_up_interruptible(&hsi_protocol_data[ch].rx_wait);
+ break;
+ case HSI_EV_OUT:
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ pr_err("HSI-CHAR: entry allocation failed.\n");
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ return;
+ }
+ entry->data = ev->data;
+ entry->count = ev->count;
+ hsi_protocol_data[ch].poll_event |= (POLLOUT | POLLWRNORM);
+ list_add_tail(&entry->list, &hsi_protocol_data[ch].tx_queue);
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ pr_debug("%s, HSI_EV_OUT\n", __func__);
+ wake_up_interruptible(&hsi_protocol_data[ch].tx_wait);
+ break;
+ case HSI_EV_EXCEP:
+ hsi_protocol_data[ch].poll_event |= POLLPRI;
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ pr_debug("%s, HSI_EV_EXCEP\n", __func__);
+ wake_up_interruptible(&hsi_protocol_data[ch].poll_wait);
+ break;
+ case HSI_EV_AVAIL:
+ hsi_protocol_data[ch].poll_event |= (POLLIN | POLLRDNORM);
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ pr_debug("%s, HSI_EV_AVAIL\n", __func__);
+ wake_up_interruptible(&hsi_protocol_data[ch].poll_wait);
+ break;
+ default:
+ spin_unlock(&hsi_protocol_data[ch].lock);
+ break;
+ }
+}
+
+int hsi_proto_read(int ch, u32 *buffer, int count)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ u32 *data;
+ unsigned int data_len = 0;
+ struct protocol_queue *entry;
+ int ret, recv_data = 0;
+
+ /*if (count > MAX_HSI_IPC_BUFFER)
+ count = MAX_HSI_IPC_BUFFER;
+
+ data = kmalloc(count, GFP_ATOMIC);*/
+
+ ret = if_hsi_read(ch, buffer, count);
+ if (ret < 0) {
+ pr_err("Can not submit read. READ Error\n");
+ goto out2;
+ }
+
+ spin_lock_bh(&hsi_protocol_data[ch].lock);
+ add_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
+ spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+ for (;;) {
+ data = NULL;
+ data_len = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_bh(&hsi_protocol_data[ch].lock);
+ if (!list_empty(&hsi_protocol_data[ch].rx_queue)) {
+ entry = list_entry(hsi_protocol_data[ch].rx_queue.next,
+ struct protocol_queue, list);
+ data = entry->data;
+ data_len = entry->count;
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+ pr_debug("%s, data = 0x%p, data_len = %d\n",
+ __func__, data, data_len);
+
+ if (data_len) {
+ pr_debug("%s, RX finished, ch-> %d, length = %d\n",
+ __func__, ch, count);
+ spin_lock_bh(&hsi_protocol_data[ch].lock);
+ hsi_protocol_data[ch].poll_event &=
+ ~(POLLIN | POLLRDNORM);
+ spin_unlock_bh(&hsi_protocol_data[ch].lock);
+ if_hsi_poll(ch);
+#if 0
+ memcpy(buffer, data, count);
+#endif
+ recv_data += data_len;
+#if 0
+ buffer += data_len;
+ if ((recv_data == count) || (recv_data >= MAX_HSI_IPC_BUFFER))
+#endif
+ break;
+ } else if (signal_pending(current)) {
+ pr_debug("%s, ERESTARTSYS\n", __func__);
+ recv_data = -EAGAIN;
+ if_hsi_cancel_read(ch);
+ /* goto out; */
+ break;
+ }
+
+ /*printk(KERN_DEBUG "%s, going to sleep...\n", __func__); */
+ schedule();
+ /*printk(KERN_DEBUG "%s, woke up\n", __func__); */
+ }
+
+/*out:*/
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
+
+out2:
+	/* TODO: Set bit if data to be received is
+ * greater than 512K Bytes and return to IPC call
+ */
+
+ return recv_data;
+}
+
+int hsi_proto_write(int ch, u32 *buffer, int length)
+{
+
+ DECLARE_WAITQUEUE(wait, current);
+ u32 *data;
+	unsigned int data_len = 0;
+	int ret = -1;
+ struct protocol_queue *entry;
+
+ ret = if_hsi_write(ch, buffer, length);
+	if (ret < 0) {
+		pr_err("HSI Write ERROR %s\n", __func__);
+		goto out2;
+	}
+
+	spin_lock_bh(&hsi_protocol_data[ch].lock);
+ hsi_protocol_data[ch].poll_event &= ~(POLLOUT | POLLWRNORM);
+ add_wait_queue(&hsi_protocol_data[ch].tx_wait, &wait);
+ spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+ for (;;) {
+ data = NULL;
+ data_len = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_bh(&hsi_protocol_data[ch].lock);
+ if (!list_empty(&hsi_protocol_data[ch].tx_queue)) {
+ entry = list_entry(hsi_protocol_data[ch].tx_queue.next,
+ struct protocol_queue, list);
+ data = entry->data;
+ data_len = entry->count;
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&hsi_protocol_data[ch].lock);
+
+ if (data_len) {
+ pr_debug("%s, TX finished, data_len = %d, ch-> %d\n",
+ __func__, length, ch);
+ ret = data_len;
+ break;
+ } else if (signal_pending(current)) {
+ pr_debug("%s, ERESTARTSYS\n", __func__);
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ schedule();
+ }
+
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&hsi_protocol_data[ch].tx_wait, &wait);
+
+out2:
+ return ret;
+}
+EXPORT_SYMBOL(hsi_proto_write);
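+
+/*
+ * Illustrative sketch (not part of this driver): hsi_proto_write() and
+ * hsi_proto_read() block until if_notify() queues the matching HSI_EV_OUT /
+ * HSI_EV_IN event, so an IPC layer can call them synchronously. The buffer,
+ * channel and length unit below are assumptions.
+ *
+ *	u32 frame[1] = { 0xCAFECAFE };
+ *
+ *	if (hsi_proto_write(ch, frame, sizeof(frame)) < 0)
+ *		pr_err("TX failed on channel %d\n", ch);
+ */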
+
+static int __init hsi_protocol_init(void)
+{
+ int i, ret = 0;
+
+ pr_info("HSI Infineon Protocol driver version " DRIVER_VERSION "\n");
+
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ init_waitqueue_head(&hsi_protocol_data[i].rx_wait);
+ init_waitqueue_head(&hsi_protocol_data[i].tx_wait);
+ init_waitqueue_head(&hsi_protocol_data[i].poll_wait);
+ spin_lock_init(&hsi_protocol_data[i].lock);
+ hsi_protocol_data[i].opened = 0;
+ INIT_LIST_HEAD(&hsi_protocol_data[i].rx_queue);
+ INIT_LIST_HEAD(&hsi_protocol_data[i].tx_queue);
+ }
+
+ printk(KERN_INFO "hsi_protocol_init : hsi_mux_setting Done.\n");
+
+ ret = if_hsi_init();
+
+ return ret;
+}
+
+
+static void __exit hsi_protocol_exit(void)
+{
+ if_hsi_exit();
+}
+
+
+MODULE_AUTHOR("Rupesh Gujare <rupesh.g@samsung.com> / Samsung Electronics");
+MODULE_DESCRIPTION("HSI Protocol for Infineon Modem");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(hsi_protocol_init);
+module_exit(hsi_protocol_exit);
diff --git a/drivers/omap_hsi/hsi_protocol_cmd.c b/drivers/omap_hsi/hsi_protocol_cmd.c
new file mode 100644
index 0000000..f28b049
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol_cmd.c
@@ -0,0 +1,429 @@
+/*
+ * File - hsi_protocol_if_cmd.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics. All rights reserved.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <linux/hsi_driver_if.h>
+#include "hsi-protocol-if.h"
+
+extern struct if_hsi_iface hsi_protocol_iface;
+extern wait_queue_head_t ipc_read_wait, ipc_write_wait;
+int if_hsi_openchannel(struct if_hsi_channel *channel);
+int if_hsi_closechannel(struct if_hsi_channel *channel);
+
+extern struct if_hsi_cmd hsi_cmd_history;
+extern int tx_cmd_history_p;
+extern int rx_cmd_history_p;
+
+/* Decode command from received PDU on channel 0 */
+int hsi_decode_cmd(u32 *cmd_data, u32 *cmd, u32 *ch, u32 *param)
+{
+ int ret = 0;
+ u32 data = *cmd_data;
+ u8 lrc_cal, lrc_act;
+ u8 val1, val2, val3;
+
+ *cmd = ((data & 0xF0000000) >> 28);
+
+ switch (*cmd) {
+ case HSI_LL_MSG_BREAK:
+ pr_err("Command MSG_BREAK Received.\n");
+ break;
+
+ case HSI_LL_MSG_OPEN_CONN:
+ *ch = ((data & 0x0F000000) >> 24);
+ *param = ((data & 0x00FFFF00) >> 8);
+ /*Check LRC*/
+ val1 = ((data & 0xFF000000) >> 24);
+ val2 = ((data & 0x00FF0000) >> 16);
+ val3 = ((data & 0x0000FF00) >> 8);
+ lrc_act = (data & 0x000000FF);
+ lrc_cal = val1 ^ val2 ^ val3;
+ if (lrc_cal != lrc_act)
+ ret = -1;
+ break;
+
+ case HSI_LL_MSG_CONN_READY:
+ case HSI_LL_MSG_CONN_CLOSED:
+ case HSI_LL_MSG_CANCEL_CONN:
+ case HSI_LL_MSG_NAK:
+ *ch = ((data & 0x0F000000) >> 24);
+ break;
+
+ case HSI_LL_MSG_ACK:
+ *ch = ((data & 0x0F000000) >> 24);
+ *param = (data & 0x00FFFFFF);
+ //printk(KERN_INFO "ACK Received ch=%d, param=%d\n",*ch, *param);
+ break;
+
+ case HSI_LL_MSG_CONF_RATE:
+ *ch = ((data & 0x0F000000) >> 24);
+ *param = ((data & 0x0F000000) >> 24);
+ break;
+
+ case HSI_LL_MSG_OPEN_CONN_OCTET:
+ *ch = ((data & 0x0F000000) >> 24);
+ *param = (data & 0x00FFFFFF);
+ break;
+
+ case HSI_LL_MSG_ECHO:
+ case HSI_LL_MSG_INFO_REQ:
+ case HSI_LL_MSG_INFO:
+ case HSI_LL_MSG_CONFIGURE:
+ case HSI_LL_MSG_ALLOCATE_CH:
+ case HSI_LL_MSG_RELEASE_CH:
+ case HSI_LL_MSG_INVALID:
+ *cmd = HSI_LL_MSG_INVALID;
+ *ch = HSI_LL_INVALID_CHANNEL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
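+/*
+ * Note on the LRC check above (illustrative only): for OPEN_CONN the low
+ * byte must equal the XOR of the three bytes above it, e.g. if those bytes
+ * are 0x1A, 0x12 and 0x34 the only accepted LRC is 0x1A ^ 0x12 ^ 0x34 = 0x3C;
+ * anything else makes hsi_decode_cmd() return -1.
+ */
+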
+int protocol_create_cmd(int cmd_type, unsigned int channel, void *arg)
+{
+ unsigned int command = 0;
+ int ret = 0;
+
+ switch (cmd_type) {
+ case HSI_LL_MSG_BREAK:
+ {
+ command = 0;
+ }
+ break;
+
+ case HSI_LL_MSG_OPEN_CONN:
+ {
+ unsigned int size = *(unsigned int *)arg;
+ unsigned int lcr = 0;
+
+/* if(size > 4)
+ size = (size & 0x3) ? ((size >> 2) + 1):(size >> 2);
+ else
+ size = 1;*/
+
+ command = ((HSI_LL_MSG_OPEN_CONN & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24) |
+ ((size & 0x0000FFFF) << 8);
+
+ lcr = ((command & 0xFF000000) >> 24) ^
+ ((command & 0x00FF0000) >> 16) ^
+ ((command & 0x0000FF00) >> 8);
+
+ command = command | (lcr & 0x000000FF);
+ }
+ break;
+
+ case HSI_LL_MSG_CONN_READY:
+ {
+ command = ((HSI_LL_MSG_CONN_READY & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24);
+ }
+ break;
+
+ case HSI_LL_MSG_CONN_CLOSED:
+ {
+ command = ((HSI_LL_MSG_CONN_CLOSED & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24);
+ }
+ break;
+
+ case HSI_LL_MSG_CANCEL_CONN:
+ {
+ unsigned int role = *(unsigned int *)arg;
+
+ command = ((HSI_LL_MSG_CANCEL_CONN & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24) |
+ ((role & 0x000000FF) << 16);
+ }
+ break;
+
+ case HSI_LL_MSG_ACK:
+ {
+ unsigned int echo_params = *(unsigned int *)arg;
+
+ command = ((HSI_LL_MSG_ACK & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24) |
+ ((echo_params & 0x00FFFFFF));
+ }
+ break;
+
+ case HSI_LL_MSG_NAK:
+ {
+ command = ((HSI_LL_MSG_NAK & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24);
+ }
+ break;
+
+ case HSI_LL_MSG_CONF_RATE:
+ {
+ unsigned int baud_rate = *(unsigned int *)arg;
+
+ command = ((HSI_LL_MSG_CONF_RATE & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24) |
+ ((baud_rate & 0x00FFFFFF));
+ }
+ break;
+
+ case HSI_LL_MSG_OPEN_CONN_OCTET:
+ {
+ unsigned int size = *(unsigned int *)arg;
+
+ command = ((HSI_LL_MSG_OPEN_CONN_OCTET & 0x0000000F) << 28) |
+ ((channel & 0x000000FF) << 24) |
+ ((size & 0x00FFFFFF));
+
+ }
+ break;
+
+ case HSI_LL_MSG_ECHO:
+ case HSI_LL_MSG_INFO_REQ:
+ case HSI_LL_MSG_INFO:
+ case HSI_LL_MSG_CONFIGURE:
+ case HSI_LL_MSG_ALLOCATE_CH:
+ case HSI_LL_MSG_RELEASE_CH:
+ case HSI_LL_MSG_INVALID:
+ ret = -1;
+ break;
+ }
+ return command;
+}
+
+int set_tx_config(struct if_hsi_channel *ch, u32 mode, u32 max_channels)
+{
+ struct hst_ctx tx_config;
+ int ret;
+
+ hsi_ioctl(ch->dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = mode;
+ tx_config.channels = max_channels;
+ ret = hsi_ioctl(ch->dev, HSI_IOCTL_SET_TX, &tx_config);
+ return ret;
+}
+
+static int saved_cmd_queue = 0;
+static u32 cmd_saved[5];
+int hsi_protocol_send_command(u32 cmd, u32 channel, u32 param)
+{
+ struct if_hsi_channel *channel_zero;
+ u32 cmd_array[4] = {0x00000000, 0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC}, ret = -1;
+
+ channel_zero = &hsi_protocol_iface.channels[0];
+	cmd_array[0] = protocol_create_cmd(cmd, channel, &param);
+	pr_debug("[%s] CMD = %08x\n", __func__, cmd_array[0]);
+ while (channel_zero->tx_state != HSI_LL_TX_STATE_IDLE) {
+ cmd_saved[saved_cmd_queue] = cmd_array[0];
+ saved_cmd_queue++;
+ pr_debug("(%s) cmd_saved : %x(%d)\n", __func__, cmd_array[0], saved_cmd_queue);
+
+ return 0;
+ }
+
+send_retry:
+
+ channel_zero->tx_state = HSI_LL_TX_STATE_TX;
+
+ // For es 2.1 ver.
+ ret = hsi_proto_write(0, cmd_array, 4);
+ if (ret < 0) {
+ pr_err("(%s) Command Write failed, CMD->%X\n", __func__, cmd_array[0]);
+ channel_zero->tx_state = HSI_LL_TX_STATE_IDLE;
+ return -1;
+ } else {
+ channel_zero->tx_state = HSI_LL_TX_STATE_IDLE;
+
+ pr_debug("[%s] CMD = %08x\n", __func__, cmd_array[0]);
+
+ hsi_cmd_history.tx_cmd[tx_cmd_history_p] = cmd_array[0];
+ hsi_cmd_history.tx_cmd_time[tx_cmd_history_p] = CURRENT_TIME;
+ tx_cmd_history_p++;
+ if (tx_cmd_history_p >= 50)
+ tx_cmd_history_p = 0;
+
+ if (saved_cmd_queue) {
+ saved_cmd_queue--;
+ cmd_array[0] = cmd_saved[saved_cmd_queue];
+
+ goto send_retry;
+ }
+
+ return 0;
+ }
+}
+
+void rx_stm(u32 cmd, u32 ch, u32 param)
+{
+ struct if_hsi_channel *channel;
+	u32 size = 0, tmp_cmd = 0, ret;
+	int i;
+ channel = &hsi_protocol_iface.channels[ch];
+
+ switch (cmd) {
+ case HSI_LL_MSG_OPEN_CONN:
+		pr_err("ERROR... OPEN_CONN not supported. Use OPEN_CONN_OCTET instead.\n");
+ break;
+
+ case HSI_LL_MSG_ECHO:
+ pr_err("ERROR... HSI_LL_MSG_ECHO not supported.\n");
+ break;
+
+ case HSI_LL_MSG_CONN_CLOSED:
+ switch (channel->tx_state) {
+ case HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED:
+ channel->tx_state = HSI_LL_TX_STATE_IDLE;
+
+ /* ACWAKE ->LOW */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
+ if (ret == 0)
+ pr_debug("ACWAKE pulled low in %s()\n", __func__);
+ else
+ pr_err("ACWAKE pulled low in %s() ERROR : %d\n", __func__, ret);
+
+ pr_debug("[%s] Received CONN_CLOSED. ch-> %d\n", __func__,ch);
+ break;
+
+ default:
+ pr_err("Wrong STATE for CONN_CLOSED\n");
+ }
+ break;
+
+ case HSI_LL_MSG_CANCEL_CONN:
+ pr_debug("Received CANCEL_CONN\n");
+ break;
+
+ case HSI_LL_MSG_ACK:
+ switch (channel->tx_state) {
+ case HSI_LL_TX_STATE_WAIT_FOR_ACK:
+ case HSI_LL_TX_STATE_SEND_OPEN_CONN:
+ //printk(KERN_INFO "ACK received %s()\n",__func__);
+
+ channel->tx_state = HSI_LL_TX_STATE_TX;
+ size = param;
+#if 0
+ // TEMP: send/read by 16 byte unit for v.11A(CP)
+ if ((size > 16) && (size % 16))
+ size += (16 - (size % 16));
+ else if (size < 16)
+ size = 16;
+#endif
+
+ // For es 2.1 ver.
+ if (size % 4)
+ size += (4 - (size % 4));
+
+			pr_debug("Writing %d bytes of data on channel %d, tx_buf = %p, in %s()\n",
+				 size, ch, channel->tx_buf, __func__);
+ ret = hsi_proto_write(ch, channel->tx_buf, size);
+ channel->tx_state = HSI_LL_TX_STATE_WAIT_FOR_CONN_CLOSED;
+ wake_up_interruptible(&ipc_write_wait);
+ channel->tx_nak_count = 0;
+ break;
+
+ case HSI_LL_TX_STATE_CLOSED:/* ACK as response to CANCEL_CONN */
+ if (channel->rx_state == HSI_LL_RX_STATE_WAIT_FOR_CANCEL_CONN_ACK)
+ channel->rx_state = HSI_LL_RX_STATE_IDLE;
+ break;
+
+ case HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK: /* ACK as response to CONF_RATE */
+ //TODO: SET CONF RATE
+ pr_debug("ACK Received for CONF_RATE\n");
+ break;
+
+ default:
+ pr_err("ACK Received for Unknown state\n");
+ }
+ break;
+
+ case HSI_LL_MSG_NAK:
+ switch (channel->tx_state) {
+ case HSI_LL_TX_STATE_WAIT_FOR_ACK:
+ printk(KERN_INFO "(%s) NAK received. ch->%d\n", __func__, ch);
+ //channel->tx_state = HSI_LL_TX_STATE_NACK;
+ if (channel->tx_nak_count < 10) {
+ msleep(10);
+
+ tmp_cmd = ((HSI_LL_MSG_OPEN_CONN_OCTET & 0x0000000F) << 28) |
+ ((ch & 0x000000FF) << 24);
+				for (i = 49; i >= 0; i--) {
+					if ((hsi_cmd_history.tx_cmd[i] & 0xFFF00000) == tmp_cmd)
+						break;
+				}
+				/* Fall back to a zero size if no matching OPEN_CONN is found */
+				size = (i >= 0) ? (hsi_cmd_history.tx_cmd[i] & 0x000FFFFF) : 0;
+
+ pr_debug("(%s) Re Send OPEN CONN ch->%d, size->%d, count->%d\n", __func__, ch, size, channel->tx_nak_count);
+
+ hsi_protocol_send_command(HSI_LL_MSG_OPEN_CONN_OCTET, ch, size);
+ channel->tx_nak_count++;
+ } else {
+ hsi_protocol_send_command(HSI_LL_MSG_BREAK, ch, size);
+ pr_debug("(%s) Sending MSG_BREAK. ch->%d\n", __func__, ch);
+ //TODO Reset All channels and inform IPC write about failure (Possibly by sending signal)
+ }
+ break;
+
+ case HSI_LL_TX_STATE_WAIT_FOR_CONF_ACK: /* NAK as response to CONF_RATE */
+ channel->tx_state = HSI_LL_TX_STATE_IDLE;
+ break;
+
+ default:
+ pr_err("ERROR - Received NAK in invalid state. state->%d\n", channel->tx_state);
+ }
+ break;
+
+ case HSI_LL_MSG_CONF_RATE:
+ //TODO: Set Conf Rate
+ pr_debug("CONF_RATE Received\n");
+ break;
+
+ case HSI_LL_MSG_OPEN_CONN_OCTET:
+ switch (channel->rx_state) {
+ /* case HSI_LL_RX_STATE_CLOSED: */
+ case HSI_LL_RX_STATE_IDLE:
+ pr_debug("OPEN_CONN_OCTET in %s(), ch-> %d\n", __func__, ch);
+ channel->rx_state = HSI_LL_RX_STATE_TO_ACK;
+ hsi_protocol_send_command(HSI_LL_MSG_ACK, ch, param);
+
+ channel->rx_count = param;
+ channel->rx_state = HSI_LL_RX_STATE_RX;
+ wake_up_interruptible(&ipc_read_wait);
+ break;
+
+ case HSI_LL_RX_STATE_BLOCKED:
+ /* TODO */
+ break;
+
+ default:
+ pr_err("OPEN_CONN_OCTET in invalid state, Current State -> %d\n", channel->rx_state);
+ pr_info("Sending NAK to channel-> %d\n", ch);
+ hsi_protocol_send_command(HSI_LL_MSG_NAK, ch, param);
+ }
+ break;
+
+ default:
+		pr_err("Invalid command encountered in rx_stm()\n");
+ }
+
+}
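+
+/*
+ * Summary of the transmit handshake implemented above (sketch, see also
+ * write_hsi() in hsi_protocol_if.c): the sender issues OPEN_CONN_OCTET and
+ * waits; on ACK the payload is written with hsi_proto_write() and the state
+ * moves to WAIT_FOR_CONN_CLOSED; on CONN_CLOSED the state returns to IDLE
+ * and ACWAKE is pulled low; on NAK the OPEN_CONN_OCTET is retried up to 10
+ * times before MSG_BREAK is sent.
+ */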
diff --git a/drivers/omap_hsi/hsi_protocol_if.c b/drivers/omap_hsi/hsi_protocol_if.c
new file mode 100644
index 0000000..ced5dae
--- /dev/null
+++ b/drivers/omap_hsi/hsi_protocol_if.c
@@ -0,0 +1,896 @@
+/*
+ * File - hsi_protocol_if.c
+ *
+ * Implements HSI protocol for Infineon Modem.
+ *
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ * Author: Rupesh Gujare <rupesh.g@samsung.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <asm/mach-types.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/bitmap.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+
+#include <linux/hsi_driver_if.h>
+#include "hsi-protocol-if.h"
+
+//#define DEBUG 1
+//#define DEBUG_PHY_DATA 1
+
+#define HSI_CHANNEL_STATE_UNAVAIL (1 << 0)
+#define HSI_CHANNEL_STATE_READING (1 << 1)
+#define HSI_CHANNEL_STATE_WRITING (1 << 2)
+
+
+struct if_hsi_iface hsi_protocol_iface;
+wait_queue_head_t ipc_read_wait, ipc_write_wait;
+
+
+static void if_hsi_protocol_port_event(struct hsi_device *dev, unsigned int event,
+ void *arg);
+static int __devinit hsi_protocol_probe(struct hsi_device *dev);
+static int __devexit hsi_protocol_remove(struct hsi_device *dev);
+
+static struct hsi_device_driver if_hsi_protocol_driver = {
+ .ctrl_mask = ANY_HSI_CONTROLLER,
+ .probe = hsi_protocol_probe,
+ .remove = __devexit_p(hsi_protocol_remove),
+ .driver = {
+ .name = "hsi_protocol"
+ },
+};
+
+struct if_hsi_cmd hsi_cmd_history;
+int tx_cmd_history_p = 0;
+int rx_cmd_history_p = 0;
+
+static int if_hsi_read_on(int ch, u32 *data, unsigned int count)
+{
+ struct if_hsi_channel *channel;
+ int ret;
+
+ channel = &hsi_protocol_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+
+ spin_lock(&channel->lock);
+ if (channel->state & HSI_CHANNEL_STATE_READING) {
+ pr_err("Read still pending on channel %d\n", ch);
+ spin_unlock(&channel->lock);
+ return -EBUSY;
+ }
+ channel->state |= HSI_CHANNEL_STATE_READING;
+ channel->rx_data = data;
+ channel->rx_count = count;
+ spin_unlock(&channel->lock);
+
+ ret = hsi_read(channel->dev, data, count / 4);
+ dev_dbg(&channel->dev->device, "%s, ch = %d, ret = %d\n", __func__, ch,
+ ret);
+
+ return ret;
+}
+
+static void if_hsi_proto_read_done(struct hsi_device *dev, unsigned int size)
+{
+ struct if_hsi_channel *channel;
+ struct hsi_event ev;
+
+#ifdef DEBUG_PHY_DATA
+ u32 *tmp;
+ u32 i;
+#endif
+
+ //printk(KERN_INFO "if_hsi_proto_read_done() is called for ch-> %d\n", dev->n_ch);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ ev.event = HSI_EV_IN;
+ ev.data = channel->rx_data;
+ ev.count = 4 * size;
+ spin_unlock(&channel->lock);
+
+#ifdef DEBUG_PHY_DATA
+	/* Dump received data; normally disabled since the extra delay can cause MSG_BREAK */
+ tmp = channel->rx_data;
+ printk(KERN_INFO "[%s](%d)(%d) RX = ", __func__, dev->n_ch, ev.count);
+ for (i = 0; i < ((size > 5) ? 5 : size); i++) {
+ printk(KERN_INFO "%08x ", *tmp);
+ tmp++;
+ }
+ printk(KERN_INFO "\n");
+#endif
+
+ if_notify(dev->n_ch, &ev);
+}
+
+int if_hsi_read(int ch, u32 *data, unsigned int count)
+{
+ int ret = 0;
+ struct if_hsi_channel *channel;
+ channel = &hsi_protocol_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = if_hsi_read_on(ch, data, count);
+ return ret;
+}
+
+int if_hsi_poll(int ch)
+{
+ struct if_hsi_channel *channel;
+ int ret = 0;
+ channel = &hsi_protocol_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = hsi_poll(channel->dev);
+ return ret;
+}
+
+static int if_hsi_write_on(int ch, u32 *address, unsigned int count)
+{
+ struct if_hsi_channel *channel;
+ int ret;
+
+ channel = &hsi_protocol_iface.channels[ch];
+
+ spin_lock(&channel->lock);
+ if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+ pr_err("Write still pending on channel %d\n", ch);
+ printk(KERN_INFO "Write still pending on channel %d\n", ch);
+ spin_unlock(&channel->lock);
+ return -EBUSY;
+ }
+
+ channel->tx_data = address;
+ channel->tx_count = count;
+ channel->state |= HSI_CHANNEL_STATE_WRITING;
+ spin_unlock(&channel->lock);
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = hsi_write(channel->dev, address, count / 4);
+ return ret;
+}
+
+
+static void if_hsi_proto_write_done(struct hsi_device *dev, unsigned int size)
+{
+ struct if_hsi_channel *channel;
+ struct hsi_event ev;
+
+#ifdef DEBUG_PHY_DATA
+ u32 *tmp;
+ u32 i;
+#endif
+
+ //printk(KERN_INFO "if_hsi_proto_write_done() is called for ch-> %d\n", dev->n_ch);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
+
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+ ev.event = HSI_EV_OUT;
+ ev.data = channel->tx_data;
+ ev.count = 4 * size;
+ spin_unlock(&channel->lock);
+
+#ifdef DEBUG_PHY_DATA
+	/* Dump outgoing data; normally disabled since the extra delay can cause MSG_BREAK */
+ tmp = channel->tx_data;
+ printk(KERN_INFO "[%s](%d)(%d) TX = ", __func__, dev->n_ch, ev.count);
+ for (i = 0; i < ((size > 5) ? 5 : size); i++) {
+ printk(KERN_INFO "%08x ", *tmp);
+ tmp++;
+ }
+ printk(KERN_INFO "\n");
+#endif
+
+ if_notify(dev->n_ch, &ev);
+
+}
+
+int if_hsi_write(int ch, u32 *data, unsigned int count)
+{
+ int ret = 0;
+ struct if_hsi_channel *channel;
+ channel = &hsi_protocol_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ ret = if_hsi_write_on(ch, data, count);
+ return ret;
+}
+
+void if_hsi_cancel_read(int ch)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_protocol_iface.channels[ch];
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
+ if (channel->state & HSI_CHANNEL_STATE_READING)
+ hsi_read_cancel(channel->dev);
+ spin_lock(&channel->lock);
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ spin_unlock(&channel->lock);
+}
+
+void if_hsi_set_wakeline(int ch, unsigned int state)
+{
+ struct if_hsi_channel *channel;
+ channel = &hsi_protocol_iface.channels[ch];
+ hsi_ioctl(channel->dev,
+ state ? HSI_IOCTL_ACWAKE_UP : HSI_IOCTL_ACWAKE_DOWN, NULL);
+}
+
+
+static void if_hsi_protocol_port_event(struct hsi_device *dev, unsigned int event,
+ void *arg)
+{
+ struct hsi_event ev;
+ int i;
+
+ ev.event = HSI_EV_EXCEP;
+ ev.data = (u32 *) 0;
+ ev.count = 0;
+
+
+ switch (event) {
+ case HSI_EVENT_BREAK_DETECTED:
+ pr_debug("%s, HWBREAK detected\n", __func__);
+ ev.data = (u32 *) HSI_HWBREAK;
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ if (hsi_protocol_iface.channels[i].opened)
+ if_notify(i, &ev);
+ }
+ break;
+ case HSI_EVENT_HSR_DATAAVAILABLE:
+ i = (int)arg;
+ pr_debug("%s, HSI_EVENT_HSR_DATAAVAILABLE channel = %d\n",
+ __func__, i);
+ ev.event = HSI_EV_AVAIL;
+ if (hsi_protocol_iface.channels[i].opened)
+ if_notify(i, &ev);
+ break;
+ case HSI_EVENT_CAWAKE_UP:
+ pr_debug("%s, CAWAKE up\n", __func__);
+ break;
+ case HSI_EVENT_CAWAKE_DOWN:
+ pr_debug("%s, CAWAKE down\n", __func__);
+ break;
+ case HSI_EVENT_ERROR:
+		pr_debug("%s, HSI ERROR occurred\n", __func__);
+ break;
+ default:
+ pr_warning("%s, Unknown event(%d)\n", __func__, event);
+ break;
+ }
+}
+
+int if_hsi_openchannel(struct if_hsi_channel *channel)
+{
+ int ret = 0;
+
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+ channel->channel_id);
+ spin_lock(&channel->lock);
+
+ if (channel->state == HSI_CHANNEL_STATE_UNAVAIL) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ if (channel->opened) {
+ ret = -EBUSY;
+ goto leave;
+ }
+
+ if (!channel->dev) {
+ pr_err("Channel %d is not ready??\n", channel->channel_id);
+ ret = -ENODEV;
+ goto leave;
+ }
+ spin_unlock(&channel->lock);
+
+ ret = hsi_open(channel->dev);
+ spin_lock(&channel->lock);
+ if (ret < 0) {
+ pr_err("Could not open channel %d\n", channel->channel_id);
+ goto leave;
+ }
+
+ channel->opened = 1;
+ channel->tx_state = HSI_LL_TX_STATE_IDLE;
+ channel->rx_state = HSI_LL_RX_STATE_TO_CONN_READY;
+ printk(KERN_INFO "setting channel->opened=1 for channel %d\n", channel->dev->n_ch);
+leave:
+ spin_unlock(&channel->lock);
+ return ret;
+}
+
+int if_hsi_closechannel(struct if_hsi_channel *channel)
+{
+ int ret = 0;
+
+ dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
+ channel->channel_id);
+ spin_lock(&channel->lock);
+
+ if (!channel->opened)
+ goto leave;
+
+ if (!channel->dev) {
+ pr_err("Channel %d is not ready??\n", channel->channel_id);
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ /* Stop any pending read/write */
+ if (channel->state & HSI_CHANNEL_STATE_READING) {
+ channel->state &= ~HSI_CHANNEL_STATE_READING;
+ spin_unlock(&channel->lock);
+ hsi_read_cancel(channel->dev);
+ spin_lock(&channel->lock);
+ }
+ if (channel->state & HSI_CHANNEL_STATE_WRITING) {
+ channel->state &= ~HSI_CHANNEL_STATE_WRITING;
+
+ spin_unlock(&channel->lock);
+ hsi_write_cancel(channel->dev);
+ } else
+ spin_unlock(&channel->lock);
+
+ hsi_close(channel->dev);
+
+ spin_lock(&channel->lock);
+ channel->opened = 0;
+ channel->tx_state = HSI_LL_TX_STATE_CLOSED;
+ channel->rx_state = HSI_LL_RX_STATE_CLOSED;
+leave:
+ spin_unlock(&channel->lock);
+ return ret;
+}
+
+
+/*
+ * Read thread: responsible for handling incoming commands on control
+ * channel 0; waits on port events via a waitqueue.
+ */
+static int hsi_read_thrd(void *data)
+{
+ u32 cmd_data[4], cmd, channel, param = 0;
+ int ret;
+
+ printk(KERN_INFO "Inside read thread\n");
+ while (1) {
+ /*Call hsi_proto_read*/
+ /*Read 16 bytes due to Modem limitation*/
+ //hsi_proto_read(0, cmd_data, (4 * 4));
+
+ // For es 2.1 ver.
+ hsi_proto_read(0, cmd_data, 4);
+
+ hsi_cmd_history.rx_cmd[rx_cmd_history_p] = cmd_data[0];
+ hsi_cmd_history.rx_cmd_time[rx_cmd_history_p] = CURRENT_TIME;
+ rx_cmd_history_p++;
+ if (rx_cmd_history_p >= 50)
+ rx_cmd_history_p = 0;
+
+ /*Decode Command*/
+		ret = hsi_decode_cmd(&cmd_data[0], &cmd, &channel, &param);
+ if (ret != 0) {
+ pr_err("Can not decode command\n");
+ } else {
+ printk(KERN_INFO "%s(),CMD Received-> %x, ch-> %d, param-> %d.\n", __func__, cmd, channel, param);
+ /*Rx State Machine*/
+ rx_stm(cmd, channel, param);
+ }
+ }
+ return 0;
+}
+
+
+int hsi_start_protocol(void)
+{
+ struct hst_ctx tx_config;
+ struct hsr_ctx rx_config;
+ int i, ret = 0;
+
+ printk(KERN_INFO "In function %s()\n", __func__);
+ /*Open All channels */
+ for (i = 0; i <= 5; i++) {
+ ret = if_hsi_openchannel(&hsi_protocol_iface.channels[i]);
+ if (ret < 0)
+ pr_err("Can not Open channel->%d . Can not start HSI protocol\n", i);
+ else
+ printk(KERN_INFO "Channel->%d Open Successful\n", i);
+
+ /*Set Rx Config*/
+ hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_GET_RX, &rx_config);
+ rx_config.mode = 2;
+ rx_config.divisor = 1;
+ rx_config.channels = HSI_MAX_CHANNELS;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_SET_RX, &rx_config);
+ if (ret == 0)
+ printk(KERN_INFO "SET_RX Successful for ch->%d\n", i);
+
+ /*Set Tx Config*/
+ hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = 2;
+ tx_config.divisor = 1;
+ tx_config.channels = HSI_MAX_CHANNELS;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[i].dev, HSI_IOCTL_SET_TX, &tx_config);
+ if (ret == 0)
+ printk(KERN_INFO "SET_TX Successful for ch->%d\n", i);
+ }
+ /*Make channel-0 tx_state to IDLE*/
+ hsi_protocol_iface.channels[0].tx_state = HSI_LL_TX_STATE_IDLE;
+ return ret;
+}
+EXPORT_SYMBOL(hsi_start_protocol);
+
+static int hsi_protocol_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ char *p = page;
+ int len, i;
+
+ p += sprintf(p, "======= HISTORY OF CMD =======\n");
+ p += sprintf(p, " tx_cmd_history_p : %d\n", tx_cmd_history_p);
+ p += sprintf(p, " rx_cmd_history_p : %d\n", rx_cmd_history_p);
+ for (i = 0; i < 50; i++) {
+ p += sprintf(p, " [%d] tx : 0x%08x(%lu.%09lu), rx : 0x%08x(%lu.%09lu)\n",
+ i, hsi_cmd_history.tx_cmd[i], (unsigned long)hsi_cmd_history.tx_cmd_time[i].tv_sec, (unsigned long)hsi_cmd_history.tx_cmd_time[i].tv_nsec,
+ hsi_cmd_history.rx_cmd[i], (unsigned long)hsi_cmd_history.rx_cmd_time[i].tv_sec, (unsigned long)hsi_cmd_history.rx_cmd_time[i].tv_nsec);
+ }
+ p += sprintf(p, "======= HISTORY OF CMD =======\n");
+
+ len = (p - page) - off;
+ if (len < 0)
+ len = 0;
+
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+
+ return len;
+}
+
+int __devexit hsi_protocol_remove(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+	int port, ret = 0;
+
+ //dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
+ // dev->n_ch);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_protocol_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+ hsi_set_read_cb(dev, NULL);
+ hsi_set_write_cb(dev, NULL);
+ hsi_set_port_event_cb(dev, NULL);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ channel->dev = NULL;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ ret = 0;
+ }
+ spin_unlock_bh(&hsi_protocol_iface.lock);
+
+ return ret;
+}
+
+int __devinit hsi_protocol_probe(struct hsi_device *dev)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int port;
+
+ printk(KERN_INFO "Inside Function %s\n", __func__);
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ spin_lock_bh(&hsi_protocol_iface.lock);
+ if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
+		printk(KERN_INFO "Registering callback functions\n");
+ hsi_set_read_cb(dev, if_hsi_proto_read_done);
+ hsi_set_write_cb(dev, if_hsi_proto_write_done);
+ hsi_set_port_event_cb(dev, if_hsi_protocol_port_event);
+ channel = &hsi_protocol_iface.channels[dev->n_ch];
+ channel->dev = dev;
+ channel->state = 0;
+ channel->rx_state = HSI_LL_RX_STATE_CLOSED;
+ channel->tx_state = HSI_LL_TX_STATE_CLOSED;
+ channel->tx_count = 0;
+ channel->rx_count = 0;
+ channel->tx_nak_count = 0;
+ channel->rx_nak_count = 0;
+ channel->rx_buf = NULL;
+ channel->tx_buf = NULL;
+ hsi_protocol_iface.init_chan_map ^= (1 << dev->n_ch);
+ }
+ spin_unlock_bh(&hsi_protocol_iface.lock);
+
+ return 0;
+
+}
+
+
+int __init if_hsi_init(void)
+{
+ struct if_hsi_channel *channel;
+ int i, ret;
+ struct proc_dir_entry *dir;
+
+ for (i = 0; i < HSI_MAX_PORTS; i++)
+ if_hsi_protocol_driver.ch_mask[i] = 0;
+
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ channel = &hsi_protocol_iface.channels[i];
+ channel->dev = NULL;
+ channel->opened = 0;
+ channel->state = HSI_CHANNEL_STATE_UNAVAIL;
+ channel->channel_id = i;
+ spin_lock_init(&channel->lock);
+ }
+
+ /*Initialize waitqueue for IPC read*/
+ init_waitqueue_head(&ipc_read_wait);
+ init_waitqueue_head(&ipc_write_wait);
+
+ /*Select All Channels of PORT-1.*/
+ if_hsi_protocol_driver.ch_mask[0] = CHANNEL_MASK;
+
+ ret = hsi_register_driver(&if_hsi_protocol_driver);
+ if (ret)
+ pr_err("Error while registering HSI driver %d", ret);
+
+	dir = create_proc_read_entry("driver/hsi_cmd", 0, 0, hsi_protocol_proc, NULL);
+	if (dir == NULL)
+		printk(KERN_INFO "create_proc_read_entry Fail.\n");
+	else
+		printk(KERN_INFO "create_proc_read_entry Done.\n");
+
+ return ret;
+}
+
+int __devexit if_hsi_exit(void)
+{
+ struct if_hsi_channel *channel;
+ unsigned long *address;
+ int i, port;
+
+ pr_debug("%s\n", __func__);
+
+ for (port = 0; port < HSI_MAX_PORTS; port++) {
+ if (if_hsi_protocol_driver.ch_mask[port])
+ break;
+ }
+
+ address = &if_hsi_protocol_driver.ch_mask[port];
+
+ for (i = 0; i < HSI_MAX_CHANNELS; i++) {
+ channel = &hsi_protocol_iface.channels[i];
+ if (channel->opened) {
+ if_hsi_set_wakeline(i, HSI_IOCTL_ACWAKE_DOWN);
+ if_hsi_closechannel(channel);
+ }
+ }
+
+ hsi_unregister_driver(&if_hsi_protocol_driver);
+ return 0;
+
+}
+
+u32 initialization = 0;
+
+/*Write data to channel*/
+int write_hsi(u32 ch, u32 *data, int length)
+{
+ int ret;
+ //u32 cmd[4] = {0x00000000, 0xAAAAAAAA, 0xBBBBBBBB, 0xCCCCCCCC};
+ struct if_hsi_channel *channel;
+ struct task_struct *read_thread;
+
+ channel = &hsi_protocol_iface.channels[ch];
+ channel->tx_buf = data;
+ channel->tx_count = 0;
+
+ //cmd[0] = protocol_create_cmd(HSI_LL_MSG_OPEN_CONN_OCTET, ch, (void *)&length);
+ //printk(KERN_INFO "data ptr is %x\n", data);
+
+ if (initialization == 0) {
+
+#if 0
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE pulled high in %s() ERROR : %d\n", __func__, ret);
+#endif
+
+ /*Creating read thread*/
+ read_thread = kthread_run(hsi_read_thrd, NULL, "hsi_read_thread");
+
+ initialization++;
+ }
+ /*Wait till previous data transfer is over*/
+ while (channel->tx_state != HSI_LL_TX_STATE_IDLE) {
+ //printk(KERN_INFO "Wait 5ms previous data transfer isn't over %s()\n", __func__);
+
+ //msleep(5);
+
+ return -EAGAIN;
+ }
+
+#if 1
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE pulled high in %s() ERROR : %d\n", __func__, ret);
+#endif
+
+ channel->tx_state = HSI_LL_TX_STATE_WAIT_FOR_ACK;
+
+ //send_cmd(cmd, channel, data)
+ //ret = hsi_proto_write(0, &cmd, 4*4);
+ //printk(KERN_INFO "Write returned %d\n", ret);
+ hsi_protocol_send_command(HSI_LL_MSG_OPEN_CONN_OCTET, ch, length);
+
+ wait_event_interruptible(ipc_write_wait, channel->tx_count != 0);
+
+ return channel->tx_count;
+
+
+}
+EXPORT_SYMBOL(write_hsi);
+
+
+int read_hsi(u8 *data, u32 ch, u32 *length)
+{
+ int ret, size, tmp, actual_length;
+ struct if_hsi_channel *channel;
+
+ channel = &hsi_protocol_iface.channels[ch];
+ channel->rx_state = HSI_LL_RX_STATE_IDLE;
+
+ //printk(KERN_INFO "In read_hsi() function, Sleeping ... channel-> %d\n", ch);
+ wait_event_interruptible(ipc_read_wait, (channel->rx_count != 0));
+ //printk(KERN_INFO "In read_hsi() function, Waking Up ... channel-> %d\n", ch);
+
+ actual_length = channel->rx_count;
+ size = channel->rx_count;
+
+#if 0
+ // TEMP: send/read by 16 byte unit for v.11A(CP)
+ if ((size > 16) && (size % 16))
+ size += (16 - (size % 16));
+ else if (size < 16)
+ size = 16;
+#endif
+
+ // For es 2.1 ver.
+ if (size % 4)
+ size += (4 - (size % 4));
+
+ ret = hsi_proto_read(ch, (u32 *)data, size);
+ if (ret < 0)
+ printk(KERN_INFO "Read in IPC failed, %s()\n", __func__);
+
+ //printk(KERN_INFO "%s() read returned %d, actual_length = %d, ch-> %d\n", __func__, ret, actual_length, ch);
+ //printk(KERN_INFO "%s() sending CONN_CLOSED.\n", __func__);
+ tmp = hsi_protocol_send_command(HSI_LL_MSG_CONN_CLOSED, ch, 0);
+ //printk(KERN_INFO "%s() Sending CONN_CLOSED Finished. ret = %d\n", __func__, tmp);
+
+ *length = actual_length;
+ channel->rx_count = 0;
+
+ //printk(KERN_INFO "%s() RETURNING TO IPC with ret = %d\n", __func__, ret);
+ return ret;
+
+}
+EXPORT_SYMBOL(read_hsi);
+
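+/*
+ * Illustrative usage from an IPC client (hypothetical caller, not part of
+ * this driver): write_hsi() returns -EAGAIN while a previous transfer is
+ * still pending and a non-negative count once the transfer completes;
+ * read_hsi() blocks until the modem opens the channel and reports the
+ * actual length through its third argument.
+ *
+ *	u32 buf[256], len;
+ *	int err = write_hsi(2, buf, 64);	// channel 2 chosen arbitrarily
+ *	if (err == -EAGAIN)
+ *		;	// previous transfer still in flight, try again later
+ *	if (read_hsi((u8 *)buf, 2, &len) >= 0)
+ *		pr_info("received %u bytes\n", len);
+ */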
+
+//========================================================//
+// ++ Flashless Boot. ++ //
+//========================================================//
+int hsi_start_protocol_single(void)
+{
+ int ret = 0;
+
+ struct hst_ctx tx_config;
+ struct hsr_ctx rx_config;
+
+ /*Open channel 0 */
+ ret = if_hsi_openchannel(&hsi_protocol_iface.channels[0]);
+ if (ret < 0) {
+ pr_err("Can not Open channel 0. Can not start HSI protocol\n");
+ goto err;
+ } else
+ printk(KERN_INFO "if_hsi_openchannel() returned %d\n", ret);
+
+
+ /*Set Tx Config*/
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = 2;
+ tx_config.channels = 1;
+ tx_config.divisor = 0;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_TX, &tx_config);
+ if (ret < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_TX Fail : %d\n", ret);
+ return ret;
+ }
+
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_RX, &rx_config);
+ rx_config.mode = 2;
+ rx_config.channels = 1;
+ rx_config.divisor = 0;
+ //rx_config.timeout = HZ / 2;
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_RX, &rx_config);
+ if (ret < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_RX Fail : %d\n", ret);
+ return ret;
+ }
+
+ /* ACWAKE ->HIGH */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
+
+err:
+
+ return ret;
+}
+EXPORT_SYMBOL(hsi_start_protocol_single);
+
+int hsi_reconfigure_protocol(void)
+{
+ int ret = 0;
+
+ /* ACWAKE ->LOW */
+ ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
+ if (ret == 0)
+ printk(KERN_INFO "ACWAKE pulled low in %s()\n", __func__);
+ else
+ printk(KERN_INFO "ACWAKE down fail!! %d\n", ret);
+
+
+	/* Close channel 0 */
+ ret = if_hsi_closechannel(&hsi_protocol_iface.channels[0]);
+ if (ret < 0) {
+ pr_err("Can not Close channel 0. Can not Stop HSI protocol for flashless\n");
+ goto err;
+ }
+
+
+ printk(KERN_INFO "(%s)(%d) hsi_start_protocol Start.\n", __func__, __LINE__);
+ hsi_start_protocol();
+ printk(KERN_INFO "(%s)(%d) hsi_start_protocol Done.\n", __func__, __LINE__);
+
+err:
+
+ return ret;
+}
+EXPORT_SYMBOL(hsi_reconfigure_protocol);
+
+int write_hsi_direct(u32 *data, int length)
+{
+ int retval = 0;
+#if 0
+ struct hst_ctx tx_config;
+
+
+ printk(KERN_INFO "write_hsi_direct : len : %d\n", length);
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_TX, &tx_config);
+ tx_config.mode = 2;
+ tx_config.channels = 1;
+ tx_config.divisor = 47;
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_TX, &tx_config);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : SET_TX Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "write_hsi_direct : SET_TX Successful\n");
+
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : ACWAKE High Fail : %d\n", retval);
+ return retval;
+ }
+#endif
+
+#if 0
+ if ((length > 16) && (length % 4))
+ length += (4 - (length % 4));
+ else if (length < 16)
+ length = 16;
+#endif
+
+// printk(KERN_INFO "write_hsi_direct : new len : %d\n", length);
+
+ retval = hsi_proto_write(0, data, length);
+ if (retval < 0) {
+ printk(KERN_INFO "write_hsi_direct : hsi_proto_write Fail : %d\n", retval);
+ return retval;
+ }
+ //printk(KERN_INFO "write_hsi_direct : Write returned %d\n", retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(write_hsi_direct);
+
+int read_hsi_direct(u32 *data, int length)
+{
+ int retval = 0;
+#if 0
+ struct hsr_ctx rx_config;
+
+
+ printk(KERN_INFO "read_hsi_direct : len : %d\n", length);
+ hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_GET_RX, &rx_config);
+ rx_config.mode = 2;
+ rx_config.channels = 1;
+ rx_config.divisor = 47;
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_SET_RX, &rx_config);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : SET_RX Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "read_hsi_direct : SET_RX Successful\n");
+
+ retval = hsi_ioctl(hsi_protocol_iface.channels[0].dev, HSI_IOCTL_ACWAKE_UP, NULL);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : ACWAKE High Fail : %d\n", retval);
+ return retval;
+ }
+ printk(KERN_INFO "read_hsi_direct : ACWAKE High\n");
+#endif
+
+#if 0
+ if ((length > 16) && (length % 4))
+ length += (4 - (length % 4));
+ else if (length < 16)
+ length = 16;
+#endif
+ //printk(KERN_INFO "read_hsi_direct : new len : %d\n", length);
+
+ retval = hsi_proto_read(0, data, length);
+ if (retval < 0) {
+ printk(KERN_INFO "read_hsi_direct : hsi_proto_read Fail : %d\n", retval);
+ return retval;
+ }
+ //printk(KERN_INFO "read_hsi_direct : Read returned %d\n", retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(read_hsi_direct);
+
+//========================================================//
+// -- Flashless Boot. -- //
+//========================================================//
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 87fe0f7..f34ae3a 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -82,6 +82,18 @@
#define VREG_BC_PROC 3
#define VREG_BC_CLK_RST 4
+/* TWL6030 LDO register values for CFG_TRANS */
+#define TWL6030_CFG_TRANS_STATE_MASK 0x03
+#define TWL6030_CFG_TRANS_STATE_OFF 0x00
+/*
+ * Auto means the following:
+ * SMPS: AUTO(PWM/PFM)
+ * LDO: AMS(SLP/ACT)
+ * resource: ON
+ */
+#define TWL6030_CFG_TRANS_STATE_AUTO 0x01
+#define TWL6030_CFG_TRANS_SLEEP_SHIFT 2
+
/* TWL6030 LDO register values for CFG_STATE */
#define TWL6030_CFG_STATE_OFF 0x00
#define TWL6030_CFG_STATE_ON 0x01
@@ -104,6 +116,18 @@
#define SMPS_MULTOFFSET_VIO BIT(1)
#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+/* TWL6030 VUSB supplemental config registers */
+#define TWL6030_MISC2 0xE5
+#define TWL6030_CFG_LDO_PD2 0xF5
+
+/*
+ * TWL603X SMPS has 6 bits xxxx_CFG_VOLTAGE.VSEL[5:0] to configure voltages and
+ * each bit combination corresponds to a particular voltage (value 63 is
+ * reserved).
+ */
+#define TWL603X_SMPS_VSEL_MASK 0x3F
+#define TWL603X_SMPS_NUMBER_VOLTAGES TWL603X_SMPS_VSEL_MASK
+
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
@@ -177,6 +201,32 @@
return grp && (val == TWL6030_CFG_STATE_ON);
}
+static int twl6030reg_set_trans_state(struct regulator_dev *rdev,
+ u8 shift, u8 val)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int rval;
+ u8 mask;
+
+ /* Read CFG_TRANS register of TWL6030 */
+ rval = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_TRANS);
+
+ if (rval < 0)
+ return rval;
+
+ mask = TWL6030_CFG_TRANS_STATE_MASK << shift;
+ val = (val << shift) & mask;
+
+ /* If value is already set, no need to write to reg */
+ if (val == (rval & mask))
+ return 0;
+
+ rval &= ~mask;
+ rval |= val;
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_TRANS, rval);
+}
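+
+/*
+ * Example (illustrative): requesting that a regulator stays in AUTO while
+ * the chip sleeps programs the two-bit SLEEP field of CFG_TRANS, i.e.
+ *
+ *	twl6030reg_set_trans_state(rdev, TWL6030_CFG_TRANS_SLEEP_SHIFT,
+ *				   TWL6030_CFG_TRANS_STATE_AUTO);
+ *
+ * which clears bits 3:2 and writes 0x01 << 2 into them, exactly what the
+ * suspend enable/disable hooks further below do.
+ */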
+
static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -210,7 +260,14 @@
ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
grp << TWL6030_CFG_STATE_GRP_SHIFT |
TWL6030_CFG_STATE_ON);
-
+ /*
+ * Ensure it stays in Auto mode when we enter suspend state.
+ * (TWL6030 in sleep mode).
+ */
+ if (!ret)
+ ret = twl6030reg_set_trans_state(rdev,
+ TWL6030_CFG_TRANS_SLEEP_SHIFT,
+ TWL6030_CFG_TRANS_STATE_AUTO);
udelay(info->delay);
return ret;
@@ -247,6 +304,11 @@
(grp) << TWL6030_CFG_STATE_GRP_SHIFT |
TWL6030_CFG_STATE_OFF);
+ /* Ensure it remains OFF when we enter suspend (TWL6030 in sleep). */
+ if (!ret)
+ ret = twl6030reg_set_trans_state(rdev,
+ TWL6030_CFG_TRANS_SLEEP_SHIFT,
+ TWL6030_CFG_TRANS_STATE_OFF);
return ret;
}
@@ -357,6 +419,18 @@
return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
}
+static int twl6030ldo_suspend_enable(struct regulator_dev *rdev)
+{
+ return twl6030reg_set_trans_state(rdev, TWL6030_CFG_TRANS_SLEEP_SHIFT,
+ TWL6030_CFG_TRANS_STATE_AUTO);
+}
+
+static int twl6030ldo_suspend_disable(struct regulator_dev *rdev)
+{
+ return twl6030reg_set_trans_state(rdev, TWL6030_CFG_TRANS_SLEEP_SHIFT,
+ TWL6030_CFG_TRANS_STATE_OFF);
+}
+
/*----------------------------------------------------------------------*/
/*
@@ -570,6 +644,9 @@
.set_mode = twl6030reg_set_mode,
.get_status = twl6030reg_get_status,
+
+ .set_suspend_enable = twl6030ldo_suspend_enable,
+ .set_suspend_disable = twl6030ldo_suspend_disable,
};
/*----------------------------------------------------------------------*/
@@ -617,6 +694,9 @@
.set_mode = twl6030reg_set_mode,
.get_status = twl6030reg_get_status,
+
+ .set_suspend_enable = twl6030ldo_suspend_enable,
+ .set_suspend_disable = twl6030ldo_suspend_disable,
};
static struct regulator_ops twl6030_fixed_resource = {
@@ -827,6 +907,9 @@
.set_mode = twl6030reg_set_mode,
.get_status = twl6030reg_get_status,
+
+ .set_suspend_enable = twl6030ldo_suspend_enable,
+ .set_suspend_disable = twl6030ldo_suspend_disable,
};
/*----------------------------------------------------------------------*/
@@ -835,8 +918,8 @@
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030, twl4030fixed_ops)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
- TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
+ TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
@@ -856,24 +939,22 @@
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
@@ -903,9 +984,8 @@
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \
.base = offset, \
- .id = num, \
.delay = turnon_delay, \
.desc = { \
.name = #label, \
@@ -916,15 +996,28 @@
}, \
}
-#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+#define TWL6030_ADJUSTABLE_SMPS(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .n_voltages = TWL603X_SMPS_NUMBER_VOLTAGES, \
+ .ops = &twlsmps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \
+ .base = offset, \
.min_mV = 600, \
.max_mV = 2100, \
.desc = { \
.name = #label, \
.id = TWL6025_REG_##label, \
- .n_voltages = 63, \
+ .n_voltages = TWL603X_SMPS_NUMBER_VOLTAGES, \
.ops = &twlsmps_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -961,32 +1054,36 @@
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KAUDIO, 0x8F, 0),
+ TWL6030_ADJUSTABLE_SMPS(VDD3, 0x2e, 600, 4000),
+ TWL6030_ADJUSTABLE_SMPS(VMEM, 0x34, 600, 4000),
+ TWL6030_ADJUSTABLE_SMPS(V2V1, 0x1c, 1800, 2100),
/* 6025 are renamed compared to 6030 versions */
- TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
- TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
- TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
- TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
- TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
- TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
- TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
- TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
- TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300),
- TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
- TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
- TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16),
};
static u8 twl_get_smps_offset(void)
@@ -1014,6 +1111,7 @@
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
+ int ret;
for (i = 0, info = NULL; i < ARRAY_SIZE(twl_regs); i++) {
if (twl_regs[i].desc.id != pdev->id)
@@ -1049,6 +1147,18 @@
case TWL4030_REG_VINTDIG:
c->always_on = true;
break;
+ case TWL6030_REG_VUSB:
+ /* Program CFG_LDO_PD2 register and set VUSB bit */
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, 0x1,
+ TWL6030_CFG_LDO_PD2);
+ if (ret < 0)
+ return ret;
+
+ /* Program MISC2 register and set bit VUSB_IN_VBAT */
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
+ if (ret < 0)
+ return ret;
+ break;
default:
break;
}
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
new file mode 100644
index 0000000..af0894f
--- /dev/null
+++ b/drivers/remoteproc/Kconfig
@@ -0,0 +1,60 @@
+#
+# Generic framework for controlling remote processors
+#
+
+# Remote proc gets selected by whoever wants it.
+config REMOTE_PROC
+ tristate
+
+config REMOTE_PROC_AUTOSUSPEND
+ bool "Autosuspend support for remoteproc"
+ depends on REMOTE_PROC
+ default y
+ help
+	  Say Y here if you want the remote processor to suspend
+	  after a period of inactivity.
+
+# can't be tristate, due to omap_device_* and omap_hwmod_* dependency
+config OMAP_REMOTE_PROC
+ bool "OMAP remoteproc support"
+ depends on ARCH_OMAP4
+ select OMAP_IOMMU
+ select REMOTE_PROC
+ default y
+ help
+ Say y here to support OMAP's remote processors (dual M3
+ and DSP on OMAP4) via the remote processor framework.
+
+ Currently only supported on OMAP4.
+
+ Usually you want to say y here, in order to enable multimedia
+ use-cases to run on your platform (multimedia codecs are
+ offloaded to remote DSP processors using this framework).
+
+ It's safe to say n here if you're not interested in multimedia
+	  offloading or just want a bare minimum kernel.
+
+config OMAP_RPRES
+ bool "Remote Processor Resources"
+ depends on OMAP_REMOTE_PROC
+ default y
+ help
+	  Say Y here if you want to use the OMAP remote processor resource
+	  framework.
+
+config REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on REMOTE_PROC
+ default y
+ help
+	  Say y to enable the watchdog timer for remote cores.
+
+config REMOTEPROC_CORE_DUMP
+ bool "Support for extracting a core dump from a remote processor"
+ depends on REMOTE_PROC
+ default y
+ help
+ Say y to enable extracting a core dump from a running remote
+ processor at <debugfs>/remoteproc/<remoteproc>/core. Extracting the
+ core dump does not pause the remote processor--this must be
+ implemented separately.
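+
+# Illustrative only (not used by the build): once REMOTEPROC_CORE_DUMP is
+# enabled and debugfs is mounted, the dump can be copied out with e.g.
+#	cat /sys/kernel/debug/remoteproc/<remoteproc>/core > rproc.core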
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
new file mode 100644
index 0000000..eb0fdb1
--- /dev/null
+++ b/drivers/remoteproc/Makefile
@@ -0,0 +1,7 @@
+#
+# Generic framework for controlling remote processors
+#
+
+obj-$(CONFIG_REMOTE_PROC) += remoteproc.o
+obj-$(CONFIG_OMAP_REMOTE_PROC) += omap_remoteproc.o
+obj-$(CONFIG_OMAP_RPRES) += rpres.o rpres_dev.o
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
new file mode 100644
index 0000000..1e01a33
--- /dev/null
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -0,0 +1,608 @@
+/*
+ * Remote processor machine-specific module for OMAP4
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/sched.h>
+
+#include <plat/iommu.h>
+#include <plat/omap_device.h>
+#include <plat/remoteproc.h>
+#include <plat/mailbox.h>
+#include <plat/common.h>
+#include <plat/omap-pm.h>
+#include <plat/dmtimer.h>
+#include "../../arch/arm/mach-omap2/dvfs.h"
+#include "../../arch/arm/mach-omap2/clockdomain.h"
+
+#define PM_SUSPEND_MBOX 0xffffff07
+#define PM_SUSPEND_TIMEOUT 300
+
+struct omap_rproc_priv {
+ struct iommu *iommu;
+ int (*iommu_cb)(struct rproc *, u64, u32);
+ int (*wdt_cb)(struct rproc *);
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ struct omap_mbox *mbox;
+ void __iomem *idle;
+ u32 idle_mask;
+ void __iomem *suspend;
+ u32 suspend_mask;
+#endif
+};
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+static bool _may_suspend(struct omap_rproc_priv *rpp)
+{
+ return readl(rpp->idle) & rpp->idle_mask;
+}
+
+static int _suspend(struct omap_rproc_priv *rpp)
+{
+ unsigned long timeout = msecs_to_jiffies(PM_SUSPEND_TIMEOUT) + jiffies;
+
+ omap_mbox_msg_send(rpp->mbox, PM_SUSPEND_MBOX);
+
+ while (time_after(timeout, jiffies)) {
+ if ((readl(rpp->suspend) & rpp->suspend_mask) &&
+ (readl(rpp->idle) & rpp->idle_mask))
+ return 0;
+ schedule();
+ }
+
+ return -EIO;
+}
+
+static int omap_suspend(struct rproc *rproc, bool force)
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ if (rpp->idle && (force || _may_suspend(rpp)))
+ return _suspend(rpp);
+
+ return -EBUSY;
+}
+#endif
+
+static void omap_rproc_dump_registers(struct rproc *rproc)
+{
+ unsigned long flags;
+ char buf[64];
+ struct pt_regs regs;
+
+ if (!rproc->cdump_buf1)
+ return;
+
+	remoteproc_fill_pt_regs(&regs,
+ (struct exc_regs *)rproc->cdump_buf1);
+
+ pr_info("REGISTER DUMP FOR REMOTEPROC %s\n", rproc->name);
+	pr_info("PC is at %08lx\n", instruction_pointer(&regs));
+ pr_info("LR is at %08lx\n", regs.ARM_lr);
+ pr_info("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs.ARM_pc, regs.ARM_lr, regs.ARM_cpsr,
+ regs.ARM_sp, regs.ARM_ip, regs.ARM_fp);
+ pr_info("r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs.ARM_r10, regs.ARM_r9,
+ regs.ARM_r8);
+ pr_info("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs.ARM_r7, regs.ARM_r6,
+ regs.ARM_r5, regs.ARM_r4);
+ pr_info("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs.ARM_r3, regs.ARM_r2,
+ regs.ARM_r1, regs.ARM_r0);
+
+ flags = regs.ARM_cpsr;
+ buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
+ buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
+ buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
+ buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
+ buf[4] = '\0';
+
+ pr_info("Flags: %s IRQs o%s FIQs o%s\n",
+		buf, interrupts_enabled(&regs) ? "n" : "ff",
+		fast_interrupts_enabled(&regs) ? "n" : "ff");
+}
+
+static int
+omap_rproc_map(struct device *dev, struct iommu *obj, u32 da, u32 pa, u32 size)
+{
+ struct iotlb_entry e;
+ u32 all_bits;
+ u32 pg_size[] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
+ int size_flag[] = {MMU_CAM_PGSZ_16M, MMU_CAM_PGSZ_1M,
+ MMU_CAM_PGSZ_64K, MMU_CAM_PGSZ_4K};
+ int i, ret;
+
+ while (size) {
+ /*
+ * To find the max. page size with which both PA & VA are
+ * aligned
+ */
+ all_bits = pa | da;
+ for (i = 0; i < 4; i++) {
+ if ((size >= pg_size[i]) &&
+ ((all_bits & (pg_size[i] - 1)) == 0)) {
+ break;
+ }
+ }
+
+ memset(&e, 0, sizeof(e));
+
+ e.da = da;
+ e.pa = pa;
+ e.valid = 1;
+ e.pgsz = size_flag[i];
+ e.endian = MMU_RAM_ENDIAN_LITTLE;
+ e.elsz = MMU_RAM_ELSZ_32;
+
+ ret = iopgtable_store_entry(obj, &e);
+ if (ret) {
+ dev_err(dev, "iopgtable_store_entry fail: %d\n", ret);
+ return ret;
+ }
+
+ size -= pg_size[i];
+ da += pg_size[i];
+ pa += pg_size[i];
+ }
+
+ return 0;
+}
+
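+/*
+ * Worked example of the mapping loop above (illustrative): mapping
+ * 0x110000 bytes at da = pa = 0x90000000 consumes one 1MB entry
+ * (both addresses are 1MB-aligned) followed by one 64KB entry at
+ * 0x90100000, i.e. the largest page size that still keeps pa | da
+ * aligned is picked on every iteration.
+ */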
+
+static int omap_rproc_iommu_isr(struct iommu *iommu, u32 da, u32 errs, void *p)
+{
+ struct rproc *rproc = p;
+ struct omap_rproc_priv *rpp = rproc->priv;
+ int ret = -EIO;
+
+ if (rpp && rpp->iommu_cb)
+ ret = rpp->iommu_cb(rproc, (u64)da, errs);
+
+ return ret;
+}
+
+int omap_rproc_activate(struct omap_device *od)
+{
+ int i, ret = 0;
+ struct rproc *rproc = platform_get_drvdata(&od->pdev);
+ struct device *dev = rproc->dev;
+ struct omap_rproc_pdata *pdata = dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ struct omap_rproc_priv *rpp = rproc->priv;
+ struct iommu *iommu;
+
+ if (!rpp->iommu) {
+ iommu = iommu_get(pdata->iommu_name);
+ if (IS_ERR(iommu)) {
+ dev_err(dev, "iommu_get error: %ld\n",
+ PTR_ERR(iommu));
+ return PTR_ERR(iommu);
+ }
+ rpp->iommu = iommu;
+ }
+
+ if (!rpp->mbox)
+ rpp->mbox = omap_mbox_get(pdata->sus_mbox_name, NULL);
+#endif
+
+	/*
+	 * The domain is in HW_SUP (hw_auto), but since the remote processor
+	 * is about to be enabled the clkdm must be moved to SW_SUP
+	 * (do not let it idle).
+	 */
+ if (pdata->clkdm)
+ clkdm_wakeup(pdata->clkdm);
+
+ for (i = 0; i < pdata->timers_cnt; i++)
+ omap_dm_timer_start(timers[i].odt);
+
+ for (i = 0; i < od->hwmods_cnt; i++) {
+ ret = omap_hwmod_enable(od->hwmods[i]);
+ if (ret) {
+ for (i = 0; i < pdata->timers_cnt; i++)
+ omap_dm_timer_stop(timers[i].odt);
+ break;
+ }
+ }
+
+	/*
+	 * The domain was forced awake above; now that the remote processor
+	 * is enabled it is safe to switch the clkdm back to hw_auto
+	 * (let it idle).
+	 */
+ if (pdata->clkdm)
+ clkdm_allow_idle(pdata->clkdm);
+
+ return ret;
+}
+
+int omap_rproc_deactivate(struct omap_device *od)
+{
+ int i, ret = 0;
+ struct rproc *rproc = platform_get_drvdata(&od->pdev);
+ struct device *dev = rproc->dev;
+ struct omap_rproc_pdata *pdata = dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ struct omap_rproc_priv *rpp = rproc->priv;
+#endif
+ if (pdata->clkdm)
+ clkdm_wakeup(pdata->clkdm);
+
+ for (i = 0; i < od->hwmods_cnt; i++) {
+ ret = omap_hwmod_shutdown(od->hwmods[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < pdata->timers_cnt; i++)
+ omap_dm_timer_stop(timers[i].odt);
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ if (rpp->iommu) {
+ iommu_put(rpp->iommu);
+ rpp->iommu = NULL;
+ }
+
+ if (rpp->mbox) {
+ omap_mbox_put(rpp->mbox, NULL);
+ rpp->mbox = NULL;
+ }
+#endif
+err:
+ if (pdata->clkdm)
+ clkdm_allow_idle(pdata->clkdm);
+
+ return ret;
+}
+
+static int omap_rproc_iommu_init(struct rproc *rproc,
+ int (*callback)(struct rproc *rproc, u64 fa, u32 flags))
+{
+ struct device *dev = rproc->dev;
+ struct omap_rproc_pdata *pdata = dev->platform_data;
+ int ret, i;
+ struct iommu *iommu;
+ struct omap_rproc_priv *rpp;
+
+ rpp = kzalloc(sizeof(*rpp), GFP_KERNEL);
+ if (!rpp)
+ return -ENOMEM;
+
+ if (pdata->clkdm)
+ clkdm_wakeup(pdata->clkdm);
+ iommu_set_isr(pdata->iommu_name, omap_rproc_iommu_isr, rproc);
+ iommu_set_secure(pdata->iommu_name, rproc->secure_mode,
+ rproc->secure_ttb);
+ iommu = iommu_get(pdata->iommu_name);
+ if (IS_ERR(iommu)) {
+ ret = PTR_ERR(iommu);
+ dev_err(dev, "iommu_get error: %d\n", ret);
+ goto err_mmu;
+ }
+
+ rpp->iommu = iommu;
+ rpp->iommu_cb = callback;
+ rproc->priv = rpp;
+
+ if (!rproc->secure_mode) {
+ for (i = 0; rproc->memory_maps[i].size; i++) {
+ const struct rproc_mem_entry *me =
+ &rproc->memory_maps[i];
+
+ ret = omap_rproc_map(dev, iommu, me->da, me->pa,
+ me->size);
+ if (ret)
+ goto err_map;
+ }
+ }
+ if (pdata->clkdm)
+ clkdm_allow_idle(pdata->clkdm);
+
+ return 0;
+
+err_map:
+ iommu_put(iommu);
+err_mmu:
+ iommu_set_secure(pdata->iommu_name, false, NULL);
+ if (pdata->clkdm)
+ clkdm_allow_idle(pdata->clkdm);
+ kfree(rpp);
+ return ret;
+}
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+static int _init_pm_flags(struct rproc *rproc)
+{
+ struct omap_rproc_pdata *pdata = rproc->dev->platform_data;
+ struct omap_rproc_priv *rpp = rproc->priv;
+ struct omap_mbox *mbox;
+
+ if (!rpp->mbox) {
+ mbox = omap_mbox_get(pdata->sus_mbox_name, NULL);
+ if (IS_ERR(mbox))
+ return PTR_ERR(mbox);
+ rpp->mbox = mbox;
+ }
+ if (!pdata->idle_addr)
+ goto err_idle;
+
+ rpp->idle = ioremap(pdata->idle_addr, sizeof(u32));
+ if (!rpp->idle)
+ goto err_idle;
+
+ if (!pdata->suspend_addr)
+ goto err_suspend;
+
+ rpp->suspend = ioremap(pdata->suspend_addr, sizeof(u32));
+ if (!rpp->suspend)
+ goto err_suspend;
+
+ rpp->idle_mask = pdata->idle_mask;
+ rpp->suspend_mask = pdata->suspend_mask;
+
+ return 0;
+err_suspend:
+ iounmap(rpp->idle);
+ rpp->idle = NULL;
+err_idle:
+ omap_mbox_put(rpp->mbox, NULL);
+ rpp->mbox = NULL;
+ return -EIO;
+}
+
+static void _destroy_pm_flags(struct rproc *rproc)
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ if (rpp->mbox) {
+ omap_mbox_put(rpp->mbox, NULL);
+ rpp->mbox = NULL;
+ }
+ if (rpp->idle) {
+ iounmap(rpp->idle);
+ rpp->idle = NULL;
+ }
+ if (rpp->suspend) {
+ iounmap(rpp->suspend);
+ rpp->suspend = NULL;
+ }
+}
+#endif
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+static int omap_rproc_watchdog_init(struct rproc *rproc,
+ int (*callback)(struct rproc *rproc))
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ rpp->wdt_cb = callback;
+ return 0;
+}
+
+static int omap_rproc_watchdog_exit(struct rproc *rproc)
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+
+ rpp->wdt_cb = NULL;
+ return 0;
+}
+
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *p)
+{
+ struct rproc *rproc = p;
+ struct omap_rproc_pdata *pdata = rproc->dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+ struct omap_dm_timer *timer = NULL;
+ struct omap_rproc_priv *rpp = rproc->priv;
+ int i;
+
+ for (i = 0; i < pdata->timers_cnt; i++) {
+ if (irq == omap_dm_timer_get_irq(timers[i].odt)) {
+ timer = timers[i].odt;
+ break;
+ }
+ }
+
+ if (!timer)
+ return IRQ_NONE;
+
+ omap_dm_timer_write_status(timer, OMAP_TIMER_INT_OVERFLOW);
+
+ if (rpp->wdt_cb)
+ rpp->wdt_cb(rproc);
+
+ return IRQ_HANDLED;
+}
+#endif
+static inline int omap_rproc_start(struct rproc *rproc, u64 bootaddr)
+{
+ struct device *dev = rproc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_rproc_pdata *pdata = dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+ int i;
+ int ret = 0;
+
+ if (rproc->secure_mode) {
+ pr_err("TODO: Call secure service to authenticate\n");
+ if (ret)
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ _init_pm_flags(rproc);
+#endif
+ for (i = 0; i < pdata->timers_cnt; i++) {
+ timers[i].odt = omap_dm_timer_request_specific(timers[i].id);
+ if (!timers[i].odt) {
+ ret = -EBUSY;
+ goto out;
+ }
+ omap_dm_timer_set_source(timers[i].odt, OMAP_TIMER_SRC_SYS_CLK);
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ /* GPT 9 and 11 are used as WDT */
+ if (timers[i].id == 9 || timers[i].id == 11) {
+ ret = request_irq(omap_dm_timer_get_irq(timers[i].odt),
+ omap_rproc_watchdog_isr, IRQF_DISABLED,
+ "rproc-wdt", rproc);
+ /* Clear the counter; the remote processor will set the value */
+ omap_dm_timer_set_load(timers[i].odt, 0, 0);
+ }
+#endif
+ }
+
+ ret = omap_device_enable(pdev);
+out:
+ if (ret) {
+ while (i--) {
+ omap_dm_timer_free(timers[i].odt);
+ timers[i].odt = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static int omap_rproc_iommu_exit(struct rproc *rproc)
+{
+ struct omap_rproc_priv *rpp = rproc->priv;
+ struct omap_rproc_pdata *pdata = rproc->dev->platform_data;
+
+ if (pdata->clkdm)
+ clkdm_wakeup(pdata->clkdm);
+
+ if (rpp->iommu)
+ iommu_put(rpp->iommu);
+ kfree(rpp);
+ if (pdata->clkdm)
+ clkdm_allow_idle(pdata->clkdm);
+
+ return 0;
+}
+
+static inline int omap_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_rproc_pdata *pdata = dev->platform_data;
+ struct omap_rproc_timers_info *timers = pdata->timers;
+ int ret, i;
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ _destroy_pm_flags(rproc);
+#endif
+ ret = omap_device_idle(pdev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < pdata->timers_cnt; i++) {
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ /* GPT 9 and 11 are used as WDT */
+ if (timers[i].id == 9 || timers[i].id == 11)
+ free_irq(omap_dm_timer_get_irq(timers[i].odt), rproc);
+#endif
+ omap_dm_timer_free(timers[i].odt);
+ timers[i].odt = NULL;
+ }
+err:
+ return ret;
+}
+
+static int omap_rproc_set_lat(struct rproc *rproc, long val)
+{
+ pm_qos_update_request(rproc->qos_request, val);
+ return 0;
+}
+
+static int omap_rproc_set_l3_bw(struct rproc *rproc, long val)
+{
+ return omap_pm_set_min_bus_tput(rproc->dev, OCP_INITIATOR_AGENT, val);
+}
+
+static int omap_rproc_scale(struct rproc *rproc, long val)
+{
+ return omap_device_scale(rproc->dev, rproc->dev, val);
+}
+
+static struct rproc_ops omap_rproc_ops = {
+ .start = omap_rproc_start,
+ .stop = omap_rproc_stop,
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ .suspend = omap_suspend,
+#endif
+ .iommu_init = omap_rproc_iommu_init,
+ .iommu_exit = omap_rproc_iommu_exit,
+ .set_lat = omap_rproc_set_lat,
+ .set_bw = omap_rproc_set_l3_bw,
+ .scale = omap_rproc_scale,
+#ifdef CONFIG_REMOTEPROC_WATCHDOG
+ .watchdog_init = omap_rproc_watchdog_init,
+ .watchdog_exit = omap_rproc_watchdog_exit,
+#endif
+ .dump_registers = omap_rproc_dump_registers,
+};
+
+static int omap_rproc_probe(struct platform_device *pdev)
+{
+ struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+
+ pdata->clkdm = clkdm_lookup(pdata->clkdm_name);
+
+ return rproc_register(&pdev->dev, pdata->name, &omap_rproc_ops,
+ pdata->firmware, pdata->memory_pool,
+ THIS_MODULE, pdata->sus_timeout);
+}
+
+static int __devexit omap_rproc_remove(struct platform_device *pdev)
+{
+ struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+
+ return rproc_unregister(pdata->name);
+}
+
+static struct platform_driver omap_rproc_driver = {
+ .probe = omap_rproc_probe,
+ .remove = __devexit_p(omap_rproc_remove),
+ .driver = {
+ .name = "omap-rproc",
+ .owner = THIS_MODULE,
+ .pm = GENERIC_RPROC_PM_OPS,
+ },
+};
+
+static int __init omap_rproc_init(void)
+{
+ return platform_driver_register(&omap_rproc_driver);
+}
+/* must be ready in time for device_initcall users */
+subsys_initcall(omap_rproc_init);
+
+static void __exit omap_rproc_exit(void)
+{
+ platform_driver_unregister(&omap_rproc_driver);
+}
+module_exit(omap_rproc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OMAP Remote Processor control driver");
diff --git a/drivers/remoteproc/remoteproc.c b/drivers/remoteproc/remoteproc.c
new file mode 100644
index 0000000..12dd4dd
--- /dev/null
+++ b/drivers/remoteproc/remoteproc.c
@@ -0,0 +1,1810 @@
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Brian Swetland <swetland@google.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/remoteproc.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <plat/remoteproc.h>
+
+/* list of available remote processors on this board */
+static LIST_HEAD(rprocs);
+static DEFINE_SPINLOCK(rprocs_lock);
+
+/* debugfs parent dir */
+static struct dentry *rproc_dbg;
+
+static ssize_t rproc_format_trace_buf(struct rproc *rproc, char __user *userbuf,
+ size_t count, loff_t *ppos,
+ const void *src, int size)
+{
+ const char *buf = (const char *) src;
+ ssize_t num_copied = 0;
+ static int from_beg;
+ loff_t pos = *ppos;
+ int *w_idx;
+ int i, w_pos, ret = 0;
+
+ if (mutex_lock_interruptible(&rproc->tlock))
+ return -EINTR;
+
+ /* When src is NULL, the remoteproc is offline. */
+ if (!src) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (size < 2 * sizeof(u32)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Assume write_idx is stored in the penultimate word of the trace buffer */
+ size = size - (sizeof(u32) * 2);
+ w_idx = (int *)(buf + size);
+ w_pos = *w_idx;
+
+ if (from_beg)
+ goto print_beg;
+
+ if (pos == 0)
+ *ppos = w_pos;
+
+ for (i = w_pos; i < size && buf[i]; i++)
+ ;
+
+ if (i > w_pos)
+ num_copied =
+ simple_read_from_buffer(userbuf, count, ppos, src, i);
+ if (!num_copied) {
+ from_beg = 1;
+ *ppos = 0;
+ } else {
+ ret = num_copied;
+ goto unlock;
+ }
+print_beg:
+ for (i = 0; i < w_pos && buf[i]; i++)
+ ;
+
+ if (i) {
+ num_copied =
+ simple_read_from_buffer(userbuf, count, ppos, src, i);
+ if (!num_copied)
+ from_beg = 0;
+ ret = num_copied;
+ }
+unlock:
+ mutex_unlock(&rproc->tlock);
+ return ret;
+}
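The buffer layout assumed by rproc_format_trace_buf() above is easier to see as a picture. The sketch below is inferred from the code only; the firmware-side field names are not known here.

/*
 * Inferred trace buffer layout (offsets in bytes):
 *
 *   0                           size-8         size-4        size
 *   | circular text data ...... | write_idx    | unused word |
 *
 * A read first returns the text from write_idx to the end of the data
 * area, then wraps around and returns the text from offset 0 up to
 * write_idx.
 */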
+
+static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ /* need room for the name, a newline and a terminating null */
+ char buf[RPROC_MAX_NAME + 2];
+ int i;
+
+ i = snprintf(buf, RPROC_MAX_NAME + 2, "%s\n", rproc->name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static ssize_t rproc_version_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char *pch;
+ int len;
+
+ pch = strstr(rproc->header, "version:");
+ if (!pch)
+ return 0;
+ pch += strlen("version:") + 1;
+ len = rproc->header_len - (pch - rproc->header);
+ return simple_read_from_buffer(userbuf, count, ppos, pch, len);
+}
+
+static int rproc_open_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READONLY_FILE(name, v, l) \
+static ssize_t name## _rproc_read(struct file *filp, \
+ char __user *ubuf, size_t count, loff_t *ppos) \
+{ \
+ struct rproc *rproc = filp->private_data; \
+ return rproc_format_trace_buf(rproc, ubuf, count, ppos, v, l); \
+} \
+ \
+static const struct file_operations name ##_rproc_ops = { \
+ .read = name ##_rproc_read, \
+ .open = rproc_open_generic, \
+ .llseek = generic_file_llseek, \
+};
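For reference, an invocation such as DEBUGFS_READONLY_FILE(trace0, rproc->trace_buf0, rproc->trace_len0) further down expands roughly to the sketch below (whitespace differs from the real preprocessor output):

static ssize_t trace0_rproc_read(struct file *filp,
	char __user *ubuf, size_t count, loff_t *ppos)
{
	struct rproc *rproc = filp->private_data;
	return rproc_format_trace_buf(rproc, ubuf, count, ppos,
				      rproc->trace_buf0, rproc->trace_len0);
}

static const struct file_operations trace0_rproc_ops = {
	.read = trace0_rproc_read,
	.open = rproc_open_generic,
	.llseek = generic_file_llseek,
};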
+
+#ifdef CONFIG_REMOTEPROC_CORE_DUMP
+
+/* + 1 for the notes segment */
+#define NUM_PHDR (RPROC_MAX_MEM_ENTRIES + 1)
+
+#define CORE_STR "CORE"
+
+/* Intermediate core-dump-file format */
+struct core_rproc {
+ struct rproc *rproc;
+ /* ELF state */
+ Elf_Half e_phnum;
+
+ struct core {
+ struct elfhdr elf;
+ struct elf_phdr phdr[NUM_PHDR];
+ struct {
+ struct elf_note note_prstatus;
+ char name[sizeof(CORE_STR)];
+ struct elf_prstatus prstatus __aligned(4);
+ } core_note __packed __aligned(4);
+ } core __packed;
+
+ loff_t offset;
+};
+
+/* Return the number of segments to be written to the core file */
+static int rproc_core_map_count(const struct rproc *rproc)
+{
+ int i = 0;
+ int count = 0;
+ for (;; i++) {
+ if (!rproc->memory_maps[i].size)
+ break;
+ if (!rproc->memory_maps[i].core)
+ continue;
+ count++;
+ }
+
+ /* The Ducati has a low number of segments */
+ if (count > PN_XNUM)
+ return -1;
+
+ return count;
+}
+
+/* Copied from fs/binfmt_elf.c */
+static void fill_elf_header(struct elfhdr *elf, int segs)
+{
+ memset(elf, 0, sizeof(*elf));
+
+ memcpy(elf->e_ident, ELFMAG, SELFMAG);
+ elf->e_ident[EI_CLASS] = ELFCLASS32;
+ elf->e_ident[EI_DATA] = ELFDATA2LSB;
+ elf->e_ident[EI_VERSION] = EV_CURRENT;
+ elf->e_ident[EI_OSABI] = ELFOSABI_NONE;
+
+ elf->e_type = ET_CORE;
+ elf->e_machine = EM_ARM;
+ elf->e_version = EV_CURRENT;
+ elf->e_phoff = sizeof(struct elfhdr);
+ elf->e_flags = EF_ARM_EABI_VER5;
+ elf->e_ehsize = sizeof(struct elfhdr);
+ elf->e_phentsize = sizeof(struct elf_phdr);
+ elf->e_phnum = segs;
+
+ return;
+}
+
+static void fill_elf_segment_headers(struct core_rproc *d)
+{
+ int i = 0;
+ int hi = 0;
+ loff_t offset = d->offset;
+ for (;; i++) {
+ u32 size;
+
+ size = d->rproc->memory_maps[i].size;
+ if (!size)
+ break;
+ if (!d->rproc->memory_maps[i].core)
+ continue;
+
+ BUG_ON(hi >= d->e_phnum - 1);
+
+ d->core.phdr[hi].p_type = PT_LOAD;
+ d->core.phdr[hi].p_offset = offset;
+ d->core.phdr[hi].p_vaddr = d->rproc->memory_maps[i].da;
+ d->core.phdr[hi].p_paddr = d->rproc->memory_maps[i].pa;
+ d->core.phdr[hi].p_filesz = size;
+ d->core.phdr[hi].p_memsz = size;
+ /* FIXME: get these from the Ducati */
+ d->core.phdr[hi].p_flags = PF_R | PF_W | PF_X;
+
+ pr_debug("%s: phdr type %d f_off %08x va %08x pa %08x fl %x\n",
+ __func__,
+ d->core.phdr[hi].p_type,
+ d->core.phdr[hi].p_offset,
+ d->core.phdr[hi].p_vaddr,
+ d->core.phdr[hi].p_paddr,
+ d->core.phdr[hi].p_flags);
+
+ offset += size;
+ hi++;
+ }
+}
+
+static int setup_rproc_elf_core_dump(struct core_rproc *d)
+{
+ short __phnum;
+ struct elf_phdr *nphdr;
+ struct exc_regs *xregs = d->rproc->cdump_buf1;
+ struct pt_regs *regs =
+ (struct pt_regs *)&d->core.core_note.prstatus.pr_reg;
+
+ memset(&d->core.elf, 0, sizeof(d->core.elf));
+
+ __phnum = rproc_core_map_count(d->rproc);
+ if (__phnum < 0 || __phnum > ARRAY_SIZE(d->core.phdr))
+ return -EIO;
+ d->e_phnum = __phnum + 1; /* + 1 for notes */
+
+ pr_info("number of segments: %d\n", d->e_phnum);
+
+ fill_elf_header(&d->core.elf, d->e_phnum);
+
+ nphdr = d->core.phdr + __phnum;
+ nphdr->p_type = PT_NOTE;
+ nphdr->p_offset = 0;
+ nphdr->p_vaddr = 0;
+ nphdr->p_paddr = 0;
+ nphdr->p_filesz = 0;
+ nphdr->p_memsz = 0;
+ nphdr->p_flags = 0;
+ nphdr->p_align = 0;
+
+ /* The notes start right after the phdr array. Adjust p_filesz
+ * accordingly if you add more notes
+ */
+ nphdr->p_filesz = sizeof(d->core.core_note);
+ nphdr->p_offset = offsetof(struct core, core_note);
+
+ d->core.core_note.note_prstatus.n_namesz = sizeof(CORE_STR);
+ d->core.core_note.note_prstatus.n_descsz =
+ sizeof(struct elf_prstatus);
+ d->core.core_note.note_prstatus.n_type = NT_PRSTATUS;
+ memcpy(d->core.core_note.name, CORE_STR, sizeof(CORE_STR));
+
+ remoteproc_fill_pt_regs(regs, xregs);
+
+ /* We ignore the NVIC registers for now */
+
+ d->offset = sizeof(struct core);
+ d->offset = roundup(d->offset, PAGE_SIZE);
+ fill_elf_segment_headers(d);
+ return 0;
+}
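Putting the pieces together, the core image produced by setup_rproc_elf_core_dump() and streamed out later by core_rproc_read() has roughly the following shape (a sketch; exact offsets depend on the structure sizes):

/*
 *   0                           struct elfhdr
 *   e_phoff                     elf_phdr[e_phnum]: one PT_LOAD per dumped
 *                               region plus one PT_NOTE
 *   offsetof(core, core_note)   NT_PRSTATUS note holding the exception regs
 *   roundup(sizeof(struct core), PAGE_SIZE)
 *                               segment data, one PT_LOAD region after another
 */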
+
+static int core_rproc_open(struct inode *inode, struct file *filp)
+{
+ int i;
+ struct core_rproc *d;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->rproc = inode->i_private;
+ filp->private_data = d;
+
+ setup_rproc_elf_core_dump(d);
+
+ if (0) {
+ const struct rproc *rproc;
+ rproc = d->rproc;
+ for (i = 0; rproc->memory_maps[i].size; i++) {
+ pr_info("%s: memory_map[%d] pa %08x sz %d core %d\n",
+ __func__,
+ i,
+ rproc->memory_maps[i].pa,
+ rproc->memory_maps[i].size,
+ rproc->memory_maps[i].core);
+ }
+ }
+
+ return 0;
+}
+
+static int core_rproc_release(struct inode *inode, struct file *filp)
+{
+ pr_info("%s\n", __func__);
+ kfree(filp->private_data);
+ return 0;
+}
+
+/* Given an offset to read from, return the index of the memory-map region to
+ * read from.
+ */
+static int rproc_memory_map_index(const struct rproc *rproc, loff_t *off)
+{
+ int i = 0;
+ for (;; i++) {
+ int size = rproc->memory_maps[i].size;
+
+ if (!size)
+ break;
+ if (!rproc->memory_maps[i].core)
+ continue;
+ if (*off < size)
+ return i;
+
+ *off -= size;
+ }
+
+ return -1;
+}
+
+ssize_t core_rproc_write(struct file *filp,
+ const char __user *buffer, size_t count, loff_t *off)
+{
+ char cmd[100];
+ int cmdlen;
+ struct core_rproc *d = filp->private_data;
+ struct rproc *rproc = d->rproc;
+
+ cmdlen = min(sizeof(cmd) - 1, count);
+ if (copy_from_user(cmd, buffer, cmdlen))
+ return -EFAULT;
+ cmd[cmdlen] = 0;
+
+ if (!strncmp(cmd, "enable", 6)) {
+ pr_info("remoteproc %s halt on crash ENABLED\n", rproc->name);
+ rproc->halt_on_crash = true;
+ goto done;
+ } else if (!strncmp(cmd, "disable", 7)) {
+ pr_info("remoteproc %s halt on crash DISABLED\n", rproc->name);
+ rproc->halt_on_crash = false;
+ /* If halt-on-crash is disabled after the remote processor has
+ * already crashed, let the pending crash recovery continue so it
+ * can be handled normally.
+ */
+ if (rproc->state != RPROC_CRASHED)
+ goto done;
+ } else if (strncmp(cmd, "continue", 8)) {
+ pr_err("%s: invalid command: expecting \"enable\"," \
+ "\"disable\", or \"continue\"\n", __func__);
+ return -EINVAL;
+ }
+
+ if (rproc->state == RPROC_CRASHED) {
+ pr_info("remoteproc %s: resuming crash recovery\n",
+ rproc->name);
+ blocking_notifier_call_chain(&rproc->nbh, RPROC_ERROR, NULL);
+ }
+
+done:
+ *off += count;
+ return count;
+}
+
+static ssize_t core_rproc_read(struct file *filp,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ const struct core_rproc *d = filp->private_data;
+ const struct rproc *rproc = d->rproc;
+ int index;
+ loff_t pos;
+ size_t remaining = count;
+ ssize_t copied = 0;
+
+ pr_debug("%s count %d off %lld\n", __func__, count, *ppos);
+
+ /* copy the ELF and segment header first */
+ if (*ppos < d->offset) {
+ copied = simple_read_from_buffer(userbuf, count,
+ ppos, &d->core, d->offset);
+ if (copied < 0) {
+ pr_err("%s: could not copy ELF header\n", __func__);
+ return -EIO;
+ }
+
+ pr_debug("%s: copied %d/%lld from ELF header\n", __func__,
+ copied, d->offset);
+ remaining -= copied;
+ }
+
+ /* copy the data */
+ while (remaining) {
+ size_t remaining_in_region;
+ const struct rproc_mem_entry *r;
+ void __iomem *kvaddr;
+
+ pos = *ppos - d->offset;
+ index = rproc_memory_map_index(rproc, &pos);
+ if (index < 0) {
+ pr_info("%s: EOF at off %lld\n", __func__, *ppos);
+ break;
+ }
+
+ r = &rproc->memory_maps[index];
+
+ remaining_in_region = r->size - pos;
+ if (remaining_in_region > remaining)
+ remaining_in_region = remaining;
+
+ pr_debug("%s: iomap 0x%x size %d\n", __func__, r->pa, r->size);
+ kvaddr = ioremap(r->pa, r->size);
+ if (!kvaddr) {
+ pr_err("%s: iomap error: region %d (phys 0x%08x size %d)\n",
+ __func__, index, r->pa, r->size);
+ return -EIO;
+ }
+
+ pr_debug("%s: off %lld -> [%d](pa 0x%08x off %lld sz %d)\n",
+ __func__,
+ *ppos, index, r->pa, pos, r->size);
+
+ if (copy_to_user(userbuf + copied, kvaddr + pos,
+ remaining_in_region)) {
+ pr_err("%s: copy_to_user error\n", __func__);
+ return -EFAULT;
+ }
+
+ iounmap(kvaddr);
+
+ copied += remaining_in_region;
+ *ppos += remaining_in_region;
+ BUG_ON(remaining < remaining_in_region);
+ remaining -= remaining_in_region;
+ }
+
+ return copied;
+}
+
+static const struct file_operations core_rproc_ops = {
+ .read = core_rproc_read,
+ .write = core_rproc_write,
+ .open = core_rproc_open,
+ .release = core_rproc_release,
+ .llseek = generic_file_llseek,
+};
+#endif /* CONFIG_REMOTEPROC_CORE_DUMP */
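In practice this interface is driven from user space through debugfs. A sketch of the expected interaction, assuming debugfs is mounted at its usual location and the directory name comes from dev_name():

/*
 * /sys/kernel/debug/remoteproc/<dev_name>/core
 *
 *   write "enable"   - halt the remote processor on a crash (defer recovery)
 *   write "disable"  - turn halt-on-crash off; if already crashed, resume recovery
 *   write "continue" - resume crash recovery of an already crashed processor
 *   read             - stream out the ELF core image assembled above
 */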
+
+static const struct file_operations rproc_name_ops = {
+ .read = rproc_name_read,
+ .open = rproc_open_generic,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations rproc_version_ops = {
+ .read = rproc_version_read,
+ .open = rproc_open_generic,
+ .llseek = generic_file_llseek,
+};
+
+DEBUGFS_READONLY_FILE(trace0, rproc->trace_buf0, rproc->trace_len0);
+DEBUGFS_READONLY_FILE(trace1, rproc->trace_buf1, rproc->trace_len1);
+DEBUGFS_READONLY_FILE(trace0_last, rproc->last_trace_buf0,
+ rproc->last_trace_len0);
+DEBUGFS_READONLY_FILE(trace1_last, rproc->last_trace_buf1,
+ rproc->last_trace_len1);
+DEBUGFS_READONLY_FILE(cdump0, rproc->cdump_buf0, rproc->cdump_len0);
+DEBUGFS_READONLY_FILE(cdump1, rproc->cdump_buf1, rproc->cdump_len1);
+
+#define DEBUGFS_ADD(name) \
+ debugfs_create_file(#name, 0444, rproc->dbg_dir, \
+ rproc, &name## _rproc_ops)
+
+/**
+ * __find_rproc_by_name - find a registered remote processor by name
+ * @name: name of the remote processor
+ *
+ * Internal function that returns the rproc named @name, or NULL if @name
+ * does not exist.
+ */
+static struct rproc *__find_rproc_by_name(const char *name)
+{
+ struct rproc *rproc;
+ struct list_head *tmp;
+
+ spin_lock(&rprocs_lock);
+
+ list_for_each(tmp, &rprocs) {
+ rproc = list_entry(tmp, struct rproc, next);
+ if (!strcmp(rproc->name, name))
+ break;
+ rproc = NULL;
+ }
+
+ spin_unlock(&rprocs_lock);
+
+ return rproc;
+}
+
+/**
+ * rproc_da_to_pa - convert a device (virtual) address to its physical address
+ * @maps: the remote processor's memory mappings array
+ * @da: a device address (as seen by the remote processor)
+ * @pa: pointer to the physical address result
+ *
+ * This function converts @da to its physical address (pa) by going through
+ * @maps, looking for a mapping that contains @da, and then calculating the
+ * appropriate pa.
+ *
+ * On success 0 is returned, and the @pa is updated with the result.
+ * Otherwise, -EINVAL is returned.
+ */
+static int
+rproc_da_to_pa(const struct rproc_mem_entry *maps, u64 da, phys_addr_t *pa)
+{
+ int i;
+ u64 offset;
+
+ for (i = 0; maps[i].size; i++) {
+ const struct rproc_mem_entry *me = &maps[i];
+
+ if (da >= me->da && da < (me->da + me->size)) {
+ offset = da - me->da;
+ pr_debug("%s: matched mem entry no. %d\n",
+ __func__, i);
+ *pa = me->pa + offset;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
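A hedged usage sketch of the translation; the addresses and sizes below are illustrative only:

const struct rproc_mem_entry maps[] = {
	{ .da = 0x20000000, .pa = 0x9c000000, .size = 0x100000 },
	{ },	/* zero-sized entry terminates the walk */
};
phys_addr_t pa;
int err = rproc_da_to_pa(maps, 0x20001000, &pa);
/* err == 0 and pa == 0x9c001000 (0x9c000000 + offset 0x1000) */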
+
+static int rproc_mmu_fault_isr(struct rproc *rproc, u64 da, u32 flags)
+{
+ dev_err(rproc->dev, "%s\n", __func__);
+ schedule_work(&rproc->error_work);
+ return -EIO;
+}
+
+static int rproc_watchdog_isr(struct rproc *rproc)
+{
+ dev_err(rproc->dev, "%s\n", __func__);
+ schedule_work(&rproc->error_work);
+ return 0;
+}
+
+static int rproc_crash(struct rproc *rproc)
+{
+ init_completion(&rproc->error_comp);
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ pm_runtime_dont_use_autosuspend(rproc->dev);
+#endif
+ if (rproc->ops->dump_registers)
+ rproc->ops->dump_registers(rproc);
+
+ if (rproc->trace_buf0 && rproc->last_trace_buf0)
+ memcpy(rproc->last_trace_buf0, rproc->trace_buf0,
+ rproc->last_trace_len0);
+ if (rproc->trace_buf1 && rproc->last_trace_buf1)
+ memcpy(rproc->last_trace_buf1, rproc->trace_buf1,
+ rproc->last_trace_len1);
+ rproc->state = RPROC_CRASHED;
+
+ return 0;
+}
+
+static int _event_notify(struct rproc *rproc, int type, void *data)
+{
+ if (type == RPROC_ERROR) {
+ mutex_lock(&rproc->lock);
+ /* only notify first crash */
+ if (rproc->state == RPROC_CRASHED) {
+ mutex_unlock(&rproc->lock);
+ return 0;
+ }
+ rproc_crash(rproc);
+ mutex_unlock(&rproc->lock);
+ /* If halt_on_crash do not notify the error */
+ pr_info("remoteproc: %s has crashed\n", rproc->name);
+ if (rproc->halt_on_crash) {
+ /* FIXME: send uevent here */
+ pr_info("remoteproc: %s: halt-on-crash enabled: "
+ "deferring crash recovery\n", rproc->name);
+ return 0;
+ }
+ }
+
+ return blocking_notifier_call_chain(&rproc->nbh, type, data);
+}
+
+/**
+ * rproc_start - power on the remote processor and let it start running
+ * @rproc: the remote processor
+ * @bootaddr: address of first instruction to execute (optional)
+ *
+ * Start a remote processor (i.e. power it on, take it out of reset, etc..)
+ */
+static void rproc_start(struct rproc *rproc, u64 bootaddr)
+{
+ struct device *dev = rproc->dev;
+ int err;
+
+ err = mutex_lock_interruptible(&rproc->lock);
+ if (err) {
+ dev_err(dev, "can't lock remote processor %d\n", err);
+ return;
+ }
+
+ if (rproc->ops->iommu_init) {
+ err = rproc->ops->iommu_init(rproc, rproc_mmu_fault_isr);
+ if (err) {
+ dev_err(dev, "can't configure iommu %d\n", err);
+ goto unlock_mutex;
+ }
+ }
+
+ if (rproc->ops->watchdog_init) {
+ err = rproc->ops->watchdog_init(rproc, rproc_watchdog_isr);
+ if (err) {
+ dev_err(dev, "can't configure watchdog timer %d\n",
+ err);
+ goto wdt_error;
+ }
+ }
+
+#ifdef CONFIG_REMOTEPROC_CORE_DUMP
+ debugfs_create_file("core", 0400, rproc->dbg_dir,
+ rproc, &core_rproc_ops);
+#endif
+
+ err = rproc->ops->start(rproc, bootaddr);
+ if (err) {
+ dev_err(dev, "can't start rproc %s: %d\n", rproc->name, err);
+ goto start_error;
+ }
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, rproc->sus_timeout);
+ pm_runtime_get_noresume(rproc->dev);
+ pm_runtime_set_active(rproc->dev);
+ if (!rproc->secure_mode)
+ pm_runtime_enable(rproc->dev);
+ pm_runtime_mark_last_busy(rproc->dev);
+ pm_runtime_put_autosuspend(rproc->dev);
+#endif
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now up\n", rproc->name);
+ rproc->secure_ok = true;
+ complete_all(&rproc->secure_restart);
+ mutex_unlock(&rproc->lock);
+
+ return;
+
+ /*
+ * signal always, as we would need a notification in both the
+ * normal->secure & secure->normal mode transitions, otherwise
+ * we would have to introduce one more variable.
+ */
+start_error:
+ if (rproc->ops->watchdog_exit)
+ rproc->ops->watchdog_exit(rproc);
+wdt_error:
+ if (rproc->ops->iommu_exit)
+ rproc->ops->iommu_exit(rproc);
+unlock_mutex:
+ rproc->secure_ok = false;
+ complete_all(&rproc->secure_restart);
+ mutex_unlock(&rproc->lock);
+}
+
+static void rproc_reset_poolmem(struct rproc *rproc)
+{
+ struct rproc_mem_pool *pool = rproc->memory_pool;
+
+ if (!pool || !pool->mem_base || !pool->mem_size) {
+ pr_warn("invalid pool\n");
+ return;
+ }
+
+ pool->cur_base = pool->mem_base;
+ pool->cur_size = pool->mem_size;
+}
+
+static int rproc_add_mem_entry(struct rproc *rproc, struct fw_resource *rsc)
+{
+ struct rproc_mem_entry *me = rproc->memory_maps;
+ int i = 0;
+ int ret = 0;
+
+ while (me->da || me->pa || me->size) {
+ me += 1;
+ i++;
+ if (i == RPROC_MAX_MEM_ENTRIES) {
+ ret = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!ret) {
+ me->da = rsc->da;
+ me->pa = (phys_addr_t)rsc->pa;
+ me->size = rsc->len;
+#ifdef CONFIG_REMOTEPROC_CORE_DUMP
+ /* FIXME: ION heaps are reported as RSC_CARVEOUT. We need a
+ * better way to understand which sections are for
+ * code/stack/heap/static data, and which belong to the
+ * carveouts we don't care about in a core dump.
+ * Perhaps the ION carveout should be reported as RSC_DEVMEM.
+ */
+ me->core = (rsc->type == RSC_CARVEOUT && rsc->pa != 0xba300000);
+#endif
+ }
+
+ return ret;
+}
+
+static int rproc_alloc_poolmem(struct rproc *rproc, u32 size, phys_addr_t *pa)
+{
+ struct rproc_mem_pool *pool = rproc->memory_pool;
+
+ *pa = 0;
+ if (!pool || !pool->mem_base || !pool->mem_size) {
+ pr_warn("invalid pool\n");
+ return -EINVAL;
+ }
+ if (pool->cur_size < size) {
+ pr_warn("out of carveout memory\n");
+ return -ENOMEM;
+ }
+
+ *pa = pool->cur_base;
+ pool->cur_base += size;
+ pool->cur_size -= size;
+ return 0;
+}
+
+static int rproc_check_poolmem(struct rproc *rproc, u32 size, phys_addr_t pa)
+{
+ struct rproc_mem_pool *pool = rproc->memory_pool;
+
+ if (!pool || !pool->st_base || !pool->st_size) {
+ pr_warn("invalid pool\n");
+ return -EINVAL;
+ }
+
+ if (pa < pool->st_base || pa + size > pool->st_base + pool->st_size) {
+ pr_warn("section size does not fit within carveout memory\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc,
+ int len, u64 *bootaddr)
+{
+ struct device *dev = rproc->dev;
+ phys_addr_t pa;
+ u64 da;
+ u64 trace_da0 = 0;
+ u64 trace_da1 = 0;
+ u64 cdump_da0 = 0;
+ u64 cdump_da1 = 0;
+ int ret = 0;
+
+ while (len >= sizeof(*rsc) && !ret) {
+ da = rsc->da;
+ pa = rsc->pa;
+ dev_dbg(dev, "resource: type %d, da 0x%llx, pa 0x%llx, "
+ "mapped pa: 0x%x, len 0x%x, reserved 0x%x, "
+ "name %s\n", rsc->type, rsc->da, rsc->pa, pa,
+ rsc->len, rsc->reserved, rsc->name);
+
+ if (rsc->reserved)
+ dev_warn(dev, "nonzero reserved\n");
+
+ switch (rsc->type) {
+ case RSC_TRACE:
+ if (trace_da0 && trace_da1) {
+ dev_warn(dev, "skipping extra trace rsc %s\n",
+ rsc->name);
+ break;
+ }
+
+ /* store the da for processing at the end */
+ if (!trace_da0) {
+ rproc->trace_len0 = rsc->len;
+ rproc->last_trace_len0 = rsc->len;
+ trace_da0 = da;
+ } else {
+ rproc->trace_len1 = rsc->len;
+ rproc->last_trace_len1 = rsc->len;
+ trace_da1 = da;
+ }
+ break;
+ case RSC_CRASHDUMP:
+ if (rproc->cdump_buf0 && rproc->cdump_buf1) {
+ dev_warn(dev, "skipping extra trace rsc %s\n",
+ rsc->name);
+ break;
+ }
+ /* store the da for processing at the end */
+ if (!cdump_da0) {
+ rproc->cdump_len0 = rsc->len;
+ cdump_da0 = da;
+ } else {
+ rproc->cdump_len1 = rsc->len;
+ cdump_da1 = da;
+ }
+ break;
+ case RSC_BOOTADDR:
+ *bootaddr = da;
+ break;
+ case RSC_DEVMEM:
+ ret = rproc_add_mem_entry(rproc, rsc);
+ if (ret) {
+ dev_err(dev, "can't add mem_entry %s\n",
+ rsc->name);
+ break;
+ }
+ break;
+ case RSC_CARVEOUT:
+ if (!pa) {
+ ret = rproc_alloc_poolmem(rproc, rsc->len, &pa);
+ if (ret) {
+ dev_err(dev, "can't alloc poolmem %s\n",
+ rsc->name);
+ break;
+ }
+ rsc->pa = pa;
+ } else {
+ ret = rproc_check_poolmem(rproc, rsc->len, pa);
+ if (ret) {
+ dev_err(dev, "static memory for %s "
+ "doesn't belong to poolmem\n",
+ rsc->name);
+ break;
+ }
+ }
+ ret = rproc_add_mem_entry(rproc, rsc);
+ if (ret) {
+ dev_err(dev, "can't add mem_entry %s\n",
+ rsc->name);
+ break;
+ }
+ break;
+ default:
+ /* we don't support much right now, so use debug level */
+ dev_dbg(dev, "unsupported resource type %d\n",
+ rsc->type);
+ break;
+ }
+
+ rsc++;
+ len -= sizeof(*rsc);
+ }
+
+ if (ret)
+ goto error;
+
+ /*
+ * post-process trace buffers, as we cannot rely on the order of the
+ * trace section and the carveout sections.
+ *
+ * trace buffer memory _is_ normal memory, so we cast away the
+ * __iomem to make sparse happy
+ */
+
+ if (mutex_lock_interruptible(&rproc->tlock))
+ goto error;
+
+ if (trace_da0) {
+ ret = rproc_da_to_pa(rproc->memory_maps, trace_da0, &pa);
+ if (ret)
+ goto unlock;
+ rproc->trace_buf0 = (__force void *)
+ ioremap_nocache(pa, rproc->trace_len0);
+ if (rproc->trace_buf0) {
+ DEBUGFS_ADD(trace0);
+ if (!rproc->last_trace_buf0) {
+ rproc->last_trace_buf0 = kzalloc(sizeof(u32) *
+ rproc->last_trace_len0,
+ GFP_KERNEL);
+ if (!rproc->last_trace_buf0) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ DEBUGFS_ADD(trace0_last);
+ }
+ } else {
+ dev_err(dev, "can't ioremap trace buffer0\n");
+ ret = -EIO;
+ goto unlock;
+ }
+ }
+ if (trace_da1) {
+ ret = rproc_da_to_pa(rproc->memory_maps, trace_da1, &pa);
+ if (ret)
+ goto unlock;
+ rproc->trace_buf1 = (__force void *)
+ ioremap_nocache(pa, rproc->trace_len1);
+ if (rproc->trace_buf1) {
+ DEBUGFS_ADD(trace1);
+ if (!rproc->last_trace_buf1) {
+ rproc->last_trace_buf1 = kzalloc(sizeof(u32) *
+ rproc->last_trace_len1,
+ GFP_KERNEL);
+ if (!rproc->last_trace_buf1) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ DEBUGFS_ADD(trace1_last);
+ }
+ } else {
+ dev_err(dev, "can't ioremap trace buffer1\n");
+ ret = -EIO;
+ goto unlock;
+ }
+ }
+
+ /*
+ * post-process crash-dump buffers, as we cannot rely on the order of
+ * the crash-dump section and the carveout sections.
+ *
+ * crash-dump memory _is_ normal memory, so we cast away the __iomem to
+ * make sparse happy
+ */
+ if (cdump_da0) {
+ ret = rproc_da_to_pa(rproc->memory_maps, cdump_da0, &pa);
+ if (ret)
+ goto unlock;
+ rproc->cdump_buf0 = (__force void *)
+ ioremap_nocache(pa, rproc->cdump_len0);
+ if (rproc->cdump_buf0)
+ DEBUGFS_ADD(cdump0);
+ else {
+ dev_err(dev, "can't ioremap cdump buffer0\n");
+ ret = -EIO;
+ goto unlock;
+ }
+ }
+ if (cdump_da1) {
+ ret = rproc_da_to_pa(rproc->memory_maps, cdump_da1, &pa);
+ if (ret)
+ goto unlock;
+ rproc->cdump_buf1 = (__force void *)
+ ioremap_nocache(pa, rproc->cdump_len1);
+ if (rproc->cdump_buf1)
+ DEBUGFS_ADD(cdump1);
+ else {
+ dev_err(dev, "can't ioremap cdump buffer1\n");
+ ret = -EIO;
+ }
+ }
+
+unlock:
+ mutex_unlock(&rproc->tlock);
+
+error:
+ if (ret && rproc->dbg_dir) {
+ debugfs_remove_recursive(rproc->dbg_dir);
+ rproc->dbg_dir = NULL;
+ }
+ return ret;
+}
+
+static int rproc_process_fw(struct rproc *rproc, struct fw_section *section,
+ int left, u64 *bootaddr)
+{
+ struct device *dev = rproc->dev;
+ phys_addr_t pa;
+ u32 len, type;
+ u64 da;
+ int ret = 0;
+ void *ptr;
+ bool copy;
+
+ /* first section should be FW_RESOURCE section */
+ if (section->type != FW_RESOURCE) {
+ dev_err(dev, "first section is not FW_RESOURCE: type %u found",
+ section->type);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ while (left > sizeof(struct fw_section)) {
+ da = section->da;
+ len = section->len;
+ type = section->type;
+ copy = true;
+
+ dev_dbg(dev, "section: type %d da 0x%llx len 0x%x\n",
+ type, da, len);
+
+ left -= sizeof(struct fw_section);
+ if (left < section->len) {
+ dev_err(dev, "BIOS image is truncated\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* a resource table needs special handling */
+ if (section->type == FW_RESOURCE) {
+ ret = rproc_handle_resources(rproc,
+ (struct fw_resource *) section->content,
+ len, bootaddr);
+ if (ret) {
+ break;
+ }
+ }
+
+ if (section->type <= FW_DATA) {
+ ret = rproc_da_to_pa(rproc->memory_maps, da, &pa);
+ if (ret) {
+ dev_err(dev, "rproc_da_to_pa failed:%d\n", ret);
+ break;
+ }
+ } else if (rproc->secure_mode) {
+ pa = da;
+ if (section->type == FW_MMU)
+ rproc->secure_ttb = (void *)pa;
+ } else
+ copy = false;
+
+ dev_dbg(dev, "da 0x%llx pa 0x%x len 0x%x\n", da, pa, len);
+
+ if (copy) {
+ /* ioremapping normal memory, so make sparse happy */
+ ptr = (__force void *) ioremap_nocache(pa, len);
+ if (!ptr) {
+ dev_err(dev, "can't ioremap 0x%x\n", pa);
+ ret = -ENOMEM;
+ break;
+ }
+
+ memcpy(ptr, section->content, len);
+
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) ptr);
+ }
+
+ section = (struct fw_section *)(section->content + len);
+ left -= len;
+ }
+
+exit:
+ return ret;
+}
+
+static void rproc_loader_cont(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+ struct device *dev = rproc->dev;
+ const char *fwfile = rproc->firmware;
+ u64 bootaddr = 0;
+ struct fw_header *image;
+ struct fw_section *section;
+ int left, ret;
+
+ if (!fw) {
+ dev_err(dev, "%s: failed to load %s\n", __func__, fwfile);
+ goto complete_fw;
+ }
+
+ dev_info(dev, "Loaded BIOS image %s, size %d\n", fwfile, fw->size);
+
+ /* make sure this image is sane */
+ if (fw->size < sizeof(struct fw_header)) {
+ dev_err(dev, "Image is too small\n");
+ goto out;
+ }
+
+ image = (struct fw_header *) fw->data;
+
+ if (memcmp(image->magic, "RPRC", 4)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ goto out;
+ }
+
+ dev_info(dev, "BIOS image version is %d\n", image->version);
+
+ rproc->header = kzalloc(image->header_len, GFP_KERNEL);
+ if (!rproc->header) {
+ dev_err(dev, "%s: kzalloc failed\n", __func__);
+ goto out;
+ }
+ memcpy(rproc->header, image->header, image->header_len);
+ rproc->header_len = image->header_len;
+
+ /* Ensure we recognize this BIOS version: */
+ if (image->version != RPROC_BIOS_VERSION) {
+ dev_err(dev, "Expected BIOS version: %d!\n",
+ RPROC_BIOS_VERSION);
+ goto out;
+ }
+
+ /* now process the image, section by section */
+ section = (struct fw_section *)(image->header + image->header_len);
+
+ left = fw->size - sizeof(struct fw_header) - image->header_len;
+
+ ret = rproc_process_fw(rproc, section, left, &bootaddr);
+ if (ret) {
+ dev_err(dev, "Failed to process the image: %d\n", ret);
+ goto out;
+ }
+
+ rproc_start(rproc, bootaddr);
+
+out:
+ release_firmware(fw);
+complete_fw:
+ /* allow all contexts calling rproc_put() to proceed */
+ complete_all(&rproc->firmware_loading_complete);
+}
+
+static int rproc_loader(struct rproc *rproc)
+{
+ const char *fwfile = rproc->firmware;
+ struct device *dev = rproc->dev;
+ int ret;
+
+ if (!fwfile) {
+ dev_err(dev, "%s: no firmware to load\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * allow building remoteproc as built-in kernel code, without
+ * hanging the boot process
+ */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, fwfile,
+ dev, GFP_KERNEL, rproc, rproc_loader_cont);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rproc_set_secure(const char *name, bool enable)
+{
+ struct rproc *rproc;
+ int ret;
+
+ rproc = __find_rproc_by_name(name);
+ if (!rproc) {
+ pr_err("can't find remote processor %s\n", name);
+ return -ENODEV;
+ }
+
+ /*
+ * set the secure_mode here, the secure_ttb will be filled up during
+ * the reload process.
+ */
+ if (mutex_lock_interruptible(&rproc->secure_lock))
+ return -EINTR;
+ rproc->secure_mode = enable;
+ rproc->secure_ttb = NULL;
+ rproc->secure_ok = false;
+ init_completion(&rproc->secure_restart);
+
+ /*
+ * restart the processor, the mode will dictate regular load or
+ * secure load
+ */
+ _event_notify(rproc, RPROC_SECURE, (void *)enable);
+
+ /* block until the restart is complete */
+ if (wait_for_completion_interruptible(&rproc->secure_restart)) {
+ pr_err("error waiting restart completion\n");
+ ret = -EINTR;
+ goto out;
+ }
+
+ ret = rproc->secure_ok ? 0 : -EACCES;
+out:
+ mutex_unlock(&rproc->secure_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_secure);
+
+int rproc_error_notify(struct rproc *rproc)
+{
+ return _event_notify(rproc, RPROC_ERROR, NULL);
+}
+EXPORT_SYMBOL_GPL(rproc_error_notify);
+
+struct rproc *rproc_get(const char *name)
+{
+ struct rproc *rproc, *ret = NULL;
+ struct device *dev;
+ int err;
+
+ rproc = __find_rproc_by_name(name);
+ if (!rproc) {
+ pr_err("can't find remote processor %s\n", name);
+ return NULL;
+ }
+
+ dev = rproc->dev;
+
+ err = mutex_lock_interruptible(&rproc->lock);
+ if (err) {
+ dev_err(dev, "can't lock remote processor %s\n", name);
+ return NULL;
+ }
+
+ if (rproc->state == RPROC_CRASHED) {
+ mutex_unlock(&rproc->lock);
+ if (wait_for_completion_interruptible(&rproc->error_comp)) {
+ dev_err(dev, "error waiting error completion\n");
+ return NULL;
+ }
+ mutex_lock(&rproc->lock);
+ }
+
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(rproc->owner)) {
+ dev_err(dev, "%s: can't get owner\n", __func__);
+ goto unlock_mutex;
+ }
+
+ /* bail if rproc is already powered up */
+ if (rproc->count++) {
+ ret = rproc;
+ goto unlock_mutex;
+ }
+
+ /* rproc_put() calls should wait until async loader completes */
+ init_completion(&rproc->firmware_loading_complete);
+
+ dev_info(dev, "powering up %s\n", name);
+
+ err = rproc_loader(rproc);
+ if (err) {
+ dev_err(dev, "failed to load rproc %s\n", rproc->name);
+ complete_all(&rproc->firmware_loading_complete);
+ module_put(rproc->owner);
+ --rproc->count;
+ goto unlock_mutex;
+ }
+
+ rproc->state = RPROC_LOADING;
+ ret = rproc;
+
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rproc_get);
+
+void rproc_put(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev;
+ int ret;
+
+ /* make sure rproc is not loading now */
+ wait_for_completion(&rproc->firmware_loading_complete);
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return;
+ }
+
+ if (!rproc->count) {
+ dev_warn(dev, "asymmetric rproc_put\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (--rproc->count)
+ goto out;
+
+ if (mutex_lock_interruptible(&rproc->tlock))
+ goto out;
+
+ if (rproc->trace_buf0)
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rproc->trace_buf0);
+ if (rproc->trace_buf1)
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rproc->trace_buf1);
+ rproc->trace_buf0 = rproc->trace_buf1 = NULL;
+
+ if (rproc->cdump_buf0)
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rproc->cdump_buf0);
+ if (rproc->cdump_buf1)
+ /* iounmap normal memory, so make sparse happy */
+ iounmap((__force void __iomem *) rproc->cdump_buf1);
+ rproc->cdump_buf0 = rproc->cdump_buf1 = NULL;
+
+ mutex_unlock(&rproc->tlock);
+
+ rproc_reset_poolmem(rproc);
+ memset(rproc->memory_maps, 0, sizeof(rproc->memory_maps));
+ kfree(rproc->header);
+
+ /*
+ * make sure rproc is really running before powering it off.
+ * this is important, because the fw loading might have failed.
+ */
+ if (rproc->state == RPROC_RUNNING || rproc->state == RPROC_CRASHED) {
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ /*
+ * Call resume, it will cancel any pending autosuspend,
+ * so that no callback is executed after the device is stopped.
+ * Device stop function takes care of shutting down the device.
+ */
+ pm_runtime_get_sync(rproc->dev);
+ pm_runtime_put_noidle(rproc->dev);
+ if (!rproc->secure_reset)
+ pm_runtime_disable(rproc->dev);
+
+ pm_runtime_set_suspended(rproc->dev);
+#endif
+ ret = rproc->ops->stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't stop rproc %s: %d\n", rproc->name,
+ ret);
+ goto out;
+ }
+ if (rproc->ops->watchdog_exit) {
+ ret = rproc->ops->watchdog_exit(rproc);
+ if (ret) {
+ dev_err(rproc->dev, "error watchdog_exit %d\n",
+ ret);
+ goto out;
+ }
+ }
+ if (rproc->ops->iommu_exit) {
+ ret = rproc->ops->iommu_exit(rproc);
+ if (ret) {
+ dev_err(rproc->dev, "error iommu_exit %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ if (rproc->state == RPROC_CRASHED)
+ complete_all(&rproc->error_comp);
+
+ rproc->state = RPROC_OFFLINE;
+
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
+
+out:
+ mutex_unlock(&rproc->lock);
+ if (!ret)
+ module_put(rproc->owner);
+}
+EXPORT_SYMBOL_GPL(rproc_put);
+
+static void rproc_error_work(struct work_struct *work)
+{
+ struct rproc *rproc = container_of(work, struct rproc, error_work);
+
+ dev_dbg(rproc->dev, "%s\n", __func__);
+ _event_notify(rproc, RPROC_ERROR, NULL);
+}
+
+int rproc_event_register(struct rproc *rproc, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&rproc->nbh, nb);
+}
+EXPORT_SYMBOL_GPL(rproc_event_register);
+
+int rproc_event_unregister(struct rproc *rproc, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&rproc->nbh, nb);
+}
+EXPORT_SYMBOL_GPL(rproc_event_unregister);
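Clients consume these hooks with an ordinary notifier block, roughly as sketched below; the callback and variable names are illustrative, not part of this patch:

static int my_rproc_notify(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	if (event == RPROC_ERROR)
		pr_warn("remote processor reported an error\n");
	return NOTIFY_OK;
}

static struct notifier_block my_rproc_nb = {
	.notifier_call = my_rproc_notify,
};

/* after rproc_get(): rproc_event_register(rproc, &my_rproc_nb); */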
+
+void rproc_last_busy(struct rproc *rproc)
+{
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ struct device *dev = rproc->dev;
+
+ mutex_lock(&rproc->pm_lock);
+ if (pm_runtime_suspended(dev) ||
+ !pm_runtime_autosuspend_expiration(dev)) {
+ pm_runtime_mark_last_busy(dev);
+ mutex_unlock(&rproc->pm_lock);
+ /*
+ * if the remote processor is suspended, we cannot wake it
+ * up (that would abort system suspend); instead note that
+ * the remote processor needs to be woken up on system resume.
+ */
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_SUSPENDED) {
+ rproc->need_resume = true;
+ mutex_unlock(&rproc->lock);
+ return;
+ }
+ mutex_unlock(&rproc->lock);
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return;
+ }
+ pm_runtime_mark_last_busy(dev);
+ mutex_unlock(&rproc->pm_lock);
+#endif
+}
+EXPORT_SYMBOL(rproc_last_busy);
+
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+static int rproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(dev, "Enter %s\n", __func__);
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state != RPROC_SUSPENDED) {
+ mutex_unlock(&rproc->lock);
+ return 0;
+ }
+
+ if (!rproc->need_resume)
+ goto unlock;
+
+ rproc->need_resume = false;
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+unlock:
+ rproc->state = (ret) ? RPROC_CRASHED : RPROC_RUNNING;
+ mutex_unlock(&rproc->lock);
+ if (ret) {
+ _event_notify(rproc, RPROC_ERROR, NULL);
+ dev_err(dev, "Error resuming %d\n", ret);
+ }
+ return ret;
+}
+
+static int rproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(dev, "Enter %s\n", __func__);
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state != RPROC_RUNNING) {
+ mutex_unlock(&rproc->lock);
+ return 0;
+ }
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+ /*
+ * If it is not runtime suspended, the remote processor is still
+ * doing something; however, we need to stop it for system suspend.
+ */
+
+ dev_dbg(dev, "%s: will be forced to suspend\n", rproc->name);
+
+ rproc->force_suspend = true;
+ ret = pm_runtime_suspend(dev);
+ rproc->force_suspend = false;
+ if (ret)
+ goto out;
+ /*
+ * As the remote processor had to be forced to suspend, it was
+ * executing some task, so it needs to be woken up on system resume.
+ */
+ rproc->need_resume = true;
+out:
+ if (!ret)
+ rproc->state = RPROC_SUSPENDED;
+ mutex_unlock(&rproc->lock);
+
+ return ret;
+}
+
+static int rproc_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(dev, "Enter %s\n", __func__);
+
+ if (rproc->ops->resume)
+ ret = rproc->ops->resume(rproc);
+
+ if (!ret)
+ _event_notify(rproc, RPROC_RESUME, NULL);
+
+ return 0;
+}
+
+static int rproc_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ int ret = 0;
+ unsigned to;
+
+ dev_dbg(dev, "Enter %s\n", __func__);
+
+ if (rproc->state == RPROC_SUSPENDED)
+ return 0;
+
+ mutex_lock(&rproc->pm_lock);
+
+ if (pm_runtime_autosuspend_expiration(dev) && !rproc->force_suspend) {
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ /*
+ * Notify RPROC_PRE_SUSPEND only when the suspend is not forced.
+ * Users can use the pre-suspend callback to cancel autosuspend, but
+ * when the suspend is forced there is no need to notify them.
+ */
+ if (!rproc->force_suspend)
+ ret = _event_notify(rproc, RPROC_PRE_SUSPEND, NULL);
+ /*
+ * If an rproc user vetoes the suspend, it is still using the
+ * rproc, so abort the suspend.
+ */
+ if (ret) {
+ dev_dbg(dev, "suspend aborted by user %d\n", ret);
+ ret = -EBUSY;
+ goto abort;
+ }
+ /* Now call the machine-specific suspend function (if it exists) */
+ if (rproc->ops->suspend)
+ ret = rproc->ops->suspend(rproc, rproc->force_suspend);
+ /*
+ * If it fails with -EBUSY/-EAGAIN, the remote processor is still
+ * running but the rproc users were not aware of that, so abort the
+ * suspend. If it is a different error, something is wrong with the
+ * remote processor; return that error to the PM runtime framework,
+ * which will disable autosuspend.
+ */
+ if (ret) {
+ dev_dbg(dev, "suspend aborted by remote processor %d\n", ret);
+ if (ret != -EBUSY && ret != -EAGAIN)
+ dev_err(dev, "suspend error %d", ret);
+ goto abort;
+ }
+ /* we are not interested in the returned value */
+ _event_notify(rproc, RPROC_POS_SUSPEND, NULL);
+ mutex_unlock(&rproc->pm_lock);
+
+ return 0;
+abort:
+ pm_runtime_mark_last_busy(dev);
+ to = jiffies_to_msecs(pm_runtime_autosuspend_expiration(dev) - jiffies);
+ pm_schedule_suspend(dev, to);
+ dev->power.timer_autosuspends = 1;
+ mutex_unlock(&rproc->pm_lock);
+ return ret;
+}
+
+const struct dev_pm_ops rproc_gen_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rproc_suspend, rproc_resume)
+ SET_RUNTIME_PM_OPS(rproc_runtime_suspend, rproc_runtime_resume, NULL)
+};
+#endif
+int
+rproc_set_constraints(struct rproc *rproc, enum rproc_constraint type, long v)
+{
+ int ret;
+ char *cname[] = {"scale", "latency", "bandwidth"};
+ int (*func)(struct rproc *, long);
+
+ switch (type) {
+ case RPROC_CONSTRAINT_SCALE:
+ func = rproc->ops->scale;
+ break;
+ case RPROC_CONSTRAINT_LATENCY:
+ func = rproc->ops->set_lat;
+ break;
+ case RPROC_CONSTRAINT_BANDWIDTH:
+ func = rproc->ops->set_bw;
+ break;
+ default:
+ dev_err(rproc->dev, "invalid constraint\n");
+ return -EINVAL;
+ }
+
+ if (!func) {
+ dev_err(rproc->dev, "%s: no %s constraint\n",
+ __func__, cname[type]);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE) {
+ pr_err("%s: rproc inactive\n", __func__);
+ mutex_unlock(&rproc->lock);
+ return -EPERM;
+ }
+
+ dev_dbg(rproc->dev, "set %s constraint %ld\n", cname[type], v);
+ ret = func(rproc, v);
+ if (ret)
+ dev_err(rproc->dev, "error %s constraint\n", cname[type]);
+ mutex_unlock(&rproc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_constraints);
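A hedged usage sketch of the exported constraint API; the processor name and the 10 us figure are illustrative only:

struct rproc *rp = rproc_get("ducati");	/* name is illustrative */

if (rp) {
	/* ask the platform ops to cap wakeup latency at 10 us */
	rproc_set_constraints(rp, RPROC_CONSTRAINT_LATENCY, 10);
	/* ... */
	rproc_put(rp);
}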
+
+int rproc_register(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware,
+ struct rproc_mem_pool *memory_pool,
+ struct module *owner,
+ unsigned sus_timeout)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc;
+
+ if (!dev || !name || !ops)
+ return -EINVAL;
+
+ rproc = kzalloc(sizeof(struct rproc), GFP_KERNEL);
+ if (!rproc) {
+ dev_err(dev, "%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ rproc->dev = dev;
+ rproc->name = name;
+ rproc->ops = ops;
+ rproc->firmware = firmware;
+ rproc->owner = owner;
+ rproc->memory_pool = memory_pool;
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ rproc->sus_timeout = sus_timeout;
+ mutex_init(&rproc->pm_lock);
+#endif
+ mutex_init(&rproc->lock);
+ mutex_init(&rproc->secure_lock);
+ mutex_init(&rproc->tlock);
+ INIT_WORK(&rproc->error_work, rproc_error_work);
+ BLOCKING_INIT_NOTIFIER_HEAD(&rproc->nbh);
+
+ rproc->state = RPROC_OFFLINE;
+
+ rproc->qos_request = kzalloc(sizeof(*rproc->qos_request),
+ GFP_KERNEL);
+ if (!rproc->qos_request) {
+ kfree(rproc);
+ return -ENOMEM;
+ }
+
+ pm_qos_add_request(rproc->qos_request, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ rproc->secure_mode = false;
+ rproc->secure_ttb = NULL;
+ init_completion(&rproc->secure_restart);
+
+ spin_lock(&rprocs_lock);
+ list_add_tail(&rproc->next, &rprocs);
+ spin_unlock(&rprocs_lock);
+
+ platform_set_drvdata(pdev, rproc);
+
+ dev_info(dev, "%s is available\n", name);
+
+ if (!rproc_dbg)
+ goto out;
+
+ rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
+ if (!rproc->dbg_dir) {
+ dev_err(dev, "can't create debugfs dir\n");
+ goto out;
+ }
+
+ debugfs_create_file("name", 0444, rproc->dbg_dir, rproc,
+ &rproc_name_ops);
+
+ debugfs_create_file("version", 0444, rproc->dbg_dir, rproc,
+ &rproc_version_ops);
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rproc_register);
+
+int rproc_unregister(const char *name)
+{
+ struct rproc *rproc;
+
+ rproc = __find_rproc_by_name(name);
+ if (!rproc) {
+ pr_err("can't find remote processor %s\n", name);
+ return -EINVAL;
+ }
+
+ dev_info(rproc->dev, "removing %s\n", name);
+
+ if (rproc->dbg_dir)
+ debugfs_remove_recursive(rproc->dbg_dir);
+
+ spin_lock(&rprocs_lock);
+ list_del(&rproc->next);
+ spin_unlock(&rprocs_lock);
+
+ rproc->secure_mode = false;
+ rproc->secure_ttb = NULL;
+ pm_qos_remove_request(rproc->qos_request);
+ kfree(rproc->qos_request);
+ kfree(rproc->last_trace_buf0);
+ kfree(rproc->last_trace_buf1);
+ kfree(rproc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rproc_unregister);
+
+static int __init remoteproc_init(void)
+{
+ if (debugfs_initialized()) {
+ rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!rproc_dbg)
+ pr_err("can't create debugfs dir\n");
+ }
+
+ return 0;
+}
+/* must be ready in time for device_initcall users */
+subsys_initcall(remoteproc_init);
+
+static void __exit remoteproc_exit(void)
+{
+ if (rproc_dbg)
+ debugfs_remove(rproc_dbg);
+}
+module_exit(remoteproc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic Remote Processor Framework");
diff --git a/drivers/remoteproc/rpres.c b/drivers/remoteproc/rpres.c
new file mode 100644
index 0000000..e839f70
--- /dev/null
+++ b/drivers/remoteproc/rpres.c
@@ -0,0 +1,204 @@
+/*
+ * Remote processor resources
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <plat/omap_device.h>
+#include <plat/rpres.h>
+
+static LIST_HEAD(rpres_list);
+static DEFINE_SPINLOCK(rpres_lock);
+
+static struct rpres *__find_by_name(const char *name)
+{
+ struct rpres *obj;
+
+ list_for_each_entry(obj, &rpres_list, next)
+ if (!strcmp(obj->name, name))
+ return obj;
+ return NULL;
+}
+
+struct rpres *rpres_get(const char *name)
+{
+ int ret;
+ struct rpres *r;
+ struct rpres_platform_data *pdata;
+
+ spin_lock(&rpres_lock);
+ r = __find_by_name(name);
+ spin_unlock(&rpres_lock);
+ if (!r)
+ return ERR_PTR(-ENOENT);
+
+ mutex_lock(&r->lock);
+ if (r->state == RPRES_ACTIVE) {
+ pr_err("%s:resource already active\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ pdata = r->pdev->dev.platform_data;
+ ret = pdata->ops->start(r->pdev);
+ if (!ret)
+ r->state = RPRES_ACTIVE;
+out:
+ mutex_unlock(&r->lock);
+ if (ret)
+ return ERR_PTR(ret);
+ return r;
+}
+EXPORT_SYMBOL(rpres_get);
+
+void rpres_put(struct rpres *obj)
+{
+ struct rpres_platform_data *pdata = obj->pdev->dev.platform_data;
+ mutex_lock(&obj->lock);
+ if (obj->state == RPRES_INACTIVE) {
+ pr_err("%s:resource already inactive\n", __func__);
+ } else {
+ pdata->ops->stop(obj->pdev);
+ obj->state = RPRES_INACTIVE;
+ }
+ mutex_unlock(&obj->lock);
+}
+EXPORT_SYMBOL(rpres_put);
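A hedged sketch of how a client uses the get/put pair; "rpres_fdif" matches one of the names registered in rpres_dev.c below:

struct rpres *res = rpres_get("rpres_fdif");

if (!IS_ERR(res)) {
	/* the resource is enabled (RPRES_ACTIVE) between get and put */
	rpres_put(res);
}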
+
+int rpres_set_constraints(struct rpres *obj, enum rpres_constraint type, long val)
+{
+ int ret;
+ struct rpres_platform_data *pdata = obj->pdev->dev.platform_data;
+ struct platform_device *pdev = obj->pdev;
+ static const char *cname[] = {"scale", "latency", "bandwidth"};
+ int (*func)(struct platform_device *, long);
+
+ switch (type) {
+ case RPRES_CONSTRAINT_SCALE:
+ func = pdata->ops->scale_dev;
+ break;
+ case RPRES_CONSTRAINT_LATENCY:
+ func = pdata->ops->set_lat;
+ break;
+ case RPRES_CONSTRAINT_BANDWIDTH:
+ func = pdata->ops->set_bw;
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: invalid constraint %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ if (!func) {
+ dev_err(&pdev->dev, "%s: No %s constraint\n",
+ __func__, cname[type]);
+ return -EINVAL;
+ }
+
+ mutex_lock(&obj->lock);
+ if (obj->state == RPRES_INACTIVE) {
+ mutex_unlock(&obj->lock);
+ pr_err("%s: resource inactive\n", __func__);
+ return -EPERM;
+ }
+
+ dev_dbg(&pdev->dev, "set %s constraint %ld\n", cname[type], val);
+ ret = func(pdev, val);
+ if (ret)
+ dev_err(&pdev->dev, "%s: error setting constraint %s\n",
+ __func__, cname[type]);
+ mutex_unlock(&obj->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpres_set_constraints);
+
+static int rpres_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpres_platform_data *pdata = dev->platform_data;
+ struct rpres *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->pdev = pdev;
+ obj->name = pdata->name;
+ obj->state = RPRES_INACTIVE;
+ mutex_init(&obj->lock);
+
+ spin_lock(&rpres_lock);
+ list_add_tail(&obj->next, &rpres_list);
+ spin_unlock(&rpres_lock);
+
+ return 0;
+}
+
+static int __devexit rpres_remove(struct platform_device *pdev)
+{
+ struct rpres_platform_data *pdata = pdev->dev.platform_data;
+ struct rpres *obj;
+
+ spin_lock(&rpres_lock);
+ obj = __find_by_name(pdata->name);
+ if (!obj) {
+ spin_unlock(&rpres_lock);
+ dev_err(&pdev->dev, "fail to remove %s\n", pdata->name);
+ return -ENOENT;
+ }
+ list_del(&obj->next);
+ spin_unlock(&rpres_lock);
+
+ kfree(obj);
+
+ return 0;
+}
+
+static struct platform_device_id rpres_id_table[] = {
+ {
+ .name = "iva",
+ },
+ {
+ .name = "fdif",
+ },
+ {
+ .name = "rpres",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, rpres_id_table);
+
+static struct platform_driver omap_rpres_driver = {
+ .id_table = rpres_id_table,
+ .probe = rpres_probe,
+ .remove = __devexit_p(rpres_remove),
+ .driver = {
+ .name = "rpres",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rpres_init(void)
+{
+ return platform_driver_register(&omap_rpres_driver);
+}
+late_initcall(rpres_init);
+
+static void __exit rpres_exit(void)
+{
+ platform_driver_unregister(&omap_rpres_driver);
+}
+module_exit(rpres_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/rpres_dev.c b/drivers/remoteproc/rpres_dev.c
new file mode 100644
index 0000000..8ecd950
--- /dev/null
+++ b/drivers/remoteproc/rpres_dev.c
@@ -0,0 +1,220 @@
+/*
+ * Remote processor resources
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+#include <plat/clock.h>
+#include <plat/rpres.h>
+#include <linux/pm_qos_params.h>
+#include <plat/common.h>
+#include <plat/omap-pm.h>
+#include "../../arch/arm/mach-omap2/dvfs.h"
+
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int i;
+
+ pr_debug("%s: %s: enabling optional clocks\n", __func__, oh->name);
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_enable(oc->_clk);
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int i;
+
+ pr_debug("%s: %s: disabling optional clocks\n", __func__, oh->name);
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_disable(oc->_clk);
+}
+
+static int rpres_iss_enable(struct platform_device *pdev)
+{
+ int ret;
+ struct rpres_platform_data *pdata = pdev->dev.platform_data;
+
+ ret = omap_device_enable(pdev);
+ if (!ret)
+ _enable_optional_clocks(pdata->oh);
+
+ return ret;
+}
+
+static int rpres_iss_shutdown(struct platform_device *pdev)
+{
+ int ret;
+ struct rpres_platform_data *pdata = pdev->dev.platform_data;
+
+ omap_hwmod_reset(pdata->oh);
+
+ ret = omap_device_idle(pdev);
+ if (!ret)
+ _disable_optional_clocks(pdata->oh);
+
+ return ret;
+}
+
+static int rpres_fdif_shutdown(struct platform_device *pdev)
+{
+ struct rpres_platform_data *pdata = pdev->dev.platform_data;
+
+ omap_hwmod_reset(pdata->oh);
+
+ return omap_device_idle(pdev);
+}
+
+static int rpres_scale_dev(struct platform_device *pdev, long val)
+{
+ return omap_device_scale(&pdev->dev, &pdev->dev, val);
+}
+
+static int rpres_set_dev_lat(struct platform_device *pdev, long val)
+{
+ return omap_pm_set_max_dev_wakeup_lat(&pdev->dev, &pdev->dev, val);
+}
+
+static int rpres_set_l3_bw(struct platform_device *pdev, long val)
+{
+ return omap_pm_set_min_bus_tput(&pdev->dev, OCP_INITIATOR_AGENT, val);
+}
+
+static struct rpres_ops iss_ops = {
+ .start = rpres_iss_enable,
+ .stop = rpres_iss_shutdown,
+ .set_lat = rpres_set_dev_lat,
+ .set_bw = rpres_set_l3_bw,
+};
+
+static struct rpres_ops ivahd_ops = {
+ .start = omap_device_enable,
+ .stop = omap_device_shutdown,
+ .set_lat = rpres_set_dev_lat,
+ .set_bw = rpres_set_l3_bw,
+ .scale_dev = rpres_scale_dev,
+};
+
+static struct rpres_ops fdif_ops = {
+ .start = omap_device_enable,
+ .stop = rpres_fdif_shutdown,
+ .set_lat = rpres_set_dev_lat,
+ .set_bw = rpres_set_l3_bw,
+ .scale_dev = rpres_scale_dev,
+};
+
+static struct rpres_ops gen_ops = {
+ .start = omap_device_enable,
+ .stop = omap_device_shutdown,
+};
+
+static struct omap_device_pm_latency rpres_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
+static struct rpres_platform_data rpres_data[] = {
+ {
+ .name = "rpres_iva",
+ .oh_name = "iva",
+ .ops = &ivahd_ops,
+ .get_dev = omap2_get_iva_device,
+ },
+ {
+ .name = "rpres_iva_seq0",
+ .oh_name = "iva_seq0",
+ .ops = &gen_ops,
+ },
+ {
+ .name = "rpres_iva_seq1",
+ .oh_name = "iva_seq1",
+ .ops = &gen_ops,
+ },
+ {
+ .name = "rpres_iss",
+ .oh_name = "iss",
+ .ops = &iss_ops,
+ .opt_clk_name = "ctrlclk",
+ },
+ {
+ .name = "rpres_fdif",
+ .oh_name = "fdif",
+ .ops = &fdif_ops,
+ .get_dev = omap4_get_fdif_device,
+ },
+ {
+ .name = "rpres_sl2if",
+ .oh_name = "sl2if",
+ .ops = &gen_ops,
+ },
+};
+
+static int __init init(void)
+{
+ int i, ret;
+ struct omap_hwmod *oh;
+ struct omap_device_pm_latency *ohl = rpres_latency;
+ int ohl_cnt = ARRAY_SIZE(rpres_latency);
+ struct omap_device *od;
+ struct device *dev;
+ struct platform_device *pdev;
+
+ for (i = 0; i < ARRAY_SIZE(rpres_data); i++) {
+ oh = omap_hwmod_lookup(rpres_data[i].oh_name);
+ if (!oh) {
+			pr_err("No hwmod for %s\n", rpres_data[i].name);
+ continue;
+ }
+ rpres_data[i].oh = oh;
+
+ if (rpres_data[i].get_dev) {
+ dev = rpres_data[i].get_dev();
+ if (!dev) {
+ pr_err("No dev for %s\n", rpres_data[i].name);
+ continue;
+ }
+ pdev = to_platform_device(dev);
+ ret = platform_device_add_data(pdev, &rpres_data[i],
+ sizeof(struct rpres_platform_data));
+ if (ret) {
+ pr_err("Error pdev add for %s\n",
+ rpres_data[i].name);
+ continue;
+ }
+ od = to_omap_device(pdev);
+ od->pm_lats = ohl;
+ od->pm_lats_cnt = ohl_cnt;
+ } else {
+ od = omap_device_build("rpres", i, oh,
+ &rpres_data[i],
+ sizeof(struct rpres_platform_data),
+ ohl, ohl_cnt, false);
+ if (IS_ERR(od))
+ pr_err("Error building device for %s\n",
+ rpres_data[i].name);
+ }
+ }
+ return 0;
+}
+device_initcall(init);
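
For illustration only (not part of this patch): a minimal sketch of how a kernel client might consume one of the rpres resources registered above, using the plat/rpres.h API that rpmsg_resmgr.c later in this series relies on (rpres_get(), rpres_set_constraints(), rpres_put()). The resource name and the 10 us latency value are example choices.

#include <linux/err.h>
#include <plat/rpres.h>

static int example_use_iss(void)
{
	struct rpres *iss;
	int ret;

	/* names come from rpres_data[] above */
	iss = rpres_get("rpres_iss");
	if (IS_ERR(iss))
		return PTR_ERR(iss);

	/* cap device wakeup latency (in us) while the resource is held */
	ret = rpres_set_constraints(iss, RPRES_CONSTRAINT_LATENCY, 10);
	if (ret)
		goto out;

	/* ... use the ISS hardware ... */
out:
	rpres_put(iss);
	return ret;
}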
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
new file mode 100644
index 0000000..e40c9d8
--- /dev/null
+++ b/drivers/rpmsg/Kconfig
@@ -0,0 +1,54 @@
+config RPMSG
+ tristate "Virtio-based remote processor messaging bus"
+ default y
+ select VIRTIO
+ select VIRTIO_RING
+ depends on OMAP_RPMSG
+ ---help---
+ This virtio driver provides support for shared-memory-based
+ remote processor messaging, by registering the RPMSG bus which
+ in turn enables a handful of IPC drivers.
+
+ Such support is usually required to offload cpu-intensive
+ or latency-sensitive tasks to specialized remote processors with
+ dedicated hardware accelerators and/or real-time properties.
+
+ If unsure, say N.
+
+config RPMSG_OMX
+ tristate "rpmsg OMX driver"
+ default y
+ depends on RPMSG
+ depends on TI_TILER
+ ---help---
+	  An rpmsg driver that exposes the OMX API to user space, in order to
+ allow multimedia applications to offload OMX processing to
+ remote processors.
+
+ If unsure, say N.
+
+config RPMSG_RESMGR
+ tristate "rpmsg resource manager"
+ default y
+ depends on RPMSG
+ depends on OMAP_RPRES
+ ---help---
+	  Adds a framework, based on rpmsg, that lets a remote processor
+	  request and release host-side resources (clocks, regulators, DMA
+	  channels, timers, etc.).
+	  Say either Y or M.
+
+config RPMSG_CLIENT_SAMPLE
+ tristate "An rpmsg client sample"
+ default m
+ depends on RPMSG
+ ---help---
+ This is just a sample client driver for the rpmsg bus.
+ Say either Y or M. You know you want to.
+
+config RPMSG_SERVER_SAMPLE
+ tristate "An rpmsg server sample"
+ default m
+ depends on RPMSG
+ ---help---
+ This is just a sample server driver for the rpmsg bus.
+ Say either Y or M. You know you want to.
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
new file mode 100644
index 0000000..1b0e04b
--- /dev/null
+++ b/drivers/rpmsg/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_RPMSG) += virtio_rpmsg_bus.o
+
+obj-$(CONFIG_RPMSG_OMX) += rpmsg_omx.o
+obj-$(CONFIG_RPMSG_CLIENT_SAMPLE) += rpmsg_client_sample.o
+obj-$(CONFIG_RPMSG_SERVER_SAMPLE) += rpmsg_server_sample.o
+obj-$(CONFIG_RPMSG_RESMGR) += rpmsg_resmgr.o
diff --git a/drivers/rpmsg/rpmsg_client_sample.c b/drivers/rpmsg/rpmsg_client_sample.c
new file mode 100644
index 0000000..a3f5013
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_client_sample.c
@@ -0,0 +1,100 @@
+/*
+ * Remote processor messaging transport - sample client driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+
+#define MSG ("hello world!")
+#define MSG_LIMIT 100
+
+static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ int err;
+ static int rx_count;
+
+ dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+
+ /* samples should not live forever */
+ if (rx_count >= MSG_LIMIT) {
+ dev_info(&rpdev->dev, "goodbye!\n");
+ return;
+ }
+
+ /* send a new message now */
+ err = rpmsg_send(rpdev, MSG, strlen(MSG));
+ if (err)
+ pr_err("rpmsg_send failed: %d\n", err);
+}
+
+static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
+{
+ int err;
+
+ dev_info(&rpdev->dev, "new channel: 0x%x <-> 0x%x!\n",
+ rpdev->src, rpdev->dst);
+
+ /* send a message to our remote processor */
+ err = rpmsg_send(rpdev, MSG, strlen(MSG));
+ if (err) {
+ pr_err("rpmsg_send failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __devexit rpmsg_sample_remove(struct rpmsg_channel *rpdev)
+{
+ dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
+}
+
+static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
+ { .name = "rpmsg-client-sample" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, rpmsg_driver_sample_id_table);
+
+static struct rpmsg_driver rpmsg_sample_client_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = rpmsg_driver_sample_id_table,
+ .probe = rpmsg_sample_probe,
+ .callback = rpmsg_sample_cb,
+ .remove = __devexit_p(rpmsg_sample_remove),
+};
+
+static int __init init(void)
+{
+ return register_rpmsg_driver(&rpmsg_sample_client_driver);
+}
+
+static void __exit fini(void)
+{
+ unregister_rpmsg_driver(&rpmsg_sample_client_driver);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DESCRIPTION("Virtio remote processor messaging sample client driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_omx.c b/drivers/rpmsg/rpmsg_omx.c
new file mode 100644
index 0000000..972a918
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_omx.c
@@ -0,0 +1,833 @@
+/*
+ * OMX offloading remote processor driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg_omx.h>
+#include <linux/completion.h>
+
+#include <mach/tiler.h>
+
+#ifdef CONFIG_ION_OMAP
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+
+extern struct ion_device *omap_ion_device;
+#endif
+
+/* maximum OMX devices this driver can handle */
+#define MAX_OMX_DEVICES 8
+
+enum rpc_omx_map_info_type {
+ RPC_OMX_MAP_INFO_NONE = 0,
+ RPC_OMX_MAP_INFO_ONE_BUF = 1,
+ RPC_OMX_MAP_INFO_TWO_BUF = 2,
+ RPC_OMX_MAP_INFO_THREE_BUF = 3,
+ RPC_OMX_MAP_INFO_MAX = 0x7FFFFFFF
+};
+
+enum {
+ OMX_SERVICE_DOWN,
+ OMX_SERVICE_UP
+};
+
+struct rpmsg_omx_service {
+ struct list_head next;
+ struct cdev cdev;
+ struct device *dev;
+ struct rpmsg_channel *rpdev;
+ int minor;
+ struct list_head list;
+ struct mutex lock;
+ struct completion comp;
+ int state;
+#ifdef CONFIG_ION_OMAP
+ struct ion_client *ion_client;
+#endif
+};
+
+struct rpmsg_omx_instance {
+ struct list_head next;
+ struct rpmsg_omx_service *omxserv;
+ struct sk_buff_head queue;
+ struct mutex lock;
+ wait_queue_head_t readq;
+ struct completion reply_arrived;
+ struct rpmsg_endpoint *ept;
+ u32 dst;
+ int state;
+#ifdef CONFIG_ION_OMAP
+ struct ion_client *ion_client;
+#endif
+};
+
+static struct class *rpmsg_omx_class;
+static dev_t rpmsg_omx_dev;
+
+/* store all remote omx connection services (usually one per remoteproc) */
+static DEFINE_IDR(rpmsg_omx_services);
+static DEFINE_SPINLOCK(rpmsg_omx_services_lock);
+static LIST_HEAD(rpmsg_omx_services_list);
+
+#ifdef CONFIG_ION_OMAP
+#ifdef CONFIG_PVR_SGX
+#include "../gpu/pvr/ion.h"
+#endif
+#endif
+
+/*
+ * TODO: Need to do this using lookup with rproc, but rproc is not
+ * visible to rpmsg_omx
+ */
+#define TILER_START 0x60000000
+#define TILER_END 0x80000000
+#define ION_1D_START 0xBA300000
+#define ION_1D_END 0xBFD00000
+#define ION_1D_VA 0x88000000
+static u32 _rpmsg_pa_to_da(u32 pa)
+{
+ if (pa >= TILER_START && pa < TILER_END)
+ return pa;
+ else if (pa >= ION_1D_START && pa < ION_1D_END)
+ return (pa - ION_1D_START + ION_1D_VA);
+ else
+ return 0;
+}
+
+static u32 _rpmsg_omx_buffer_lookup(struct rpmsg_omx_instance *omx, long buffer)
+{
+ phys_addr_t pa;
+ u32 va;
+#ifdef CONFIG_ION_OMAP
+ struct ion_handle *handle;
+ ion_phys_addr_t paddr;
+ size_t unused;
+ int fd;
+
+ /* is it an ion handle? */
+ handle = (struct ion_handle *)buffer;
+ if (!ion_phys(omx->ion_client, handle, &paddr, &unused)) {
+ pa = (phys_addr_t) paddr;
+ goto to_va;
+ }
+
+#ifdef CONFIG_PVR_SGX
+ /* how about an sgx buffer wrapping an ion handle? */
+ {
+ struct ion_client *pvr_ion_client;
+ fd = buffer;
+ handle = PVRSRVExportFDToIONHandle(fd, &pvr_ion_client);
+ if (handle &&
+ !ion_phys(pvr_ion_client, handle, &paddr, &unused)) {
+ pa = (phys_addr_t)paddr;
+ goto to_va;
+ }
+ }
+#endif
+#endif
+ pa = (phys_addr_t) tiler_virt2phys(buffer);
+
+#ifdef CONFIG_ION_OMAP
+to_va:
+#endif
+ va = _rpmsg_pa_to_da(pa);
+ return va;
+}
+
+static int _rpmsg_omx_map_buf(struct rpmsg_omx_instance *omx, char *packet)
+{
+ int ret = -EINVAL, offset = 0;
+ long *buffer;
+ char *data;
+ enum rpc_omx_map_info_type maptype;
+ u32 da = 0;
+
+ data = (char *)((struct omx_packet *)packet)->data;
+ maptype = *((enum rpc_omx_map_info_type *)data);
+
+	/* Nothing to map */
+ if (maptype == RPC_OMX_MAP_INFO_NONE)
+ return 0;
+ if ((maptype != RPC_OMX_MAP_INFO_THREE_BUF) &&
+ (maptype != RPC_OMX_MAP_INFO_TWO_BUF) &&
+ (maptype != RPC_OMX_MAP_INFO_ONE_BUF))
+ return ret;
+
+ offset = *(int *)((int)data + sizeof(maptype));
+ buffer = (long *)((int)data + offset);
+
+ da = _rpmsg_omx_buffer_lookup(omx, *buffer);
+ if (da) {
+ *buffer = da;
+ ret = 0;
+ }
+
+ if (!ret && (maptype >= RPC_OMX_MAP_INFO_TWO_BUF)) {
+ buffer = (long *)((int)data + offset + sizeof(*buffer));
+ if (*buffer != 0) {
+ ret = -EIO;
+ da = _rpmsg_omx_buffer_lookup(omx, *buffer);
+ if (da) {
+ *buffer = da;
+ ret = 0;
+ }
+ }
+ }
+
+ if (!ret && maptype >= RPC_OMX_MAP_INFO_THREE_BUF) {
+ buffer = (long *)((int)data + offset + 2*sizeof(*buffer));
+ if (*buffer != 0) {
+ ret = -EIO;
+ da = _rpmsg_omx_buffer_lookup(omx, *buffer);
+ if (da) {
+ *buffer = da;
+ ret = 0;
+ }
+ }
+ }
+ return ret;
+}
+
+static void rpmsg_omx_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct omx_msg_hdr *hdr = data;
+ struct rpmsg_omx_instance *omx = priv;
+ struct omx_conn_rsp *rsp;
+ struct sk_buff *skb;
+ char *skbdata;
+
+ if (len < sizeof(*hdr) || hdr->len < len - sizeof(*hdr)) {
+ dev_warn(&rpdev->dev, "%s: truncated message\n", __func__);
+ return;
+ }
+
+ dev_dbg(&rpdev->dev, "%s: incoming msg src 0x%x type %d len %d\n",
+ __func__, src, hdr->type, hdr->len);
+ print_hex_dump(KERN_DEBUG, "rpmsg_omx RX: ", DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+
+ switch (hdr->type) {
+ case OMX_CONN_RSP:
+ if (hdr->len < sizeof(*rsp)) {
+ dev_warn(&rpdev->dev, "incoming empty response msg\n");
+ break;
+ }
+ rsp = (struct omx_conn_rsp *) hdr->data;
+ dev_info(&rpdev->dev, "conn rsp: status %d addr %d\n",
+ rsp->status, rsp->addr);
+ omx->dst = rsp->addr;
+ if (rsp->status)
+ omx->state = OMX_FAIL;
+ else
+ omx->state = OMX_CONNECTED;
+ complete(&omx->reply_arrived);
+ break;
+ case OMX_RAW_MSG:
+ skb = alloc_skb(hdr->len, GFP_KERNEL);
+ if (!skb) {
+ dev_err(&rpdev->dev, "alloc_skb err: %u\n", hdr->len);
+ break;
+ }
+ skbdata = skb_put(skb, hdr->len);
+ memcpy(skbdata, hdr->data, hdr->len);
+
+ mutex_lock(&omx->lock);
+ skb_queue_tail(&omx->queue, skb);
+ mutex_unlock(&omx->lock);
+ /* wake up any blocking processes, waiting for new data */
+ wake_up_interruptible(&omx->readq);
+ break;
+ default:
+ dev_warn(&rpdev->dev, "unexpected msg type: %d\n", hdr->type);
+ break;
+ }
+}
+
+static int rpmsg_omx_connect(struct rpmsg_omx_instance *omx, char *omxname)
+{
+ struct omx_msg_hdr *hdr;
+ struct omx_conn_req *payload;
+ struct rpmsg_omx_service *omxserv = omx->omxserv;
+ char connect_msg[sizeof(*hdr) + sizeof(*payload)] = { 0 };
+ int ret;
+
+ if (omx->state == OMX_CONNECTED) {
+ dev_dbg(omxserv->dev, "endpoint already connected\n");
+ return -EISCONN;
+ }
+
+ hdr = (struct omx_msg_hdr *)connect_msg;
+ hdr->type = OMX_CONN_REQ;
+ hdr->flags = 0;
+ hdr->len = strlen(omxname) + 1;
+ payload = (struct omx_conn_req *)hdr->data;
+ strcpy(payload->name, omxname);
+
+ init_completion(&omx->reply_arrived);
+
+ /* send a conn req to the remote OMX connection service. use
+ * the new local address that was just allocated by ->open */
+ ret = rpmsg_send_offchannel(omxserv->rpdev, omx->ept->addr,
+ omxserv->rpdev->dst, connect_msg, sizeof(connect_msg));
+ if (ret) {
+ dev_err(omxserv->dev, "rpmsg_send failed: %d\n", ret);
+ return ret;
+ }
+
+ /* wait until a connection reply arrives or 5 seconds elapse */
+ ret = wait_for_completion_interruptible_timeout(&omx->reply_arrived,
+ msecs_to_jiffies(5000));
+ if (omx->state == OMX_CONNECTED)
+ return 0;
+
+ if (omx->state == OMX_FAIL)
+ return -ENXIO;
+
+ if (ret) {
+ dev_err(omxserv->dev, "premature wakeup: %d\n", ret);
+ return -EIO;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static
+long rpmsg_omx_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct rpmsg_omx_instance *omx = filp->private_data;
+ struct rpmsg_omx_service *omxserv = omx->omxserv;
+ char buf[48];
+ int ret = 0;
+
+ dev_dbg(omxserv->dev, "%s: cmd %d, arg 0x%lx\n", __func__, cmd, arg);
+
+ if (_IOC_TYPE(cmd) != OMX_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) > OMX_IOC_MAXNR)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case OMX_IOCCONNECT:
+ ret = copy_from_user(buf, (char __user *) arg, sizeof(buf));
+ if (ret) {
+ dev_err(omxserv->dev,
+ "%s: %d: copy_from_user fail: %d\n", __func__,
+ _IOC_NR(cmd), ret);
+ ret = -EFAULT;
+ break;
+ }
+ /* make sure user input is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+ ret = rpmsg_omx_connect(omx, buf);
+ break;
+#ifdef CONFIG_ION_OMAP
+ case OMX_IOCIONREGISTER:
+ {
+ struct ion_fd_data data;
+ if (copy_from_user(&data, (char __user *) arg, sizeof(data))) {
+ dev_err(omxserv->dev,
+ "%s: %d: copy_from_user fail: %d\n", __func__,
+ _IOC_NR(cmd), ret);
+ return -EFAULT;
+ }
+ data.handle = ion_import_fd(omx->ion_client, data.fd);
+ if (IS_ERR(data.handle))
+ data.handle = NULL;
+ if (copy_to_user(&data, (char __user *) arg, sizeof(data))) {
+ dev_err(omxserv->dev,
+ "%s: %d: copy_to_user fail: %d\n", __func__,
+ _IOC_NR(cmd), ret);
+ return -EFAULT;
+ }
+ break;
+ }
+ case OMX_IOCIONUNREGISTER:
+ {
+ struct ion_fd_data data;
+ if (copy_from_user(&data, (char __user *) arg, sizeof(data))) {
+ dev_err(omxserv->dev,
+ "%s: %d: copy_from_user fail: %d\n", __func__,
+ _IOC_NR(cmd), ret);
+ return -EFAULT;
+ }
+ ion_free(omx->ion_client, data.handle);
+ if (copy_to_user(&data, (char __user *) arg, sizeof(data))) {
+ dev_err(omxserv->dev,
+ "%s: %d: copy_to_user fail: %d\n", __func__,
+ _IOC_NR(cmd), ret);
+ return -EFAULT;
+ }
+ break;
+ }
+#endif
+ default:
+ dev_warn(omxserv->dev, "unhandled ioctl cmd: %d\n", cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static int rpmsg_omx_open(struct inode *inode, struct file *filp)
+{
+ struct rpmsg_omx_service *omxserv;
+ struct rpmsg_omx_instance *omx;
+
+ omxserv = container_of(inode->i_cdev, struct rpmsg_omx_service, cdev);
+
+ if (omxserv->state == OMX_SERVICE_DOWN)
+ if (filp->f_flags & O_NONBLOCK ||
+ wait_for_completion_interruptible(&omxserv->comp))
+ return -EBUSY;
+
+ omx = kzalloc(sizeof(*omx), GFP_KERNEL);
+ if (!omx)
+ return -ENOMEM;
+
+ mutex_init(&omx->lock);
+ skb_queue_head_init(&omx->queue);
+ init_waitqueue_head(&omx->readq);
+ omx->omxserv = omxserv;
+ omx->state = OMX_UNCONNECTED;
+
+ /* assign a new, unique, local address and associate omx with it */
+ omx->ept = rpmsg_create_ept(omxserv->rpdev, rpmsg_omx_cb, omx,
+ RPMSG_ADDR_ANY);
+ if (!omx->ept) {
+ dev_err(omxserv->dev, "create ept failed\n");
+ kfree(omx);
+ return -ENOMEM;
+ }
+#ifdef CONFIG_ION_OMAP
+ omx->ion_client = ion_client_create(omap_ion_device,
+					(1 << ION_HEAP_TYPE_CARVEOUT) |
+ (1 << OMAP_ION_HEAP_TYPE_TILER),
+ "rpmsg-omx");
+#endif
+
+ /* associate filp with the new omx instance */
+ filp->private_data = omx;
+ mutex_lock(&omxserv->lock);
+ list_add(&omx->next, &omxserv->list);
+ mutex_unlock(&omxserv->lock);
+
+ dev_info(omxserv->dev, "local addr assigned: 0x%x\n", omx->ept->addr);
+
+ return 0;
+}
+
+static int rpmsg_omx_release(struct inode *inode, struct file *filp)
+{
+ struct rpmsg_omx_instance *omx = filp->private_data;
+ struct rpmsg_omx_service *omxserv = omx->omxserv;
+ char kbuf[512];
+ struct omx_msg_hdr *hdr = (struct omx_msg_hdr *) kbuf;
+ struct omx_disc_req *disc_req = (struct omx_disc_req *)hdr->data;
+ int use, ret;
+
+ /* todo: release resources here */
+ /*
+ * If state == fail, remote processor crashed, so don't send it
+ * any message.
+ */
+ if (omx->state == OMX_FAIL)
+ goto out;
+
+ /* send a disconnect msg with the OMX instance addr */
+ hdr->type = OMX_DISCONNECT;
+ hdr->flags = 0;
+ hdr->len = sizeof(struct omx_disc_req);
+ disc_req->addr = omx->dst;
+ use = sizeof(*hdr) + hdr->len;
+
+ dev_info(omxserv->dev, "Disconnecting from OMX service at %d\n",
+ omx->dst);
+
+ /* send the msg to the remote OMX connection service */
+ ret = rpmsg_send_offchannel(omxserv->rpdev, omx->ept->addr,
+ omxserv->rpdev->dst, kbuf, use);
+ if (ret) {
+ dev_err(omxserv->dev, "rpmsg_send failed: %d\n", ret);
+ return ret;
+ }
+ rpmsg_destroy_ept(omx->ept);
+out:
+#ifdef CONFIG_ION_OMAP
+ ion_client_destroy(omx->ion_client);
+#endif
+ mutex_lock(&omxserv->lock);
+ list_del(&omx->next);
+ mutex_unlock(&omxserv->lock);
+ kfree(omx);
+
+ return 0;
+}
+
+static ssize_t rpmsg_omx_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *offp)
+{
+ struct rpmsg_omx_instance *omx = filp->private_data;
+ struct sk_buff *skb;
+ int use;
+
+ if (mutex_lock_interruptible(&omx->lock))
+ return -ERESTARTSYS;
+
+ if (omx->state == OMX_FAIL) {
+ mutex_unlock(&omx->lock);
+ return -ENXIO;
+ }
+
+ if (omx->state != OMX_CONNECTED) {
+ mutex_unlock(&omx->lock);
+ return -ENOTCONN;
+ }
+
+ /* nothing to read ? */
+ if (skb_queue_empty(&omx->queue)) {
+ mutex_unlock(&omx->lock);
+ /* non-blocking requested ? return now */
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ /* otherwise block, and wait for data */
+ if (wait_event_interruptible(omx->readq,
+ (!skb_queue_empty(&omx->queue) ||
+ omx->state == OMX_FAIL)))
+ return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&omx->lock))
+ return -ERESTARTSYS;
+ }
+
+ if (omx->state == OMX_FAIL) {
+ mutex_unlock(&omx->lock);
+ return -ENXIO;
+ }
+
+ skb = skb_dequeue(&omx->queue);
+ if (!skb) {
+ mutex_unlock(&omx->lock);
+		dev_err(omx->omxserv->dev, "err: is rpmsg_omx racy?\n");
+ return -EIO;
+ }
+
+ mutex_unlock(&omx->lock);
+
+ use = min(len, skb->len);
+
+ if (copy_to_user(buf, skb->data, use)) {
+ dev_err(omx->omxserv->dev, "%s: copy_to_user fail\n", __func__);
+ use = -EFAULT;
+ }
+
+ kfree_skb(skb);
+ return use;
+}
+
+static ssize_t rpmsg_omx_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct rpmsg_omx_instance *omx = filp->private_data;
+ struct rpmsg_omx_service *omxserv = omx->omxserv;
+ char kbuf[512];
+ struct omx_msg_hdr *hdr = (struct omx_msg_hdr *) kbuf;
+ int use, ret;
+
+ if (omx->state != OMX_CONNECTED)
+ return -ENOTCONN;
+
+ /*
+ * for now, limit msg size to 512 bytes (incl. header).
+ * (note: rpmsg's limit is even tighter. this whole thing needs fixing)
+ */
+ use = min(sizeof(kbuf) - sizeof(*hdr), len);
+
+ /*
+ * copy the data. Later, number of copies can be optimized if found to
+ * be significant in real use cases
+ */
+ if (copy_from_user(hdr->data, ubuf, use))
+		return -EFAULT;
+
+ ret = _rpmsg_omx_map_buf(omx, hdr->data);
+ if (ret < 0)
+ return ret;
+
+ hdr->type = OMX_RAW_MSG;
+ hdr->flags = 0;
+ hdr->len = use;
+
+ use += sizeof(*hdr);
+
+ ret = rpmsg_send_offchannel(omxserv->rpdev, omx->ept->addr,
+ omx->dst, kbuf, use);
+ if (ret) {
+ dev_err(omxserv->dev, "rpmsg_send failed: %d\n", ret);
+ return ret;
+ }
+
+ return use;
+}
+
+static
+unsigned int rpmsg_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct rpmsg_omx_instance *omx = filp->private_data;
+ unsigned int mask = 0;
+
+ if (mutex_lock_interruptible(&omx->lock))
+ return -ERESTARTSYS;
+
+ poll_wait(filp, &omx->readq, wait);
+ if (omx->state == OMX_FAIL) {
+ mutex_unlock(&omx->lock);
+ return -ENXIO;
+ }
+
+ if (!skb_queue_empty(&omx->queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* implement missing rpmsg virtio functionality here */
+ if (true)
+ mask |= POLLOUT | POLLWRNORM;
+
+ mutex_unlock(&omx->lock);
+
+ return mask;
+}
+
+static const struct file_operations rpmsg_omx_fops = {
+ .open = rpmsg_omx_open,
+ .release = rpmsg_omx_release,
+ .unlocked_ioctl = rpmsg_omx_ioctl,
+ .read = rpmsg_omx_read,
+ .write = rpmsg_omx_write,
+ .poll = rpmsg_poll,
+ .owner = THIS_MODULE,
+};
+
+static int rpmsg_omx_probe(struct rpmsg_channel *rpdev)
+{
+ int ret, major, minor;
+ struct rpmsg_omx_service *omxserv = NULL, *tmp;
+
+ if (!idr_pre_get(&rpmsg_omx_services, GFP_KERNEL)) {
+		dev_err(&rpdev->dev, "idr_pre_get failed\n");
+ return -ENOMEM;
+ }
+
+ /* dynamically assign a new minor number */
+ spin_lock(&rpmsg_omx_services_lock);
+ ret = idr_get_new(&rpmsg_omx_services, omxserv, &minor);
+ if (ret) {
+ spin_unlock(&rpmsg_omx_services_lock);
+ dev_err(&rpdev->dev, "failed to idr_get_new: %d\n", ret);
+ return ret;
+ }
+
+ /* look for an already created omx service */
+ list_for_each_entry(tmp, &rpmsg_omx_services_list, next) {
+ if (tmp->minor == minor) {
+ omxserv = tmp;
+ idr_replace(&rpmsg_omx_services, omxserv, minor);
+ break;
+ }
+ }
+ spin_unlock(&rpmsg_omx_services_lock);
+ if (omxserv)
+ goto serv_up;
+
+ omxserv = kzalloc(sizeof(*omxserv), GFP_KERNEL);
+ if (!omxserv) {
+ dev_err(&rpdev->dev, "kzalloc failed\n");
+ ret = -ENOMEM;
+ goto rem_idr;
+ }
+
+ spin_lock(&rpmsg_omx_services_lock);
+ idr_replace(&rpmsg_omx_services, omxserv, minor);
+ spin_unlock(&rpmsg_omx_services_lock);
+ INIT_LIST_HEAD(&omxserv->list);
+ mutex_init(&omxserv->lock);
+ init_completion(&omxserv->comp);
+
+ list_add(&omxserv->next, &rpmsg_omx_services_list);
+
+ major = MAJOR(rpmsg_omx_dev);
+
+ cdev_init(&omxserv->cdev, &rpmsg_omx_fops);
+ omxserv->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&omxserv->cdev, MKDEV(major, minor), 1);
+ if (ret) {
+ dev_err(&rpdev->dev, "cdev_add failed: %d\n", ret);
+ goto free_omx;
+ }
+
+ omxserv->dev = device_create(rpmsg_omx_class, &rpdev->dev,
+ MKDEV(major, minor), NULL,
+ "rpmsg-omx%d", minor);
+ if (IS_ERR(omxserv->dev)) {
+ ret = PTR_ERR(omxserv->dev);
+ dev_err(&rpdev->dev, "device_create failed: %d\n", ret);
+ goto clean_cdev;
+ }
+serv_up:
+ omxserv->rpdev = rpdev;
+ omxserv->minor = minor;
+ omxserv->state = OMX_SERVICE_UP;
+ dev_set_drvdata(&rpdev->dev, omxserv);
+ complete_all(&omxserv->comp);
+
+ dev_info(omxserv->dev, "new OMX connection srv channel: %u -> %u!\n",
+ rpdev->src, rpdev->dst);
+ return 0;
+
+clean_cdev:
+ cdev_del(&omxserv->cdev);
+free_omx:
+ kfree(omxserv);
+rem_idr:
+ spin_lock(&rpmsg_omx_services_lock);
+ idr_remove(&rpmsg_omx_services, minor);
+ spin_unlock(&rpmsg_omx_services_lock);
+ return ret;
+}
+
+static void __devexit rpmsg_omx_remove(struct rpmsg_channel *rpdev)
+{
+ struct rpmsg_omx_service *omxserv = dev_get_drvdata(&rpdev->dev);
+ int major = MAJOR(rpmsg_omx_dev);
+ struct rpmsg_omx_instance *omx;
+
+ dev_info(omxserv->dev, "rpmsg omx driver is removed\n");
+
+ spin_lock(&rpmsg_omx_services_lock);
+ idr_remove(&rpmsg_omx_services, omxserv->minor);
+ spin_unlock(&rpmsg_omx_services_lock);
+
+ mutex_lock(&omxserv->lock);
+ /*
+	 * If there are omx instances still around, this is a recovery.
+	 * TODO: make sure it really is a recovery.
+ */
+ if (list_empty(&omxserv->list)) {
+ device_destroy(rpmsg_omx_class, MKDEV(major, omxserv->minor));
+ cdev_del(&omxserv->cdev);
+ list_del(&omxserv->next);
+ mutex_unlock(&omxserv->lock);
+ kfree(omxserv);
+ return;
+ }
+ /* If it is a recovery, don't clean the omxserv */
+ init_completion(&omxserv->comp);
+ omxserv->state = OMX_SERVICE_DOWN;
+ list_for_each_entry(omx, &omxserv->list, next) {
+ /* set omx instance to fail state */
+ omx->state = OMX_FAIL;
+ /* unblock any pending omx thread*/
+ complete_all(&omx->reply_arrived);
+ wake_up_interruptible(&omx->readq);
+ }
+ mutex_unlock(&omxserv->lock);
+}
+
+static void rpmsg_omx_driver_cb(struct rpmsg_channel *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ dev_warn(&rpdev->dev, "uhm, unexpected message\n");
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+}
+
+static struct rpmsg_device_id rpmsg_omx_id_table[] = {
+ { .name = "rpmsg-omx" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, rpmsg_omx_id_table);
+
+static struct rpmsg_driver rpmsg_omx_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = rpmsg_omx_id_table,
+ .probe = rpmsg_omx_probe,
+ .callback = rpmsg_omx_driver_cb,
+ .remove = __devexit_p(rpmsg_omx_remove),
+};
+
+static int __init init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&rpmsg_omx_dev, 0, MAX_OMX_DEVICES,
+ KBUILD_MODNAME);
+ if (ret) {
+ pr_err("alloc_chrdev_region failed: %d\n", ret);
+ goto out;
+ }
+
+ rpmsg_omx_class = class_create(THIS_MODULE, KBUILD_MODNAME);
+ if (IS_ERR(rpmsg_omx_class)) {
+ ret = PTR_ERR(rpmsg_omx_class);
+ pr_err("class_create failed: %d\n", ret);
+ goto unreg_region;
+ }
+
+ return register_rpmsg_driver(&rpmsg_omx_driver);
+
+unreg_region:
+ unregister_chrdev_region(rpmsg_omx_dev, MAX_OMX_DEVICES);
+out:
+ return ret;
+}
+module_init(init);
+
+static void __exit fini(void)
+{
+ struct rpmsg_omx_service *omxserv, *tmp;
+ int major = MAJOR(rpmsg_omx_dev);
+
+ unregister_rpmsg_driver(&rpmsg_omx_driver);
+ list_for_each_entry_safe(omxserv, tmp, &rpmsg_omx_services_list, next) {
+ device_destroy(rpmsg_omx_class, MKDEV(major, omxserv->minor));
+ cdev_del(&omxserv->cdev);
+ list_del(&omxserv->next);
+ kfree(omxserv);
+ }
+ class_destroy(rpmsg_omx_class);
+ unregister_chrdev_region(rpmsg_omx_dev, MAX_OMX_DEVICES);
+}
+module_exit(fini);
+
+MODULE_DESCRIPTION("OMX offloading rpmsg driver");
+MODULE_LICENSE("GPL v2");
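
A user-space sketch, for illustration only (not part of this patch), of how the character device exposed by rpmsg_omx.c above might be used. The node name and the OMX_IOCCONNECT ioctl come from the driver; the service name string and payload sizes are made-up examples, the raw payload layout is owned by the remote OMX stack, and the sketch assumes the rpmsg_omx ioctl definitions are visible to user space.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rpmsg_omx.h>	/* OMX_IOCCONNECT */

int main(void)
{
	char name[48] = "OMX.TI.EXAMPLE";	/* driver copies a 48-byte buffer */
	char packet[64] = { 0 };	/* placeholder omx_packet; real clients
					 * build this through the OMX RPC layer */
	char reply[512];
	int fd;
	ssize_t n;

	fd = open("/dev/rpmsg-omx0", O_RDWR);	/* node created by device_create() */
	if (fd < 0)
		return 1;

	/* bind this file descriptor to a remote OMX service instance */
	if (ioctl(fd, OMX_IOCCONNECT, name) < 0)
		goto out;

	/* write() wraps the payload in an OMX_RAW_MSG and sends it;
	 * read() blocks until the remote side replies (use O_NONBLOCK to poll) */
	if (write(fd, packet, sizeof(packet)) > 0) {
		n = read(fd, reply, sizeof(reply));
		if (n > 0)
			printf("got %zd bytes back\n", n);
	}
out:
	close(fd);
	return 0;
}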
diff --git a/drivers/rpmsg/rpmsg_resmgr.c b/drivers/rpmsg/rpmsg_resmgr.c
new file mode 100644
index 0000000..c98a1b9
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_resmgr.c
@@ -0,0 +1,1211 @@
+/*
+ * Remote processor resource manager
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Miguel Vadillo <vadillo@ti.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/remoteproc.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/rpmsg_resmgr.h>
+#include <linux/pm_runtime.h>
+#include <plat/dmtimer.h>
+#include <plat/rpres.h>
+#include <plat/clock.h>
+#include <plat/dma.h>
+#include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
+
+#define NAME_SIZE 50
+#define REGULATOR_MAX 1
+#define NUM_SRC_CLK 3
+#define AUX_CLK_MIN 0
+#define AUX_CLK_MAX 5
+#define GPTIMERS_MAX 11
+#define MHZ 1000000
+#define MAX_MSG (sizeof(struct rprm_ack) + sizeof(struct rprm_sdma))
+
+static struct dentry *rprm_dbg;
+
+static char *regulator_name[] = {
+ "cam2pwr"
+};
+
+static char *clk_src_name[] = {
+ "sys_clkin_ck",
+ "dpll_core_m3x2_ck",
+ "dpll_per_m3x2_ck",
+};
+
+static const char * const rnames[] = {
+ [RPRM_GPTIMER] = "GP Timer",
+ [RPRM_L3BUS] = "L3 bus",
+ [RPRM_IVAHD] = "IVA HD",
+ [RPRM_IVASEQ0] = "IVA SEQ0",
+ [RPRM_IVASEQ1] = "IVA SEQ1",
+ [RPRM_ISS] = "ISS",
+ [RPRM_SL2IF] = "SL2IF",
+ [RPRM_FDIF] = "FDIF",
+ [RPRM_AUXCLK] = "AUXCLK",
+ [RPRM_REGULATOR] = "REGULATOR",
+ [RPRM_GPIO] = "GPIO",
+ [RPRM_SDMA] = "SDMA",
+ [RPRM_IPU] = "IPU",
+ [RPRM_DSP] = "DSP",
+ [RPRM_I2C] = "I2C",
+};
+
+static const char *rname(u32 type)
+{
+ if (type >= RPRM_MAX)
+ return "(invalid)";
+ return rnames[type];
+}
+
+struct rprm_elem {
+ struct list_head next;
+ u32 src;
+ u32 type;
+ u32 id;
+ void *handle;
+ u32 base;
+ struct rprm_constraints_data *constraints;
+ char res[];
+};
+
+struct rprm {
+ struct list_head res_list;
+ struct idr conn_list;
+ struct idr id_list;
+ struct mutex lock;
+ struct dentry *dbg_dir;
+};
+
+struct rprm_auxclk_depot {
+ struct clk *aux_clk;
+ struct clk *src;
+};
+
+struct rprm_regulator_depot {
+ struct regulator *reg_p;
+ u32 orig_uv;
+};
+
+static struct rprm_constraints_data def_data = {
+ .frequency = 0,
+ .bandwidth = -1,
+ .latency = -1,
+};
+
+static int _get_rprm_size(u32 type)
+{
+ switch (type) {
+ case RPRM_GPTIMER:
+ return sizeof(struct rprm_gpt);
+ case RPRM_AUXCLK:
+ return sizeof(struct rprm_auxclk);
+ case RPRM_REGULATOR:
+ return sizeof(struct rprm_regulator);
+ case RPRM_GPIO:
+ return sizeof(struct rprm_gpio);
+ case RPRM_SDMA:
+ return sizeof(struct rprm_sdma);
+ case RPRM_I2C:
+ return sizeof(struct rprm_i2c);
+ }
+ return 0;
+}
+
+static int rprm_gptimer_request(struct rprm_elem *e, struct rprm_gpt *obj)
+{
+ int ret;
+ struct omap_dm_timer *gpt;
+
+ if (obj->id > GPTIMERS_MAX) {
+ pr_err("Invalid gptimer %u\n", obj->id);
+ return -EINVAL;
+ }
+
+ gpt = omap_dm_timer_request_specific(obj->id);
+ if (!gpt)
+ return -EBUSY;
+
+ ret = omap_dm_timer_set_source(gpt, obj->src_clk);
+ if (!ret)
+ e->handle = gpt;
+ else
+ omap_dm_timer_free(gpt);
+
+ return ret;
+}
+
+static void rprm_gptimer_release(struct omap_dm_timer *obj)
+{
+ omap_dm_timer_free(obj);
+}
+
+static int rprm_auxclk_request(struct rprm_elem *e, struct rprm_auxclk *obj)
+{
+ int ret;
+ char clk_name[NAME_SIZE];
+ char src_clk_name[NAME_SIZE];
+ struct rprm_auxclk_depot *acd;
+ struct clk *src_parent;
+
+ if ((obj->id < AUX_CLK_MIN) || (obj->id > AUX_CLK_MAX)) {
+ pr_err("Invalid aux_clk %d\n", obj->id);
+ return -EINVAL;
+ }
+
+ /* Create auxclks depot */
+ acd = kmalloc(sizeof(*acd), GFP_KERNEL);
+ if (!acd)
+ return -ENOMEM;
+
+ sprintf(clk_name, "auxclk%d_ck", obj->id);
+ acd->aux_clk = clk_get(NULL, clk_name);
+ if (!acd->aux_clk) {
+ pr_err("%s: unable to get clock %s\n", __func__, clk_name);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (unlikely(acd->aux_clk->usecount))
+		pr_warn("auxclk%d already has other users\n", obj->id);
+
+ sprintf(src_clk_name, "auxclk%d_src_ck", obj->id);
+ acd->src = clk_get(NULL, src_clk_name);
+ if (!acd->src) {
+ pr_err("%s: unable to get clock %s\n", __func__, src_clk_name);
+ ret = -EIO;
+ goto error_aux;
+ }
+
+ src_parent = clk_get(NULL, clk_src_name[obj->parent_src_clk]);
+ if (!src_parent) {
+ pr_err("%s: unable to get parent clock %s\n", __func__,
+ clk_src_name[obj->parent_src_clk]);
+ ret = -EIO;
+ goto error_aux_src;
+ }
+
+ ret = clk_set_rate(src_parent, (obj->parent_src_clk_rate * MHZ));
+ if (ret) {
+ pr_err("%s: rate not supported by %s\n", __func__,
+ clk_src_name[obj->parent_src_clk]);
+ goto error_aux_src_parent;
+ }
+
+ ret = clk_set_parent(acd->src, src_parent);
+ if (ret) {
+ pr_err("%s: unable to set clk %s as parent of aux_clk %s\n",
+ __func__,
+ clk_src_name[obj->parent_src_clk],
+ src_clk_name);
+ goto error_aux_src_parent;
+ }
+
+ ret = clk_enable(acd->src);
+ if (ret) {
+ pr_err("%s: error enabling %s\n", __func__, src_clk_name);
+ goto error_aux_src_parent;
+ }
+
+ ret = clk_set_rate(acd->aux_clk, (obj->clk_rate * MHZ));
+ if (ret) {
+ pr_err("%s: rate not supported by %s\n", __func__, clk_name);
+ goto error_aux_enable;
+ }
+
+ ret = clk_enable(acd->aux_clk);
+ if (ret) {
+ pr_err("%s: error enabling %s\n", __func__, clk_name);
+ goto error_aux_enable;
+ }
+ clk_put(src_parent);
+
+ e->handle = acd;
+
+ return 0;
+error_aux_enable:
+ clk_disable(acd->src);
+error_aux_src_parent:
+ clk_put(src_parent);
+error_aux_src:
+ clk_put(acd->src);
+error_aux:
+ clk_put(acd->aux_clk);
+error:
+ kfree(acd);
+
+ return ret;
+}
+
+static void rprm_auxclk_release(struct rprm_auxclk_depot *obj)
+{
+ clk_disable((struct clk *)obj->aux_clk);
+ clk_put((struct clk *)obj->aux_clk);
+ clk_disable((struct clk *)obj->src);
+ clk_put((struct clk *)obj->src);
+
+ kfree(obj);
+}
+
+static
+int rprm_regulator_request(struct rprm_elem *e, struct rprm_regulator *obj)
+{
+ int ret;
+ struct rprm_regulator_depot *rd;
+ char *reg_name;
+
+	if (!obj->id || obj->id > REGULATOR_MAX) {
+ pr_err("Invalid regulator %d\n", obj->id);
+ return -EINVAL;
+ }
+
+ /* Create regulator depot */
+ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ reg_name = regulator_name[obj->id - 1];
+ rd->reg_p = regulator_get_exclusive(NULL, reg_name);
+ if (IS_ERR_OR_NULL(rd->reg_p)) {
+ pr_err("%s: error providing regulator %s\n", __func__, reg_name);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ rd->orig_uv = regulator_get_voltage(rd->reg_p);
+
+ ret = regulator_set_voltage(rd->reg_p, obj->min_uv, obj->max_uv);
+ if (ret) {
+ pr_err("%s: error setting %s voltage\n", __func__, reg_name);
+ goto error_reg;
+ }
+
+ ret = regulator_enable(rd->reg_p);
+ if (ret) {
+ pr_err("%s: error enabling %s ldo\n", __func__, reg_name);
+ goto error_reg;
+ }
+
+ e->handle = rd;
+
+ return 0;
+
+error_reg:
+ regulator_put(rd->reg_p);
+error:
+ kfree(rd);
+
+ return ret;
+}
+
+static void rprm_regulator_release(struct rprm_regulator_depot *obj)
+{
+ int ret;
+
+ ret = regulator_disable(obj->reg_p);
+ if (ret) {
+ pr_err("%s: error disabling ldo\n", __func__);
+ return;
+ }
+
+	/* Restore original voltage */
+ ret = regulator_set_voltage(obj->reg_p, obj->orig_uv, obj->orig_uv);
+ if (ret) {
+ pr_err("%s: error restoring voltage\n", __func__);
+ return;
+ }
+
+ regulator_put(obj->reg_p);
+ kfree(obj);
+}
+
+static int rprm_gpio_request(struct rprm_elem *e, struct rprm_gpio *obj)
+{
+ int ret;
+ struct rprm_gpio *gd;
+
+ /* Create gpio depot */
+ gd = kmalloc(sizeof(*gd), GFP_KERNEL);
+ if (!gd)
+ return -ENOMEM;
+
+	ret = gpio_request(obj->id, "rpmsg_resmgr");
+	if (ret) {
+		pr_err("%s: error providing gpio %d\n", __func__, obj->id);
+		kfree(gd);
+		return ret;
+	}
+
+ e->handle = memcpy(gd, obj, sizeof(*obj));
+
+ return ret;
+}
+
+static void rprm_gpio_release(struct rprm_gpio *obj)
+{
+ gpio_free(obj->id);
+ kfree(obj);
+}
+
+static int rprm_sdma_request(struct rprm_elem *e, struct rprm_sdma *obj)
+{
+ int ret;
+ int sdma;
+ int i;
+ struct rprm_sdma *sd;
+
+ /* Create sdma depot */
+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+	if (obj->num_chs > MAX_NUM_SDMA_CHANNELS) {
+		pr_err("Not able to provide %u channels\n", obj->num_chs);
+		kfree(sd);
+		return -EINVAL;
+	}
+
+ for (i = 0; i < obj->num_chs; i++) {
+ ret = omap_request_dma(0, "rpmsg_resmgr", NULL, NULL, &sdma);
+ if (ret) {
+ pr_err("Error providing sdma channel %d\n", ret);
+ goto err;
+ }
+ obj->channels[i] = sdma;
+ pr_debug("Providing sdma ch %d\n", sdma);
+ }
+
+ e->handle = memcpy(sd, obj, sizeof(*obj));
+
+ return 0;
+err:
+ while (i--)
+ omap_free_dma(obj->channels[i]);
+ kfree(sd);
+ return ret;
+}
+
+static void rprm_sdma_release(struct rprm_sdma *obj)
+{
+ int i = obj->num_chs;
+
+ while (i--) {
+ omap_free_dma(obj->channels[i]);
+ pr_debug("Releasing sdma ch %d\n", obj->channels[i]);
+ }
+ kfree(obj);
+}
+
+static int rprm_i2c_request(struct rprm_elem *e, struct rprm_i2c *obj)
+{
+ struct device *i2c_dev;
+ struct i2c_adapter *adapter;
+ char i2c_name[NAME_SIZE];
+ int ret = -EINVAL;
+
+ sprintf(i2c_name, "i2c%d", obj->id);
+ i2c_dev = omap_hwmod_name_get_dev(i2c_name);
+ if (IS_ERR_OR_NULL(i2c_dev)) {
+ pr_err("%s: unable to lookup %s\n", __func__, i2c_name);
+ return ret;
+ }
+
+ adapter = i2c_get_adapter(obj->id);
+ if (!adapter) {
+ pr_err("%s: could not get i2c%d adapter\n", __func__, obj->id);
+ return -EINVAL;
+ }
+ i2c_detect_ext_master(adapter);
+ i2c_put_adapter(adapter);
+
+ ret = pm_runtime_get_sync(i2c_dev);
+ /*
+ * pm_runtime_get_sync can return 1 in case it is already active,
+ * change it to 0 to indicate success.
+ */
+	if (ret == 1)
+		ret = 0;
+ if (!ret)
+ e->handle = i2c_dev;
+ else
+ dev_warn(i2c_dev, "%s: failed get sync %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int rprm_i2c_release(struct device *i2c_dev)
+{
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(i2c_dev)) {
+ pr_err("%s: invalid device passed\n", __func__);
+ return ret;
+ }
+
+ ret = pm_runtime_put_sync(i2c_dev);
+ if (ret)
+ dev_warn(i2c_dev, "%s: failed put sync %d\n", __func__, ret);
+
+ return ret;
+
+}
+
+static const char *_get_rpres_name(int type)
+{
+ switch (type) {
+ case RPRM_IVAHD:
+ return "rpres_iva";
+ case RPRM_IVASEQ0:
+ return "rpres_iva_seq0";
+ case RPRM_IVASEQ1:
+ return "rpres_iva_seq1";
+ case RPRM_ISS:
+ return "rpres_iss";
+ case RPRM_FDIF:
+ return "rpres_fdif";
+ case RPRM_SL2IF:
+ return "rpres_sl2if";
+ }
+ return "";
+}
+
+static int _rpres_set_constraints(struct rprm_elem *e, u32 type, long val)
+{
+ switch (type) {
+ case RPRM_SCALE:
+ return rpres_set_constraints(e->handle,
+ RPRES_CONSTRAINT_SCALE,
+ val);
+ case RPRM_LATENCY:
+ return rpres_set_constraints(e->handle,
+ RPRES_CONSTRAINT_LATENCY,
+ val);
+ case RPRM_BANDWIDTH:
+ return rpres_set_constraints(e->handle,
+ RPRES_CONSTRAINT_BANDWIDTH,
+ val);
+ }
+ pr_err("Invalid constraint\n");
+ return -EINVAL;
+}
+
+static int _rproc_set_constraints(struct rprm_elem *e, u32 type, long val)
+{
+ switch (type) {
+ case RPRM_SCALE:
+ return rproc_set_constraints(e->handle,
+ RPROC_CONSTRAINT_SCALE,
+ val);
+ case RPRM_LATENCY:
+ return rproc_set_constraints(e->handle,
+ RPROC_CONSTRAINT_LATENCY,
+ val);
+ case RPRM_BANDWIDTH:
+ return rproc_set_constraints(e->handle,
+ RPROC_CONSTRAINT_BANDWIDTH,
+ val);
+ }
+ pr_err("Invalid constraint\n");
+ return -EINVAL;
+}
+
+static
+int _set_constraints(struct rprm_elem *e, struct rprm_constraints_data *c)
+{
+ int ret = -EINVAL;
+ u32 mask = 0;
+ int (*_set_constraints_func)(struct rprm_elem *, u32 type, long val);
+
+ switch (e->type) {
+ case RPRM_IVAHD:
+ case RPRM_ISS:
+ case RPRM_FDIF:
+ _set_constraints_func = _rpres_set_constraints;
+ break;
+ case RPRM_IPU:
+ _set_constraints_func = _rproc_set_constraints;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (c->mask & RPRM_SCALE) {
+ ret = _set_constraints_func(e, RPRM_SCALE, c->frequency);
+ if (ret)
+ goto err;
+ mask |= RPRM_SCALE;
+ e->constraints->frequency = c->frequency;
+ }
+
+ if (c->mask & RPRM_LATENCY) {
+ ret = _set_constraints_func(e, RPRM_LATENCY, c->latency);
+ if (ret)
+ goto err;
+ mask |= RPRM_LATENCY;
+ e->constraints->latency = c->latency;
+ }
+
+ if (c->mask & RPRM_BANDWIDTH) {
+ ret = _set_constraints_func(e, RPRM_BANDWIDTH, c->bandwidth);
+ if (ret)
+ goto err;
+ mask |= RPRM_BANDWIDTH;
+ e->constraints->bandwidth = c->bandwidth;
+ }
+err:
+ c->mask = mask;
+ return ret;
+}
+
+static int rprm_set_constraints(struct rprm *rprm, u32 addr, int res_id,
+ void *data, bool set)
+{
+ int ret = 0;
+ struct rprm_elem *e;
+
+ mutex_lock(&rprm->lock);
+ if (!idr_find(&rprm->conn_list, addr)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ e = idr_find(&rprm->id_list, res_id);
+ if (!e || e->src != addr) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!e->constraints) {
+ pr_warn("No constraints\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (set) {
+ ret = _set_constraints(e, data);
+ if (!ret) {
+ e->constraints->mask |=
+ ((struct rprm_constraints_data *)data)->mask;
+ goto out;
+ }
+ }
+ def_data.mask = ((struct rprm_constraints_data *)data)->mask;
+ if (def_data.mask) {
+ _set_constraints(e, &def_data);
+ e->constraints->mask &=
+ ~((struct rprm_constraints_data *)data)->mask;
+ }
+out:
+ mutex_unlock(&rprm->lock);
+ return ret;
+}
+
+
+static int rprm_rpres_request(struct rprm_elem *e, int type)
+{
+ const char *res_name = _get_rpres_name(type);
+ struct rpres *res;
+
+ e->constraints = kzalloc(sizeof(*(e->constraints)), GFP_KERNEL);
+ if (!(e->constraints))
+ return -ENOMEM;
+
+ res = rpres_get(res_name);
+
+ if (IS_ERR(res)) {
+ pr_err("%s: error requesting %s\n", __func__, res_name);
+ kfree(e->constraints);
+ return PTR_ERR(res);
+ }
+ e->handle = res;
+
+ return 0;
+}
+
+static void rprm_rpres_release(struct rpres *res)
+{
+ rpres_put(res);
+}
+
+static int rprm_rproc_request(struct rprm_elem *e, char *name)
+{
+ struct rproc *rp;
+
+ e->constraints = kzalloc(sizeof(*(e->constraints)), GFP_KERNEL);
+ if (!(e->constraints))
+ return -ENOMEM;
+
+ rp = rproc_get(name);
+ if (IS_ERR(rp)) {
+ pr_debug("Error requesting %s\n", name);
+ kfree(e->constraints);
+ return PTR_ERR(rp);
+ }
+ e->handle = rp;
+
+ return 0;
+}
+
+static void rprm_rproc_release(struct rproc *rp)
+{
+ rproc_put(rp);
+}
+
+static int _resource_free(struct rprm_elem *e)
+{
+ int ret = 0;
+ if (e->constraints && e->constraints->mask) {
+ def_data.mask = e->constraints->mask;
+ _set_constraints(e, &def_data);
+ }
+ kfree(e->constraints);
+
+ switch (e->type) {
+ case RPRM_GPTIMER:
+ rprm_gptimer_release(e->handle);
+ break;
+ case RPRM_IVAHD:
+ case RPRM_IVASEQ0:
+ case RPRM_IVASEQ1:
+ case RPRM_ISS:
+ case RPRM_SL2IF:
+ case RPRM_FDIF:
+ rprm_rpres_release(e->handle);
+ break;
+ case RPRM_IPU:
+ case RPRM_DSP:
+ rprm_rproc_release(e->handle);
+ break;
+ case RPRM_AUXCLK:
+ rprm_auxclk_release(e->handle);
+ break;
+ case RPRM_I2C:
+ ret = rprm_i2c_release(e->handle);
+ break;
+ case RPRM_REGULATOR:
+ rprm_regulator_release(e->handle);
+ break;
+ case RPRM_GPIO:
+ rprm_gpio_release(e->handle);
+ break;
+ case RPRM_SDMA:
+ rprm_sdma_release(e->handle);
+ break;
+ case RPRM_L3BUS:
+ /* ignore silently */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rprm_resource_free(struct rprm *rprm, u32 addr, int res_id)
+{
+ int ret = 0;
+ struct rprm_elem *e;
+
+ mutex_lock(&rprm->lock);
+ if (!idr_find(&rprm->conn_list, addr)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ e = idr_find(&rprm->id_list, res_id);
+ if (!e || e->src != addr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ idr_remove(&rprm->id_list, res_id);
+ list_del(&e->next);
+out:
+ mutex_unlock(&rprm->lock);
+
+ if (!ret) {
+ ret = _resource_free(e);
+ kfree(e);
+ }
+
+ return ret;
+}
+
+static int _resource_alloc(struct rprm_elem *e, int type, void *data)
+{
+ int ret = 0;
+
+ switch (type) {
+ case RPRM_GPTIMER:
+ ret = rprm_gptimer_request(e, data);
+ break;
+ case RPRM_IVAHD:
+ case RPRM_IVASEQ0:
+ case RPRM_IVASEQ1:
+ case RPRM_ISS:
+ case RPRM_SL2IF:
+ case RPRM_FDIF:
+ ret = rprm_rpres_request(e, type);
+ break;
+ case RPRM_IPU:
+ ret = rprm_rproc_request(e, "ipu");
+ break;
+ case RPRM_DSP:
+ ret = rprm_rproc_request(e, "dsp");
+ break;
+ case RPRM_AUXCLK:
+ ret = rprm_auxclk_request(e, data);
+ break;
+ case RPRM_I2C:
+ ret = rprm_i2c_request(e, data);
+ break;
+ case RPRM_REGULATOR:
+ ret = rprm_regulator_request(e, data);
+ break;
+ case RPRM_GPIO:
+ ret = rprm_gpio_request(e, data);
+ break;
+ case RPRM_SDMA:
+ ret = rprm_sdma_request(e, data);
+ break;
+ case RPRM_L3BUS:
+ /* ignore silently; */
+ break;
+ default:
+ pr_err("%s: invalid source %d!\n", __func__, type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rprm_resource_alloc(struct rprm *rprm, u32 addr, int *res_id,
+ int type, void *data)
+{
+ struct rprm_elem *e;
+ int ret;
+ int rlen = _get_rprm_size(type);
+
+ e = kzalloc(sizeof(*e) + rlen, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ ret = _resource_alloc(e, type, data);
+ if (ret) {
+ pr_err("%s: request for %d (%s) failed: %d\n", __func__,
+ type, rname(type), ret);
+ goto err_res_alloc;
+ }
+
+ mutex_lock(&rprm->lock);
+ if (!idr_find(&rprm->conn_list, addr)) {
+ pr_err("%s: addr %d not connected!\n", __func__, addr);
+ ret = -ENOTCONN;
+ goto err;
+ }
+ /*
+ * Create a resource id to avoid sending kernel address to the
+ * remote processor.
+ */
+ if (!idr_pre_get(&rprm->id_list, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ret = idr_get_new(&rprm->id_list, e, res_id);
+ if (ret)
+ goto err;
+
+ e->type = type;
+ e->src = addr;
+ e->id = *res_id;
+ memcpy(e->res, data, rlen);
+ list_add(&e->next, &rprm->res_list);
+ mutex_unlock(&rprm->lock);
+
+ return 0;
+err:
+ mutex_unlock(&rprm->lock);
+ _resource_free(e);
+err_res_alloc:
+ kfree(e);
+
+ return ret;
+}
+
+static int rprm_disconnect_client(struct rprm *rprm, u32 addr)
+{
+ struct rprm_elem *e, *tmp;
+	int ret = 0;
+
+ mutex_lock(&rprm->lock);
+ if (!idr_find(&rprm->conn_list, addr)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+ list_for_each_entry_safe(e, tmp, &rprm->res_list, next) {
+ if (e->src == addr) {
+ _resource_free(e);
+ idr_remove(&rprm->id_list, e->id);
+ list_del(&e->next);
+ kfree(e);
+ }
+ }
+
+ idr_remove(&rprm->conn_list, addr);
+out:
+ mutex_unlock(&rprm->lock);
+
+	return ret;
+}
+
+static int rpmsg_connect_client(struct rprm *rprm, u32 addr)
+{
+ int ret;
+ int tid;
+
+ mutex_lock(&rprm->lock);
+ if (idr_find(&rprm->conn_list, addr)) {
+ pr_err("Connection already opened\n");
+ ret = -EISCONN;
+ goto out;
+ }
+ if (!idr_pre_get(&rprm->conn_list, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = idr_get_new_above(&rprm->conn_list, &rprm->res_list, addr, &tid);
+ BUG_ON(addr != tid);
+out:
+ mutex_unlock(&rprm->lock);
+
+ return ret;
+}
+
+static void rprm_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ int ret;
+ struct device *dev = &rpdev->dev;
+ struct rprm *rprm = dev_get_drvdata(dev);
+ struct rprm_request *req = data;
+ char ack_msg[MAX_MSG];
+ struct rprm_ack *ack = (void *)ack_msg;
+ int r_sz = 0;
+
+ if (len < sizeof(*req)) {
+ dev_err(dev, "Bad message\n");
+ return;
+ }
+
+ dev_dbg(dev, "resource type %d\n"
+ "request type %d\n"
+		"res_id %d\n",
+ req->res_type, req->acquire, req->res_id);
+
+ switch (req->acquire) {
+ case RPRM_CONNECT:
+ ret = rpmsg_connect_client(rprm, src);
+ if (ret)
+ dev_err(dev, "connection failed! ret %d\n", ret);
+ break;
+ case RPRM_REQ_ALLOC:
+ r_sz = len - sizeof(*req);
+ if (r_sz != _get_rprm_size(req->res_type)) {
+ r_sz = 0;
+ ret = -EINVAL;
+ break;
+ }
+ ret = rprm_resource_alloc(rprm, src, &req->res_id,
+ req->res_type, req->data);
+ if (ret)
+ dev_err(dev, "resource allocation failed! ret %d\n",
+ ret);
+ break;
+ case RPRM_REQ_FREE:
+ ret = rprm_resource_free(rprm, src, req->res_id);
+ if (ret)
+ dev_err(dev, "resource release failed! ret %d\n", ret);
+ return;
+ case RPRM_DISCONNECT:
+ ret = rprm_disconnect_client(rprm, src);
+ if (ret)
+ dev_err(dev, "disconnection failed ret %d\n", ret);
+ return;
+ case RPRM_REQ_CONSTRAINTS:
+ r_sz = len - sizeof(*req);
+ if (r_sz != sizeof(struct rprm_constraints_data)) {
+ r_sz = 0;
+ ret = -EINVAL;
+ break;
+ }
+ ret = rprm_set_constraints(rprm, src, req->res_id,
+ req->data, true);
+ if (ret)
+ dev_err(dev, "set constraints failed! ret %d\n", ret);
+ break;
+ case RPRM_REL_CONSTRAINTS:
+ ret = rprm_set_constraints(rprm, src, req->res_id,
+ req->data, false);
+ if (ret)
+ dev_err(dev, "rel constraints failed! ret %d\n", ret);
+ return;
+ default:
+		dev_err(dev, "Unknown request\n");
+ ret = -EINVAL;
+ }
+
+ ack->ret = ret;
+ ack->res_type = req->res_type;
+ ack->res_id = req->res_id;
+ memcpy(ack->data, req->data, r_sz);
+
+ ret = rpmsg_sendto(rpdev, ack, sizeof(*ack) + r_sz, src);
+ if (ret)
+ dev_err(dev, "rprm ack failed: %d\n", ret);
+}
+
+static int _printf_gptimer_args(char *buf, struct rprm_gpt *obj)
+{
+ return sprintf(buf,
+ "Id:%d\n"
+ "Source:%d\n",
+ obj->id, obj->src_clk);
+}
+
+static int _printf_auxclk_args(char *buf, struct rprm_auxclk *obj)
+{
+ return sprintf(buf,
+ "Id:%d\n"
+ "Rate:%2d\n"
+ "ParentSrc:%d\n"
+ "ParentSrcRate:%d\n",
+ obj->id, obj->clk_rate, obj->parent_src_clk,
+ obj->parent_src_clk_rate);
+}
+
+static int _printf_regulator_args(char *buf, struct rprm_regulator *obj)
+{
+ return sprintf(buf,
+ "Id:%d\n"
+ "min_uV:%d\n"
+ "max_uV:%d\n",
+ obj->id, obj->min_uv, obj->max_uv);
+}
+
+static int _printf_gpio_args(char *buf, struct rprm_gpio *obj)
+{
+ return sprintf(buf, "Id:%d\n", obj->id);
+}
+
+static int _printf_i2c_args(char *buf, struct rprm_i2c *obj)
+{
+ return sprintf(buf, "Id:%d\n", obj->id);
+}
+
+static int _printf_sdma_args(char *buf, struct rprm_sdma *obj)
+{
+ int i, ret = 0;
+ ret += sprintf(buf, "NumChannels:%d\n", obj->num_chs);
+ for (i = 0 ; i < obj->num_chs; i++)
+ ret += sprintf(buf + ret, "Channel[%d]:%d\n", i,
+ obj->channels[i]);
+ return ret;
+}
+
+static int _print_res_args(char *buf, struct rprm_elem *e)
+{
+ void *res = (void *)e->res;
+
+ switch (e->type) {
+ case RPRM_GPTIMER:
+ return _printf_gptimer_args(buf, res);
+ case RPRM_AUXCLK:
+ return _printf_auxclk_args(buf, res);
+ case RPRM_I2C:
+ return _printf_i2c_args(buf, res);
+ case RPRM_REGULATOR:
+ return _printf_regulator_args(buf, res);
+ case RPRM_GPIO:
+ return _printf_gpio_args(buf, res);
+ case RPRM_SDMA:
+ return _printf_sdma_args(buf, res);
+ }
+ return 0;
+}
+
+static int _printf_constraints_args(char *buf, struct rprm_elem *e)
+{
+ return sprintf(buf,
+ "Mask:0x%x\n"
+ "Frequency:%ld\n"
+ "Latency:%ld\n"
+ "Bandwidth:%ld\n",
+ e->constraints->mask, e->constraints->frequency,
+ e->constraints->latency, e->constraints->bandwidth);
+}
+
+static ssize_t rprm_dbg_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rprm *rprm = filp->private_data;
+ struct rprm_elem *e;
+ char res[512];
+ int total = 0, c, tmp;
+ loff_t p = 0, pt;
+
+ list_for_each_entry(e, &rprm->res_list, next) {
+ c = sprintf(res,
+ "\nResource Name:%s\n"
+ "Source address:%d\n",
+ rnames[e->type], e->src);
+
+ if (_get_rprm_size(e->type))
+ c += _print_res_args(res + c, e);
+
+ if (e->constraints && e->constraints->mask)
+ c += _printf_constraints_args(res + c, e);
+
+ p += c;
+ if (*ppos >= p)
+ continue;
+ pt = c - p + *ppos;
+ tmp = simple_read_from_buffer(userbuf + total, count, &pt,
+ res, c);
+ total += tmp;
+ *ppos += tmp;
+ if (tmp - c)
+ break;
+ }
+
+ return total;
+}
+
+static int rprm_dbg_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations rprm_dbg_ops = {
+ .read = rprm_dbg_read,
+ .open = rprm_dbg_open,
+ .llseek = generic_file_llseek,
+};
+
+static int rprm_probe(struct rpmsg_channel *rpdev)
+{
+ struct rprm *rprm;
+
+ rprm = kmalloc(sizeof(*rprm), GFP_KERNEL);
+ if (!rprm)
+ return -ENOMEM;
+
+ mutex_init(&rprm->lock);
+ INIT_LIST_HEAD(&rprm->res_list);
+ idr_init(&rprm->conn_list);
+ idr_init(&rprm->id_list);
+ dev_set_drvdata(&rpdev->dev, rprm);
+
+ rprm->dbg_dir = debugfs_create_dir(dev_name(&rpdev->dev), rprm_dbg);
+ if (!rprm->dbg_dir)
+ dev_err(&rpdev->dev, "can't create debugfs dir\n");
+
+ debugfs_create_file("resources", 0400, rprm->dbg_dir, rprm,
+ &rprm_dbg_ops);
+
+ return 0;
+}
+
+static void __devexit rprm_remove(struct rpmsg_channel *rpdev)
+{
+ struct rprm *rprm = dev_get_drvdata(&rpdev->dev);
+ struct rprm_elem *e, *tmp;
+
+ dev_info(&rpdev->dev, "Enter %s\n", __func__);
+
+ if (rprm->dbg_dir)
+ debugfs_remove_recursive(rprm->dbg_dir);
+
+ mutex_lock(&rprm->lock);
+
+ /* clean up remaining resources */
+ list_for_each_entry_safe(e, tmp, &rprm->res_list, next) {
+ _resource_free(e);
+ list_del(&e->next);
+ kfree(e);
+ }
+ idr_remove_all(&rprm->id_list);
+ idr_destroy(&rprm->id_list);
+ idr_remove_all(&rprm->conn_list);
+ idr_destroy(&rprm->conn_list);
+
+ mutex_unlock(&rprm->lock);
+
+ kfree(rprm);
+}
+
+static struct rpmsg_device_id rprm_id_table[] = {
+ {
+ .name = "rpmsg-resmgr",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, rprm_id_table);
+
+static struct rpmsg_driver rprm_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = rprm_id_table,
+ .probe = rprm_probe,
+ .callback = rprm_cb,
+ .remove = __devexit_p(rprm_remove),
+};
+
+static int __init init(void)
+{
+ int r;
+
+ if (debugfs_initialized()) {
+ rprm_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!rprm_dbg)
+ pr_err("Error creating rprm debug directory\n");
+ }
+ r = register_rpmsg_driver(&rprm_driver);
+ if (r && rprm_dbg)
+ debugfs_remove_recursive(rprm_dbg);
+
+ return r;
+}
+
+static void __exit fini(void)
+{
+ if (rprm_dbg)
+ debugfs_remove_recursive(rprm_dbg);
+ unregister_rpmsg_driver(&rprm_driver);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DESCRIPTION("Remote Processor Resource Manager");
+MODULE_LICENSE("GPL v2");
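
For illustration only (not part of this patch): a sketch of the request flow a client of the "rpmsg-resmgr" service above would follow, written here as kernel code sending over an rpmsg channel. The field names match what rprm_cb() dereferences; the exact layouts of struct rprm_request and struct rprm_gpt live in linux/rpmsg_resmgr.h, so sizes and ordering are assumptions, and the timer and clock-source ids are example values.

#include <linux/rpmsg.h>
#include <linux/rpmsg_resmgr.h>
#include <linux/string.h>

static int example_request_gptimer(struct rpmsg_channel *rpdev)
{
	char buf[sizeof(struct rprm_request) + sizeof(struct rprm_gpt)];
	struct rprm_request *req = (struct rprm_request *)buf;
	struct rprm_gpt *gpt = (struct rprm_gpt *)req->data;
	int ret;

	/* 1) open a connection; rprm_cb() keys clients by rpmsg source address */
	memset(buf, 0, sizeof(buf));
	req->acquire = RPRM_CONNECT;
	ret = rpmsg_send(rpdev, req, sizeof(*req));
	if (ret)
		return ret;

	/* 2) ask for GP timer 3 clocked from source 0 (example values) */
	req->acquire = RPRM_REQ_ALLOC;
	req->res_type = RPRM_GPTIMER;
	gpt->id = 3;
	gpt->src_clk = 0;
	ret = rpmsg_send(rpdev, req, sizeof(buf));

	/*
	 * The rprm_ack reply (carrying ret and the res_id that a later
	 * RPRM_REQ_FREE or constraint request must quote) arrives in this
	 * driver's own rpmsg callback, not synchronously here.
	 */
	return ret;
}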
diff --git a/drivers/rpmsg/rpmsg_server_sample.c b/drivers/rpmsg/rpmsg_server_sample.c
new file mode 100644
index 0000000..67da012
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_server_sample.c
@@ -0,0 +1,99 @@
+/*
+ * Remote processor messaging transport - sample server driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+
+#define MSG ("hello world!")
+#define MSG_LIMIT 100
+
+static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ int err;
+ static int rx_count;
+
+ dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+
+ /* samples should not live forever */
+ if (rx_count >= MSG_LIMIT) {
+ dev_info(&rpdev->dev, "goodbye!\n");
+ return;
+ }
+
+ /* reply */
+ err = rpmsg_sendto(rpdev, MSG, strlen(MSG), src);
+ if (err)
+ pr_err("rpmsg_send failed: %d\n", err);
+}
+
+static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
+{
+ int err;
+
+ dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
+ rpdev->src, rpdev->dst);
+
+ err = rpmsg_sendto(rpdev, MSG, strlen(MSG), 50);
+ if (err) {
+ pr_err("rpmsg_send failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __devexit rpmsg_sample_remove(struct rpmsg_channel *rpdev)
+{
+ dev_info(&rpdev->dev, "rpmsg sample driver is removed\n");
+}
+
+static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
+ { .name = "rpmsg-server-sample" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, rpmsg_driver_sample_id_table);
+
+static struct rpmsg_driver rpmsg_sample_server_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .drv.owner = THIS_MODULE,
+ .id_table = rpmsg_driver_sample_id_table,
+ .probe = rpmsg_sample_probe,
+ .callback = rpmsg_sample_cb,
+ .remove = __devexit_p(rpmsg_sample_remove),
+};
+
+static int __init init(void)
+{
+ return register_rpmsg_driver(&rpmsg_sample_server_driver);
+}
+
+static void __exit fini(void)
+{
+ unregister_rpmsg_driver(&rpmsg_sample_server_driver);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DESCRIPTION("Virtio remote processor messaging sample driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
new file mode 100644
index 0000000..247e887
--- /dev/null
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -0,0 +1,818 @@
+/*
+ * Virtio-based remote processor messaging bus
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/rpmsg.h>
+
+/**
+ * struct virtproc_info - virtual remote processor info
+ *
+ * @vdev: the virtio device
+ * @rvq: rx virtqueue (from pov of local processor)
+ * @svq: tx virtqueue (from pov of local processor)
+ * @rbufs: address of rx buffers
+ * @sbufs: address of tx buffers
+ * @last_rbuf: index of last rx buffer used
+ * @last_sbuf: index of last tx buffer used
+ * @sim_base: simulated base address to make virtio's virt_to_page happy
+ * @svq_lock: protects the tx virtqueue, to allow several concurrent senders
+ * @num_bufs: total number of buffers allocated for communicating with this
+ * virtual remote processor. half is used for rx and half for tx.
+ * @buf_size: size of buffers allocated for communications
+ * @endpoints: the set of local endpoints
+ * @endpoints_lock: lock of the endpoints set
+ * @sendq: wait queue of sending contexts waiting for free rpmsg buffer
+ * @ns_ept: the bus's name service endpoint
+ *
+ * This structure stores the rpmsg state of a given virtio remote processor
+ * device (there might be several virtio rproc devices for each physical
+ * remote processor).
+ */
+struct virtproc_info {
+ struct virtio_device *vdev;
+ struct virtqueue *rvq, *svq;
+ void *rbufs, *sbufs;
+ int last_rbuf, last_sbuf;
+ void *sim_base;
+ struct mutex svq_lock;
+ int num_bufs;
+ int buf_size;
+ struct idr endpoints;
+ spinlock_t endpoints_lock;
+ wait_queue_head_t sendq;
+ struct rpmsg_endpoint *ns_ept;
+};
+
+#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
+#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
+
+/*
+ * Local addresses are dynamically allocated on-demand.
+ * We do not dynamically assign addresses from the low 1024 range,
+ * in order to reserve that address range for predefined services.
+ */
+#define RPMSG_RESERVED_ADDRESSES (1024)
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR (53)
+
+/* show configuration fields */
+#define rpmsg_show_attr(field, path, format_string) \
+static ssize_t \
+field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \
+ \
+ return sprintf(buf, format_string, rpdev->path); \
+}
+
+rpmsg_show_attr(name, id.name, "%s\n");
+rpmsg_show_attr(dst, dst, "0x%x\n");
+rpmsg_show_attr(src, src, "0x%x\n");
+rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
+
+/* unique (free running) numbering for rpmsg devices */
+static unsigned int rpmsg_dev_index;
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
+}
+
+static struct device_attribute rpmsg_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(modalias),
+ __ATTR_RO(dst),
+ __ATTR_RO(src),
+ __ATTR_RO(announce),
+ __ATTR_NULL
+};
+
+static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
+ const struct rpmsg_device_id *id)
+{
+ if (strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE))
+ return 0;
+
+ return 1;
+}
+
+/* match rpmsg channel and rpmsg driver */
+static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
+ const struct rpmsg_device_id *ids = rpdrv->id_table;
+ unsigned int i;
+
+ for (i = 0; ids[i].name[0]; i++) {
+ if (rpmsg_id_match(rpdev, &ids[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
+ rpdev->id.name);
+}
+
+static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
+ struct rpmsg_channel *rpdev,
+ void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
+ void *priv, u32 addr)
+{
+ int err, tmpaddr, request;
+ struct rpmsg_endpoint *ept;
+ struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
+
+ if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
+ return NULL;
+
+ ept = kzalloc(sizeof(*ept), GFP_KERNEL);
+ if (!ept) {
+ dev_err(dev, "failed to kzalloc a new ept\n");
+ return NULL;
+ }
+
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+
+ /* do we need to allocate a local address ? */
+ request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;
+
+ spin_lock(&vrp->endpoints_lock);
+
+ /* bind the endpoint to an rpmsg address (and allocate one if needed) */
+ err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
+ if (err) {
+ dev_err(dev, "idr_get_new_above failed: %d\n", err);
+ goto free_ept;
+ }
+
+ if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
+ dev_err(dev, "address 0x%x already in use\n", addr);
+ goto rem_idr;
+ }
+
+ ept->addr = tmpaddr;
+
+ spin_unlock(&vrp->endpoints_lock);
+
+ return ept;
+
+rem_idr:
+ idr_remove(&vrp->endpoints, request);
+free_ept:
+ spin_unlock(&vrp->endpoints_lock);
+ kfree(ept);
+ return NULL;
+}
+
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
+ void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
+ void *priv, u32 addr)
+{
+ return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
+}
+EXPORT_SYMBOL(rpmsg_create_ept);
+
+void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ struct virtproc_info *vrp = ept->rpdev->vrp;
+
+ spin_lock(&vrp->endpoints_lock);
+ idr_remove(&vrp->endpoints, ept->addr);
+ spin_unlock(&vrp->endpoints_lock);
+
+ kfree(ept);
+}
+EXPORT_SYMBOL(rpmsg_destroy_ept);
+
+static int rpmsg_dev_probe(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ struct virtproc_info *vrp = rpdev->vrp;
+ struct rpmsg_endpoint *ept;
+ int err;
+
+ ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
+ if (!ept) {
+ dev_err(dev, "failed to create endpoint\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rpdev->ept = ept;
+ rpdev->src = ept->addr;
+
+ err = rpdrv->probe(rpdev);
+ if (err) {
+ dev_err(dev, "%s: failed: %d\n", __func__, err);
+ rpmsg_destroy_ept(ept);
+ goto out;
+ }
+
+ /* need to tell remote processor's name service about this channel ? */
+ if (rpdev->announce &&
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ struct rpmsg_ns_msg nsm;
+
+ strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ nsm.addr = rpdev->src;
+ nsm.flags = RPMSG_NS_CREATE;
+
+ err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ if (err)
+ dev_err(dev, "failed to announce service %d\n", err);
+ }
+
+out:
+ return err;
+}
+
+static int rpmsg_dev_remove(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ struct virtproc_info *vrp = rpdev->vrp;
+ int err = 0;
+
+ /* tell remote processor's name service we're removing this channel */
+ if (rpdev->announce &&
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ struct rpmsg_ns_msg nsm;
+
+ strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ nsm.addr = rpdev->src;
+ nsm.flags = RPMSG_NS_DESTROY;
+
+ err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ if (err)
+ dev_err(dev, "failed to announce service %d\n", err);
+ }
+
+ rpdrv->remove(rpdev);
+
+ rpmsg_destroy_ept(rpdev->ept);
+
+ return err;
+}
+
+static struct bus_type rpmsg_bus = {
+ .name = "rpmsg",
+ .match = rpmsg_dev_match,
+ .dev_attrs = rpmsg_dev_attrs,
+ .uevent = rpmsg_uevent,
+ .probe = rpmsg_dev_probe,
+ .remove = rpmsg_dev_remove,
+};
+
+int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
+{
+ rpdrv->drv.bus = &rpmsg_bus;
+ return driver_register(&rpdrv->drv);
+}
+EXPORT_SYMBOL(register_rpmsg_driver);
+
+void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
+{
+ driver_unregister(&rpdrv->drv);
+}
+EXPORT_SYMBOL(unregister_rpmsg_driver);
+
+static void rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ kfree(rpdev);
+}
+
+/* match an rpmsg channel with channel info structs */
+static int rpmsg_channel_match(struct device *dev, void *data)
+{
+ struct rpmsg_channel_info *chinfo = data;
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
+ return 0;
+
+ if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
+ return 0;
+
+ if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
+ return 0;
+
+ return 1;
+}
+
+static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct rpmsg_channel *rpdev;
+ struct device *tmp, *dev = &vrp->vdev->dev;
+ int ret;
+
+ /* make sure a similar channel doesn't already exist */
+ tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
+ if (tmp) {
+ dev_err(dev, "channel %s:%x:%x already exists\n",
+ chinfo->name, chinfo->src, chinfo->dst);
+ return NULL;
+ }
+
+ rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
+ if (!rpdev) {
+ pr_err("kzalloc failed\n");
+ return NULL;
+ }
+
+ rpdev->vrp = vrp;
+ rpdev->src = chinfo->src;
+ rpdev->dst = chinfo->dst;
+
+ /*
+ * rpmsg server channels have a predefined local address, and their
+ * existence needs to be announced remotely
+ */
+ rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;
+
+ strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
+
+ /* very simple device indexing plumbing which just works (for now) */
+ dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);
+
+ rpdev->dev.parent = &vrp->vdev->dev;
+ rpdev->dev.bus = &rpmsg_bus;
+ rpdev->dev.release = rpmsg_release_device;
+
+ ret = device_register(&rpdev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ kfree(rpdev);
+ return NULL;
+ }
+
+ return rpdev;
+}
+
+static void rpmsg_destroy_channel(struct rpmsg_channel *rpdev)
+{
+ device_unregister(&rpdev->dev);
+}
+
+static int rpmsg_destroy_channel_by_info(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct virtio_device *vdev = vrp->vdev;
+ struct device *dev;
+
+ dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
+ if (!dev)
+ return -EINVAL;
+
+ rpmsg_destroy_channel(to_rpmsg_channel(dev));
+
+ return 0;
+}
+
+/* minimal buf "allocator" that is just enough for now */
+static void *get_a_buf(struct virtproc_info *vrp)
+{
+ unsigned int len;
+ void *buf = NULL;
+
+ /* make sure the descriptors are updated before reading */
+ rmb();
+ /* either pick the next unused buffer */
+ if (vrp->last_sbuf < vrp->num_bufs / 2)
+ buf = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
+ /* or recycle a used one */
+ else
+ buf = virtqueue_get_buf(vrp->svq, &len);
+
+ return buf;
+}
+
+/* XXX: the blocking 'wait' mechanism hasn't been tested yet */
+int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len, bool wait)
+{
+ struct virtproc_info *vrp = rpdev->vrp;
+ struct device *dev = &rpdev->dev;
+ struct scatterlist sg;
+ struct rpmsg_hdr *msg;
+ int err;
+ unsigned long offset;
+ void *sim_addr;
+
+ if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
+ dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
+ return -EINVAL;
+ }
+
+ /* the payload's size is currently limited */
+ if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
+ dev_err(dev, "message is too big (%d)\n", len);
+ return -EMSGSIZE;
+ }
+
+ /*
+ * protect svq from simultaneous concurrent manipulations,
+ * and serialize the sending of messages
+ */
+ if (mutex_lock_interruptible(&vrp->svq_lock))
+ return -ERESTARTSYS;
+ /* grab a buffer */
+ msg = get_a_buf(vrp);
+ if (!msg && !wait) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* no free buffer ? wait for one (but bail after 15 seconds) */
+ if (!msg) {
+ /* enable "tx-complete" interrupts before dozing off */
+ virtqueue_enable_cb(vrp->svq);
+
+ /*
+ * sleep until a free buffer is available or 15 secs elapse.
+ * the timeout period is not configurable because frankly
+ * I don't see why drivers need to deal with that.
+ * if later this happens to be required, it'd be easy to add.
+ */
+ err = wait_event_interruptible_timeout(vrp->sendq,
+ (msg = get_a_buf(vrp)),
+ msecs_to_jiffies(15000));
+
+ /* on success, suppress "tx-complete" interrupts again */
+ virtqueue_disable_cb(vrp->svq);
+
+ if (err < 0) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+
+ if (!msg) {
+ dev_err(dev, "timeout waiting for buffer\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ msg->len = len;
+ msg->flags = 0;
+ msg->src = src;
+ msg->dst = dst;
+ msg->unused = 0;
+ memcpy(msg->data, data, len);
+
+ dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Unused %d\n",
+ msg->src, msg->dst, msg->len,
+ msg->flags, msg->unused);
+ print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+
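+ /*
+ * Translate the buffer address into the simulated address space
+ * (sim_base) that virtio's virt_to_page expects.
+ */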
+ offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
+ sim_addr = vrp->sim_base + offset;
+ sg_init_one(&sg, sim_addr, sizeof(*msg) + len);
+
+ /* add message to the remote processor's virtqueue */
+ err = virtqueue_add_buf_gfp(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(dev, "virtqueue_add_buf_gfp failed: %d\n", err);
+ goto out;
+ }
+ /* descriptors must be written before kicking remote processor */
+ wmb();
+
+ /* tell the remote processor it has a pending message to read */
+ virtqueue_kick(vrp->svq);
+
+ err = 0;
+out:
+ mutex_unlock(&vrp->svq_lock);
+ return err;
+}
+EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
+
+static void rpmsg_recv_done(struct virtqueue *rvq)
+{
+ struct rpmsg_hdr *msg;
+ unsigned int len;
+ struct rpmsg_endpoint *ept;
+ struct scatterlist sg;
+ unsigned long offset;
+ void *sim_addr;
+ struct virtproc_info *vrp = rvq->vdev->priv;
+ struct device *dev = &rvq->vdev->dev;
+ int err;
+
+ /* make sure the descriptors are updated before reading */
+ rmb();
+ msg = virtqueue_get_buf(rvq, &len);
+ if (!msg) {
+ dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
+ return;
+ }
+
+ dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Unused: %d\n",
+ msg->src, msg->dst, msg->len,
+ msg->flags, msg->unused);
+ print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+
+ /* fetch the callback of the appropriate user */
+ spin_lock(&vrp->endpoints_lock);
+ ept = idr_find(&vrp->endpoints, msg->dst);
+ spin_unlock(&vrp->endpoints_lock);
+
+ if (ept && ept->cb)
+ ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
+ else
+ dev_warn(dev, "msg received with no recipient\n");
+
+ /* add the buffer back to the remote processor's virtqueue */
+ offset = ((unsigned long) msg) - ((unsigned long) vrp->rbufs);
+ sim_addr = vrp->sim_base + offset;
+ sg_init_one(&sg, sim_addr, sizeof(*msg) + len);
+
+ err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
+ return;
+ }
+ /* descriptors must be written before kicking remote processor */
+ wmb();
+
+ /* tell the remote processor we added another available rx buffer */
+ virtqueue_kick(vrp->rvq);
+}
+
+static void rpmsg_xmit_done(struct virtqueue *svq)
+{
+ struct virtproc_info *vrp = svq->vdev->priv;
+
+ dev_dbg(&svq->vdev->dev, "%s\n", __func__);
+
+ /* wake up potential processes that are waiting for a buffer */
+ wake_up_interruptible(&vrp->sendq);
+}
+
+static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct rpmsg_ns_msg *msg = data;
+ struct rpmsg_channel *newch;
+ struct rpmsg_channel_info chinfo;
+ struct virtproc_info *vrp = priv;
+ struct device *dev = &vrp->vdev->dev;
+ int ret;
+
+#if 0
+ print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+#endif
+
+ if (len != sizeof(*msg)) {
+ dev_err(dev, "malformed ns msg (%d)\n", len);
+ return;
+ }
+
+ /*
+ * the name service ept does _not_ belong to a real rpmsg channel,
+ * and is handled by the rpmsg bus itself.
+ * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+ * in somehow.
+ */
+ if (rpdev) {
+ dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+ return;
+ }
+
+ /* don't trust the remote processor for null terminating the name */
+ msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+ dev_info(dev, "%sing channel %s addr 0x%x\n",
+ msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
+ msg->name, msg->addr);
+
+ strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = msg->addr;
+
+ if (msg->flags & RPMSG_NS_DESTROY) {
+ ret = rpmsg_destroy_channel_by_info(vrp, &chinfo);
+ if (ret)
+ dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
+ } else {
+ newch = rpmsg_create_channel(vrp, &chinfo);
+ if (!newch)
+ dev_err(dev, "rpmsg_create_channel failed\n");
+ }
+}
+
+static int rpmsg_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
+ const char *names[] = { "input", "output" };
+ struct virtqueue *vqs[2];
+ struct virtproc_info *vrp;
+ void *addr;
+ int err, i, num_bufs, buf_size, total_buf_size;
+ struct rpmsg_channel_info *ch;
+
+ vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
+ if (!vrp)
+ return -ENOMEM;
+
+ vrp->vdev = vdev;
+
+ idr_init(&vrp->endpoints);
+ spin_lock_init(&vrp->endpoints_lock);
+ mutex_init(&vrp->svq_lock);
+ init_waitqueue_head(&vrp->sendq);
+
+ /* We expect two virtqueues, rx and tx (in this order) */
+ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
+ if (err)
+ goto free_vi;
+
+ vrp->rvq = vqs[0];
+ vrp->svq = vqs[1];
+
+ /* Platform must supply pre-allocated uncached buffers for now */
+ vdev->config->get(vdev, VPROC_BUF_ADDR, &addr, sizeof(addr));
+ vdev->config->get(vdev, VPROC_BUF_NUM, &num_bufs,
+ sizeof(num_bufs));
+ vdev->config->get(vdev, VPROC_BUF_SZ, &buf_size, sizeof(buf_size));
+
+ total_buf_size = num_bufs * buf_size;
+
+ dev_dbg(&vdev->dev, "%d buffers, size %d, addr 0x%x, total 0x%x\n",
+ num_bufs, buf_size, (unsigned int) addr, total_buf_size);
+
+ vrp->num_bufs = num_bufs;
+ vrp->buf_size = buf_size;
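+ /* first half of the shared buffer area is used for rx, second half for tx */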
+ vrp->rbufs = addr;
+ vrp->sbufs = addr + total_buf_size / 2;
+
+ /* simulated addr base to make virt_to_page happy */
+ vdev->config->get(vdev, VPROC_SIM_BASE, &vrp->sim_base,
+ sizeof(vrp->sim_base));
+
+ /* set up the receive buffers */
+ for (i = 0; i < num_bufs / 2; i++) {
+ struct scatterlist sg;
+ void *tmpaddr = vrp->rbufs + i * buf_size;
+ void *simaddr = vrp->sim_base + i * buf_size;
+
+ sg_init_one(&sg, simaddr, buf_size);
+ err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, tmpaddr,
+ GFP_KERNEL);
+ WARN_ON(err < 0); /* sanity check; this can't really happen */
+ }
+
+ /* tell the remote processor it can start sending data */
+ virtqueue_kick(vrp->rvq);
+
+ /* suppress "tx-complete" interrupts */
+ virtqueue_disable_cb(vrp->svq);
+
+ vdev->priv = vrp;
+
+ dev_info(&vdev->dev, "rpmsg backend virtproc probed successfully\n");
+
+ /* if supported by the remote processor, enable the name service */
+ if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
+ vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
+ vrp, RPMSG_NS_ADDR);
+ if (!vrp->ns_ept) {
+ dev_err(&vdev->dev, "failed to create the ns ept\n");
+ err = -ENOMEM;
+ goto vqs_del;
+ }
+ }
+
+ /* look for platform-specific static channels */
+ vdev->config->get(vdev, VPROC_STATIC_CHANNELS, &ch, sizeof(ch));
+
+ for (i = 0; ch && ch[i].name[0]; i++)
+ rpmsg_create_channel(vrp, &ch[i]);
+
+ return 0;
+
+vqs_del:
+ vdev->config->del_vqs(vrp->vdev);
+free_vi:
+ kfree(vrp);
+ return err;
+}
+
+static int rpmsg_remove_device(struct device *dev, void *data)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ rpmsg_destroy_channel(rpdev);
+
+ return 0;
+}
+
+static void __devexit rpmsg_remove(struct virtio_device *vdev)
+{
+ struct virtproc_info *vrp = vdev->priv;
+ int ret;
+
+ ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
+ if (ret)
+ dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);
+
+ idr_remove_all(&vrp->endpoints);
+ idr_destroy(&vrp->endpoints);
+
+ vdev->config->del_vqs(vrp->vdev);
+
+ kfree(vrp);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+ VIRTIO_RPMSG_F_NS,
+};
+
+static struct virtio_driver virtio_ipc_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = rpmsg_probe,
+ .remove = __devexit_p(rpmsg_remove),
+};
+
+static int __init init(void)
+{
+ int ret;
+
+ ret = bus_register(&rpmsg_bus);
+ if (ret) {
+ pr_err("failed to register rpmsg bus: %d\n", ret);
+ return ret;
+ }
+
+ return register_virtio_driver(&virtio_ipc_driver);
+}
+module_init(init);
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_ipc_driver);
+ bus_unregister(&rpmsg_bus);
+}
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 5e4e725..48fd765 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@
int res;
u8 rd_reg;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
@@ -428,24 +420,12 @@
static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret = 0;
+ int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
u8 rd_reg;
if (irq <= 0)
- return -EINVAL;
-
- rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
- goto out0;
-
- }
-
- platform_set_drvdata(pdev, rtc);
+ goto out1;
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
@@ -462,14 +442,6 @@
if (ret < 0)
goto out1;
- ret = request_irq(irq, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
- dev_name(&rtc->dev), rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- goto out1;
- }
-
if (twl_class_is_6030()) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
@@ -480,14 +452,14 @@
/* Check RTC module status, Enable if it is off */
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
}
/* ensure interrupts are disabled, bootloaders can be strange */
@@ -498,15 +470,35 @@
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
- goto out2;
+ goto out1;
- return ret;
+ rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+ PTR_ERR(rtc));
+ goto out1;
+
+ }
+
+ ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+ IRQF_TRIGGER_RISING,
+ dev_name(&rtc->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ is not free.\n");
+ goto out2;
+ }
+
+ if (enable_irq_wake(irq) < 0)
+ dev_warn(&pdev->dev, "Cannot enable wakeup for IRQ %d\n", irq);
+
+ platform_set_drvdata(pdev, rtc);
+ return 0;
out2:
- free_irq(irq, rtc);
-out1:
rtc_device_unregister(rtc);
-out0:
+out1:
return ret;
}
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 969cdd2..bdf64c5 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -650,6 +650,7 @@
struct spi_transfer *t)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi_device_config *cd = spi->controller_data;
struct omap2_mcspi *mcspi;
struct spi_master *spi_cntrl;
u32 l = 0, div = 0;
@@ -675,8 +676,13 @@
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
*/
- l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
- l |= OMAP2_MCSPI_CHCONF_DPE0;
+ if (cd && cd->swap_datalines) {
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ l |= OMAP2_MCSPI_CHCONF_IS | OMAP2_MCSPI_CHCONF_DPE1;
+ } else {
+ l &= ~(OMAP2_MCSPI_CHCONF_IS | OMAP2_MCSPI_CHCONF_DPE1);
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ }
/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6d3ec14b..642fce3 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -37,17 +37,27 @@
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>
#include <plat/omap-serial.h>
+#include <plat/omap_device.h>
+#include <plat/serial.h>
+#include <plat/omap-pm.h>
+
+#define UART_OMAP_IIR_ID 0x3e
+#define UART_OMAP_IIR_RX_TIMEOUT 0xc
+
+#define UART_OMAP_TXFIFO_LVL (0x68/4)
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
-static void serial_omap_rx_timeout(unsigned long uart_no);
+static void serial_omap_rxdma_poll(unsigned long uart_no);
static int serial_omap_start_rxdma(struct uart_omap_port *up);
+static void omap_uart_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
@@ -94,6 +104,86 @@
return port->uartclk/(baud * divisor);
}
+static inline void serial_omap_port_disable(struct uart_omap_port *up)
+{
+ if (up->suspended) {
+ /*
+ * If the port has been suspended by system-wide suspend,
+ * put it back to low power mode immediately.
+ */
+ pm_runtime_put_sync_suspend(&up->pdev->dev);
+ } else {
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
+ }
+}
+
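+/* take a runtime PM reference so the UART is powered and clocked while accessed */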
+static inline void serial_omap_port_enable(struct uart_omap_port *up)
+{
+ pm_runtime_get_sync(&up->pdev->dev);
+}
+
+/* TBD: Should be removed once the irq-chaining mechanism is in place */
+u32 omap_uart_resume_idle(void)
+{
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < OMAP_MAX_HSUART_PORTS; i++) {
+ struct uart_omap_port *up = ui[i];
+
+ if (!up)
+ continue;
+
+ if (up->chk_wakeup(up->pdev)) {
+ serial_omap_port_enable(up);
+ serial_omap_port_disable(up);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+int omap_uart_enable(u8 uart_num)
+{
+ if (uart_num > OMAP_MAX_HSUART_PORTS)
+ return -ENODEV;
+
+ if (!ui[uart_num - 1])
+ return -ENODEV;
+
+ pm_runtime_get_sync(&ui[uart_num - 1]->pdev->dev);
+
+ return 0;
+}
+
+int omap_uart_disable(u8 uart_num)
+{
+ if (uart_num > OMAP_MAX_HSUART_PORTS)
+ return -ENODEV;
+
+ if (!ui[uart_num - 1])
+ return -ENODEV;
+
+ pm_runtime_put_sync_suspend(&ui[uart_num - 1]->pdev->dev);
+
+ return 0;
+}
+
+int omap_uart_wake(u8 uart_num)
+{
+ if (uart_num > OMAP_MAX_HSUART_PORTS)
+ return -ENODEV;
+
+ if (!ui[uart_num - 1])
+ return -ENODEV;
+
+ serial_omap_port_enable(ui[uart_num - 1]);
+ serial_omap_port_disable(ui[uart_num - 1]);
+
+ return 0;
+}
+
static void serial_omap_stop_rxdma(struct uart_omap_port *up)
{
if (up->uart_dma.rx_dma_used) {
@@ -102,6 +192,7 @@
omap_free_dma(up->uart_dma.rx_dma_channel);
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_used = false;
+ serial_omap_port_disable(up);
}
}
@@ -110,8 +201,11 @@
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+
+ serial_omap_port_enable(up);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
+ serial_omap_port_disable(up);
}
static void serial_omap_stop_tx(struct uart_port *port)
@@ -129,23 +223,29 @@
omap_stop_dma(up->uart_dma.tx_dma_channel);
omap_free_dma(up->uart_dma.tx_dma_channel);
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ serial_omap_port_disable(up);
}
+ serial_omap_port_enable(up);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
+
+ serial_omap_port_disable(up);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ serial_omap_port_enable(up);
if (up->use_dma)
serial_omap_stop_rxdma(up);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
+ serial_omap_port_disable(up);
}
static inline void receive_chars(struct uart_omap_port *up, int *status)
@@ -215,10 +315,10 @@
spin_lock(&up->port.lock);
}
-static void transmit_chars(struct uart_omap_port *up)
+static void transmit_chars(struct uart_omap_port *up, u8 tx_fifo_lvl)
{
struct circ_buf *xmit = &up->port.state->xmit;
- int count;
+ int count, i;
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
@@ -230,14 +330,14 @@
serial_omap_stop_tx(&up->port);
return;
}
- count = up->port.fifosize / 4;
- do {
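+	/* fill only the space currently free in the TX FIFO */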
+ count = up->port.fifosize - tx_fifo_lvl;
+ for (i = 0; i < count; i++) {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
- } while (--count > 0);
+ }
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
@@ -262,7 +362,9 @@
int ret = 0;
if (!up->use_dma) {
+ serial_omap_port_enable(up);
serial_omap_enable_ier_thri(up);
+ serial_omap_port_disable(up);
return;
}
@@ -272,6 +374,7 @@
xmit = &up->port.state->xmit;
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ serial_omap_port_enable(up);
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
"UART Tx DMA",
(void *)uart_tx_dma_callback, up,
@@ -352,15 +455,24 @@
{
struct uart_omap_port *up = dev_id;
unsigned int iir, lsr;
+ unsigned int int_id;
unsigned long flags;
+ int ret = IRQ_HANDLED;
+ u8 tx_fifo_lvl;
+ serial_omap_port_enable(up);
iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT)
+ if (iir & UART_IIR_NO_INT) {
+ serial_omap_port_disable(up);
return IRQ_NONE;
+ }
+
+ int_id = iir & UART_OMAP_IIR_ID;
spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
- if (iir & UART_IIR_RLSI) {
+ if (int_id == UART_IIR_RDI || int_id == UART_OMAP_IIR_RX_TIMEOUT ||
+ int_id == UART_IIR_RLSI) {
if (!up->use_dma) {
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
@@ -374,12 +486,19 @@
}
check_modem_status(up);
- if ((lsr & UART_LSR_THRE) && (iir & UART_IIR_THRI))
- transmit_chars(up);
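+	/* TX interrupt: top up the FIFO based on its current fill level */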
+ if (int_id == UART_IIR_THRI) {
+ tx_fifo_lvl = serial_in(up, UART_OMAP_TXFIFO_LVL);
+ if (lsr & UART_LSR_THRE || tx_fifo_lvl < up->port.fifosize)
+ transmit_chars(up, tx_fifo_lvl);
+ else
+ ret = IRQ_NONE;
+ }
spin_unlock_irqrestore(&up->port.lock, flags);
+ serial_omap_port_disable(up);
+
up->port_activity = jiffies;
- return IRQ_HANDLED;
+ return ret;
}
static unsigned int serial_omap_tx_empty(struct uart_port *port)
@@ -388,11 +507,12 @@
unsigned long flags = 0;
unsigned int ret = 0;
+ serial_omap_port_enable(up);
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
-
+ serial_omap_port_disable(up);
return ret;
}
@@ -402,7 +522,10 @@
unsigned char status;
unsigned int ret = 0;
+ serial_omap_port_enable(up);
status = check_modem_status(up);
+ serial_omap_port_disable(up);
+
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
if (status & UART_MSR_DCD)
@@ -433,8 +556,11 @@
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
- mcr |= up->mcr;
- serial_out(up, UART_MCR, mcr);
+ serial_omap_port_enable(up);
+ up->mcr = serial_in(up, UART_MCR);
+ up->mcr |= mcr;
+ serial_out(up, UART_MCR, up->mcr);
+ serial_omap_port_disable(up);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
@@ -443,6 +569,7 @@
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ serial_omap_port_enable(up);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
@@ -450,24 +577,19 @@
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
+ serial_omap_port_disable(up);
}
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- int retval;
- /*
- * Allocate the IRQ
- */
- retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
- up->name, up);
- if (retval)
- return retval;
+ enable_irq(up->port.irq);
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+ serial_omap_port_enable(up);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
@@ -475,6 +597,7 @@
serial_omap_clear_fifos(up);
/* For Hardware flow control */
serial_out(up, UART_MCR, UART_MCR_RTS);
+ up->mcr = serial_in(up, UART_MCR);
/*
* Clear the interrupt registers.
@@ -505,7 +628,7 @@
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
0);
init_timer(&(up->uart_dma.rx_timer));
- up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
+ up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
up->uart_dma.rx_timer.data = up->pdev->id;
/* Currently the buffer size is 4KB. Can increase it */
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
@@ -521,8 +644,9 @@
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
- serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+ serial_out(up, UART_OMAP_WER, up->wer);
+ serial_omap_port_disable(up);
up->port_activity = jiffies;
return 0;
}
@@ -533,10 +657,13 @@
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+
+ serial_omap_port_enable(up);
/*
- * Disable interrupts from this port
+ * Disable interrupts & wakeup events from this port
*/
up->ier = 0;
+ serial_out(up, UART_OMAP_WER, 0);
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
@@ -566,15 +693,14 @@
up->uart_dma.rx_buf_dma_phys);
up->uart_dma.rx_buf = NULL;
}
- free_irq(up->port.irq, up);
+ serial_omap_port_disable(up);
+ disable_irq(up->port.irq);
}
static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
- unsigned char efr = 0;
-
up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -584,24 +710,23 @@
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* clear SW control mode bits */
- efr = up->efr;
- efr &= OMAP_UART_SW_CLR;
+ up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
- * Enable XON/XOFF flow control on output.
- * Transmit XON1, XOFF1
+ * Flow control for OMAP.TX
+ * OMAP.RX should listen for XON/XOFF
*/
if (termios->c_iflag & IXON)
- efr |= OMAP_UART_SW_TX;
+ up->efr |= OMAP_UART_SW_RX;
/*
* IXOFF Flag:
- * Enable XON/XOFF flow control on input.
- * Receiver compares XON1, XOFF1.
+ * Flow control for OMAP.RX
+ * OMAP.TX should send XON/XOFF
*/
if (termios->c_iflag & IXOFF)
- efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_TX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -624,7 +749,7 @@
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
- serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
@@ -671,6 +796,10 @@
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
+ up->dll = quot & 0xff;
+ up->dlh = quot >> 8;
+ up->mdr1 = UART_OMAP_MDR1_DISABLE;
+
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
@@ -680,6 +809,7 @@
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
+ serial_omap_port_enable(up);
spin_lock_irqsave(&up->port.lock, flags);
/*
@@ -723,6 +853,7 @@
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval;
/* FIFOs and DMA Settings */
@@ -748,6 +879,11 @@
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
if (up->use_dma) {
+ if (up->errata & OMAP4_UART_ERRATA_i659_TX_THR) {
+ serial_out(up, UART_MDR3, SET_DMA_TX_THRESHOLD);
+ serial_out(up, UART_TX_DMA_THRESHOLD, TX_FIFO_THR_LVL);
+ }
+
serial_out(up, UART_TI752_TLR, 0);
serial_out(up, UART_OMAP_SCR,
(UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
@@ -758,8 +894,11 @@
serial_out(up, UART_MCR, up->mcr);
/* Protocol, Baud Rate, and Interrupt Settings */
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_uart_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -769,8 +908,8 @@
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_DLL, up->dll); /* LS of divisor */
+ serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
@@ -780,27 +919,38 @@
serial_out(up, UART_LCR, cval);
if (baud > 230400 && baud != 3000000)
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_16X_MODE;
+
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_uart_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Hardware Flow Control Configuration */
if (termios->c_cflag & CRTSCTS) {
- efr |= (UART_EFR_CTS | UART_EFR_RTS);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-
up->mcr = serial_in(up, UART_MCR);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
-
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
- serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
+
+ up->efr |= (UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_EFR, up->efr); /* Enable AUTORTS and AUTOCTS */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
+ up->mcr |= UART_MCR_RTS;
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_LCR, cval);
+ } else {
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ up->efr = serial_in(up, UART_EFR);
+ up->efr &= ~(UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_EFR, up->efr); /* Disable AUTORTS and AUTOCTS */
serial_out(up, UART_LCR, cval);
}
@@ -808,7 +958,14 @@
/* Software Flow Control Configuration */
serial_omap_configure_xonxoff(up, termios);
+ /* Now we are ready for RX data: enable rts line */
+ if (up->rts_mux_driver_control && up->rts_pullup_in_suspend) {
+ omap_rts_mux_write(0, up->port.line);
+ up->rts_pullup_in_suspend = 0;
+ }
+
spin_unlock_irqrestore(&up->port.lock, flags);
+ serial_omap_port_disable(up);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
}
@@ -818,8 +975,12 @@
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;
+ unsigned char lcr;
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+
+ serial_omap_port_enable(up);
+ lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -828,7 +989,19 @@
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
- serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_LCR, lcr);
+ if (state)
+ pm_runtime_put_sync(&up->pdev->dev);
+ else
+ serial_omap_port_disable(up);
+}
+
+static void serial_omap_wake_peer(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->wake_peer)
+ up->wake_peer(port);
}
static void serial_omap_release_port(struct uart_port *port)
@@ -906,25 +1079,31 @@
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ serial_omap_port_enable(up);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
+ serial_omap_port_disable(up);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned int status = serial_in(up, UART_LSR);
+ unsigned int status;
+ serial_omap_port_enable(up);
+ status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;
- return serial_in(up, UART_RX);
+ status = serial_in(up, UART_RX);
+ serial_omap_port_disable(up);
+ return status;
}
#endif /* CONFIG_CONSOLE_POLL */
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
-
static struct uart_omap_port *serial_omap_console_ports[4];
static struct uart_driver serial_omap_reg;
@@ -944,7 +1123,22 @@
struct uart_omap_port *up = serial_omap_console_ports[co->index];
unsigned long flags;
unsigned int ier;
- int locked = 1;
+ int console_lock = 0, locked = 1;
+
+ if (console_trylock())
+ console_lock = 1;
+
+ /*
+ * If the console lock is not available and the port is
+ * suspending, skip the console output, as it may trigger
+ * recursive prints.
+ * This typically happens during boot while printing debug
+ * bootlogs.
+ */
+
+ if (!console_lock &&
+ up->pdev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
local_irq_save(flags);
if (up->port.sysrq)
@@ -954,6 +1148,8 @@
else
spin_lock(&up->port.lock);
+ serial_omap_port_enable(up);
+
/*
* First save the IER then disable the interrupts
*/
@@ -978,6 +1174,10 @@
if (up->msr_saved_flags)
check_modem_status(up);
+ if (console_lock)
+ console_unlock();
+
+ serial_omap_port_disable(up);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
@@ -1041,6 +1241,7 @@
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
+ .wake_peer = serial_omap_wake_peer,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
@@ -1060,26 +1261,37 @@
.cons = OMAP_CONSOLE,
};
-static int
-serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+static int serial_omap_suspend(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(pdev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
- if (up)
+ if (up) {
+ disable_irq(up->port.irq);
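+		/* pull RTS up while suspended so the peer stops transmitting */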
+ if (up->rts_mux_driver_control) {
+ up->rts_pullup_in_suspend = 1;
+ omap_rts_mux_write(MUX_PULL_UP, up->port.line);
+ }
+ up->suspended = true;
uart_suspend_port(&serial_omap_reg, &up->port);
+ serial_omap_pm(&up->port, 3, 0);
+ }
return 0;
}
-static int serial_omap_resume(struct platform_device *dev)
+static int serial_omap_resume(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(dev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
- if (up)
+ if (up) {
uart_resume_port(&serial_omap_reg, &up->port);
+ up->suspended = false;
+ enable_irq(up->port.irq);
+ }
+
return 0;
}
-static void serial_omap_rx_timeout(unsigned long uart_no)
+static void serial_omap_rxdma_poll(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
@@ -1089,9 +1301,9 @@
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
- RX_TIMEOUT) {
+ up->uart_dma.rx_timeout) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
@@ -1120,7 +1332,7 @@
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
}
up->port_activity = jiffies;
}
@@ -1135,6 +1347,7 @@
int ret = 0;
if (up->uart_dma.rx_dma_channel == -1) {
+ serial_omap_port_enable(up);
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
@@ -1158,7 +1371,7 @@
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
up->uart_dma.rx_dma_used = true;
return ret;
}
@@ -1223,9 +1436,10 @@
static int serial_omap_probe(struct platform_device *pdev)
{
- struct uart_omap_port *up;
+ struct uart_omap_port *up = NULL;
struct resource *mem, *irq, *dma_tx, *dma_rx;
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ struct omap_device *od;
int ret = -ENOSPC;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1249,13 +1463,13 @@
dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!dma_rx) {
ret = -EINVAL;
- goto err;
+ goto do_release_region;
}
dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
if (!dma_tx) {
ret = -EINVAL;
- goto err;
+ goto do_release_region;
}
up = kzalloc(sizeof(*up), GFP_KERNEL);
@@ -1275,39 +1489,82 @@
up->port.ops = &serial_omap_pops;
up->port.line = pdev->id;
- up->port.membase = omap_up_info->membase;
- up->port.mapbase = omap_up_info->mapbase;
+ up->port.mapbase = mem->start;
+ up->port.membase = ioremap(mem->start, mem->end - mem->start);
+
+ if (!up->port.membase) {
+ dev_err(&pdev->dev, "can't ioremap UART\n");
+ ret = -ENOMEM;
+ goto do_free;
+ }
+
up->port.flags = omap_up_info->flags;
- up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
up->uart_dma.uart_base = mem->start;
+ up->errata = omap_up_info->errata;
+ up->enable_wakeup = omap_up_info->enable_wakeup;
+ up->wer = omap_up_info->wer;
+ up->chk_wakeup = omap_up_info->chk_wakeup;
+ up->wake_peer = omap_up_info->wake_peer;
+ up->rts_mux_driver_control = omap_up_info->rts_mux_driver_control;
+ up->rts_pullup_in_suspend = 0;
- if (omap_up_info->dma_enabled) {
+ if (omap_up_info->use_dma) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
- up->uart_dma.rx_buf_size = 4096;
- up->uart_dma.rx_timeout = 2;
+ up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
+ up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
+ up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
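+	/* allow the port to autosuspend after the platform-specified idle timeout */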
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ omap_up_info->auto_sus_timeout);
+
+ if (device_may_wakeup(&pdev->dev))
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_irq_safe(&pdev->dev);
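+	/* the console UART was left enabled by early boot code; cycle it through
+	 * runtime PM so its state is in sync with the framework */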
+ if (omap_up_info->console_uart) {
+ od = to_omap_device(up->pdev);
+ omap_hwmod_idle(od->hwmods[0]);
+ serial_omap_port_enable(up);
+ serial_omap_port_disable(up);
+ }
+
ui[pdev->id] = up;
serial_omap_add_console_port(up);
+ ret = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
+ up->name, up);
+ if (ret)
+ goto do_iounmap;
+ disable_irq(up->port.irq);
+
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
- goto do_release_region;
+ goto do_free_irq;
+ dev_set_drvdata(&pdev->dev, up);
platform_set_drvdata(pdev, up);
+
return 0;
-err:
- dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
- pdev->id, __func__, ret);
+
+do_free_irq:
+ free_irq(up->port.irq, up);
+do_iounmap:
+ iounmap(up->port.membase);
+do_free:
+ kfree(up);
do_release_region:
release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
+ pdev->id, __func__, ret);
return ret;
}
@@ -1317,20 +1574,145 @@
platform_set_drvdata(dev, NULL);
if (up) {
+ pm_runtime_disable(&up->pdev->dev);
+ free_irq(up->port.irq, up);
uart_remove_one_port(&serial_omap_reg, &up->port);
+ iounmap(up->port.membase);
kfree(up);
}
return 0;
}
+/*
+ * Work-around for Errata i202 (3430 - 1.12, 3630 - 1.6):
+ * an access to a UART register right after an MDR1 access
+ * causes the UART to corrupt data.
+ *
+ * The required delay is
+ * 5 L4 clock cycles + 5 UART functional clock cycles (~0.2 us @ 48 MHz);
+ * allow 10 times as much to be safe.
+ */
+static void omap_uart_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
+{
+ u8 timeout = 255;
+
+ serial_out(up, UART_OMAP_MDR1, mdr1);
+ udelay(2);
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
+ UART_FCR_CLEAR_RCVR);
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
+ (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+ /* Should *never* happen; warn and carry on */
+ dev_crit(&up->pdev->dev, "Errata i202: timedout %x\n",
+ serial_in(up, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
+
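+/* Restore the UART registers after their context was lost in a low-power state */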
+static void omap_uart_restore_context(struct uart_omap_port *up)
+{
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_uart_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
+ else
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, 0x0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_DLL, up->dll);
+ serial_out(up, UART_DLM, up->dlh);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, up->lcr);
+ /* Enable module level wake up */
+ serial_out(up, UART_OMAP_WER, up->wer);
+ if (up->use_dma) {
+ if (up->errata & OMAP4_UART_ERRATA_i659_TX_THR) {
+ serial_out(up, UART_MDR3, SET_DMA_TX_THRESHOLD);
+ serial_out(up, UART_TX_DMA_THRESHOLD, TX_FIFO_THR_LVL);
+ }
+
+ serial_out(up, UART_TI752_TLR, 0);
+ serial_out(up, UART_OMAP_SCR,
+ (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ }
+
+ /* UART 16x mode */
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_uart_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+}
+
+static int omap_serial_runtime_suspend(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+
+ if (!up)
+ goto done;
+
+ if (up->rts_mux_driver_control) {
+ omap_rts_mux_write(MUX_PULL_UP, up->port.line);
+ /* wait a few bytes to allow current transmission to complete */
+ udelay(300);
+ }
+ if (device_may_wakeup(dev))
+ up->enable_wakeup(up->pdev, true);
+ else
+ up->enable_wakeup(up->pdev, false);
+done:
+ return 0;
+}
+
+static int omap_serial_runtime_resume(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_device *od;
+
+ if (up) {
+ if (omap_pm_was_context_lost(dev))
+ omap_uart_restore_context(up);
+
+ if (up->use_dma) {
+ /* no TX DMA wakeup event exists, so keep the hwmod in no-idle mode */
+ od = to_omap_device(up->pdev);
+ omap_hwmod_set_slave_idlemode(od->hwmods[0],
+ HWMOD_IDLEMODE_NO);
+ }
+ if (up->rts_mux_driver_control && (!up->rts_pullup_in_suspend))
+ omap_rts_mux_write(0, up->port.line);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_serial_dev_pm_ops = {
+ .suspend = serial_omap_suspend,
+ .resume = serial_omap_resume,
+ .runtime_suspend = omap_serial_runtime_suspend,
+ .runtime_resume = omap_serial_runtime_resume,
+};
+
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
-
- .suspend = serial_omap_suspend,
- .resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
+ .pm = &omap_serial_dev_pm_ops,
},
};
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 55a57c2..d534fa6 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -39,8 +39,13 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
-#include <plat/usb.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/usb.h>
+#include <plat/clock.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -178,11 +183,7 @@
}
}
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_err(dev, "failed to start usbhs with err %d\n", ret);
- goto err_enable;
- }
+ pm_runtime_get_sync(dev->parent);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
@@ -228,10 +229,7 @@
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_enable:
- usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev->parent);
err_io:
return ret;
@@ -252,25 +250,87 @@
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev->parent);
usb_put_hcd(hcd);
return 0;
}
static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
- if (hcd->driver->shutdown)
+ if (hcd->driver->shutdown) {
+ pm_runtime_get_sync(dev->parent);
hcd->driver->shutdown(hcd);
+ pm_runtime_put(dev->parent);
+ }
+}
+
+static int ehci_omap_bus_suspend(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_omap_platform_data *pdata;
+ struct omap_hwmod *oh;
+ struct clk *clk;
+ int ret = 0;
+ int i;
+
+ dev_dbg(dev, "ehci_omap_bus_suspend\n");
+
+ ret = ehci_bus_suspend(hcd);
+
+ if (ret != 0) {
+ dev_dbg(dev, "ehci_omap_bus_suspend failed %d\n", ret);
+ return ret;
+ }
+
+ oh = omap_hwmod_lookup(USBHS_EHCI_HWMODNAME);
+
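+	/* enable I/O pad wakeup so bus activity can wake the suspended controller */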
+ omap_hwmod_enable_ioring_wakeup(oh);
+
+ if (dev->parent)
+ pm_runtime_put_sync(dev->parent);
+
+ /* At the end, disable any external transceiver clocks */
+ pdata = dev->platform_data;
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ clk = pdata->transceiver_clk[i];
+ if (clk)
+ clk_disable(clk);
+ }
+
+ return ret;
+}
+
+static int ehci_omap_bus_resume(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_omap_platform_data *pdata;
+ struct clk *clk;
+ int i;
+
+ dev_dbg(dev, "ehci_omap_bus_resume\n");
+
+ /* Re-enable any external transceiver clocks first */
+ pdata = dev->platform_data;
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ clk = pdata->transceiver_clk[i];
+ if (clk)
+ clk_enable(clk);
+ }
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ return ehci_bus_resume(hcd);
}
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
.remove = ehci_hcd_omap_remove,
.shutdown = ehci_hcd_omap_shutdown,
- /*.suspend = ehci_hcd_omap_suspend, */
- /*.resume = ehci_hcd_omap_resume, */
.driver = {
.name = "ehci-omap",
}
@@ -315,8 +375,8 @@
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+ .bus_suspend = ehci_omap_bus_suspend,
+ .bus_resume = ehci_omap_bus_resume,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 08fdcfa..7321920 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -506,6 +506,11 @@
last = list_entry (qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
+ /*
+ * Make sure the new hw_next pointer is visible
+ * to the HW before freeing the old one
+ */
+ wmb();
}
/* remove qtd; it's recycled after possible urb completion */
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 6048f2f..875a837 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -31,6 +31,8 @@
#include <linux/platform_device.h>
#include <plat/usb.h>
+#include <plat/omap_hwmod.h>
+#include <linux/pm_runtime.h>
/*-------------------------------------------------------------------------*/
@@ -41,6 +43,51 @@
return ohci_init(hcd_to_ohci(hcd));
}
+static int ohci_omap3_bus_suspend(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct omap_hwmod *oh;
+ int ret = 0;
+
+ dev_dbg(dev, "ohci_omap3_bus_suspend\n");
+
+ ret = ohci_bus_suspend(hcd);
+
+ /* Delay required so that, after OHCI suspend,
+ * smart-standby can be set in the driver.
+ * Required for power management.
+ */
+ msleep(5);
+
+ if (ret != 0) {
+ dev_dbg(dev, "ohci_omap3_bus_suspend failed %d\n", ret);
+ return ret;
+ }
+
+ oh = omap_hwmod_lookup(USBHS_OHCI_HWMODNAME);
+
+ omap_hwmod_enable_ioring_wakeup(oh);
+
+ if (dev->parent)
+ pm_runtime_put_sync(dev->parent);
+
+ return ret;
+}
+
+
+static int ohci_omap3_bus_resume(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+
+ dev_dbg(dev, "ohci_omap3_bus_resume\n");
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ return ohci_bus_resume(hcd);
+}
+
+
/*-------------------------------------------------------------------------*/
static int ohci_omap3_start(struct usb_hcd *hcd)
@@ -104,8 +151,8 @@
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
+ .bus_suspend = ohci_omap3_bus_suspend,
+ .bus_resume = ohci_omap3_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
@@ -172,11 +219,7 @@
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_dbg(dev, "failed to start ohci\n");
- goto err_end;
- }
+ pm_runtime_get_sync(dev->parent);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -189,7 +232,7 @@
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev->parent);
err_end:
usb_put_hcd(hcd);
@@ -220,9 +263,8 @@
iounmap(hcd->regs);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev->parent);
usb_put_hcd(hcd);
-
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index a0232a7..0226c20 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2212,6 +2212,7 @@
musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
for (i = 0; i < musb->config->num_eps; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
epio = musb->endpoints[i].regs;
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
@@ -2278,6 +2279,7 @@
musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
epio = musb->endpoints[i].regs;
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
@@ -2335,7 +2337,8 @@
struct platform_device *pdev = to_platform_device(dev);
unsigned long flags;
struct musb *musb = dev_to_musb(&pdev->dev);
-
+ if (pm_runtime_suspended(dev))
+ return 0;
spin_lock_irqsave(&musb->lock, flags);
if (is_peripheral_active(musb)) {
@@ -2347,7 +2350,6 @@
* they will even be wakeup-enabled.
*/
}
-
musb_save_context(musb);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -2358,7 +2360,8 @@
{
struct platform_device *pdev = to_platform_device(dev);
struct musb *musb = dev_to_musb(&pdev->dev);
-
+ if (pm_runtime_suspended(dev))
+ return 0;
musb_restore_context(musb);
/* for static cmos like DaVinci, register values were preserved
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 0e053b5..263d31c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -370,6 +370,7 @@
u8 index, testmode;
u8 devctl, busctl, misc;
+ u32 otg_interfsel;
struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
};
@@ -386,6 +387,7 @@
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct workqueue_struct *otg_notifier_wq;
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -497,6 +499,7 @@
struct usb_gadget g; /* the gadget */
struct usb_gadget_driver *gadget_driver; /* its driver */
#endif
+ bool is_ac_charger:1;
/*
* FIXME: Remove this flag.
@@ -518,6 +521,12 @@
#endif
};
+struct musb_otg_work {
+ struct work_struct work;
+ enum usb_xceiv_events xceiv_event;
+ struct musb *musb;
+};
+
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
static inline struct musb *gadget_to_musb(struct usb_gadget *g)
{
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 99ceaef..a46923a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -330,6 +330,13 @@
musb_ep = req->ep;
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_ep->end_point.name);
+ return;
+ }
+
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "dma pending...\n");
@@ -557,8 +564,7 @@
&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
|| (is_dma && (!dma->desired_mode ||
- (request->actual &
- (musb_ep->packet_sz - 1))))
+ (request->actual % musb_ep->packet_sz)))
#endif
) {
/*
@@ -570,8 +576,14 @@
dev_dbg(musb->controller, "sending zero pkt\n");
musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
- | MUSB_TXCSR_TXPKTRDY);
+ | MUSB_TXCSR_TXPKTRDY
+ | (csr & MUSB_TXCSR_P_ISO));
request->zero = 0;
+ /*
+ * Return from here with the expectation of the endpoint
+ * interrupt for further action.
+ */
+ return;
}
if (request->actual == request->length) {
@@ -651,6 +663,13 @@
len = musb_ep->packet_sz;
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_ep->end_point.name);
+ return;
+ }
+
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "DMA pending...\n");
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8b2473f..bbac21d 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1663,7 +1663,7 @@
d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
- length = rx_count;
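+ /* Never program more than the URB's buffer length */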
+ length = min(rx_count, urb->transfer_buffer_length);
buf = urb->transfer_dma +
urb->actual_length;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6958ab9..c1982fc 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -230,15 +230,37 @@
}
/* blocking notifier support */
+static void musb_otg_notifier_work(struct work_struct *data_notifier_work);
+
static int musb_otg_notifications(struct notifier_block *nb,
unsigned long event, void *unused)
{
struct musb *musb = container_of(nb, struct musb, nb);
+ struct musb_otg_work *otg_work;
+
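+ /*
+ * This notifier may run in atomic context, so only record the event
+ * here (hence the GFP_ATOMIC allocation) and defer the real handling
+ * to the dedicated musb-otg workqueue.
+ */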
+ otg_work = kmalloc(sizeof(struct musb_otg_work), GFP_ATOMIC);
+ if (!otg_work)
+ return notifier_from_errno(-ENOMEM);
+ INIT_WORK(&otg_work->work, musb_otg_notifier_work);
+ otg_work->xceiv_event = event;
+ otg_work->musb = musb;
+ queue_work(musb->otg_notifier_wq, &otg_work->work);
+ return 0;
+}
+
+static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
+{
+ struct musb_otg_work *otg_work =
+ container_of(data_notifier_work, struct musb_otg_work, work);
+ struct musb *musb = otg_work->musb;
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *pdata = dev->platform_data;
struct omap_musb_board_data *data = pdata->board_data;
+ enum usb_xceiv_events xceiv_event = otg_work->xceiv_event;
- switch (event) {
+ kfree(otg_work);
+
+ switch (xceiv_event) {
case USB_EVENT_ID:
dev_dbg(musb->controller, "ID GND\n");
@@ -257,6 +279,11 @@
}
break;
+ case USB_EVENT_CHARGER:
+ dev_dbg(musb->controller, "Dedicated charger connect\n");
+ musb->is_ac_charger = true;
+ break;
+
case USB_EVENT_VBUS:
dev_dbg(musb->controller, "VBUS Connect\n");
@@ -268,6 +295,13 @@
break;
case USB_EVENT_NONE:
+ if (musb->is_ac_charger) {
+ dev_dbg(musb->controller,
+ "Dedicated charger disconnect\n");
+ musb->is_ac_charger = false;
+ break;
+ }
+
dev_dbg(musb->controller, "VBUS Disconnect\n");
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -311,10 +345,17 @@
return -ENODEV;
}
+ musb->otg_notifier_wq = create_singlethread_workqueue("musb-otg");
+ if (!musb->otg_notifier_wq) {
+ pr_err("HS USB OTG: cannot allocate otg event wq\n");
+ status = -ENOMEM;
+ goto err1;
+ }
+
status = pm_runtime_get_sync(dev);
if (status < 0) {
dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
- goto err1;
+ goto err2;
}
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -347,7 +388,10 @@
return 0;
+err2:
+ destroy_workqueue(musb->otg_notifier_wq);
err1:
+ otg_put_transceiver(musb->xceiv);
pm_runtime_disable(dev);
return status;
}
@@ -401,6 +445,8 @@
{
del_timer_sync(&musb_idle_timer);
+ otg_unregister_notifier(musb->xceiv, &musb->nb);
+ destroy_workqueue(musb->otg_notifier_wq);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
@@ -505,6 +551,9 @@
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
omap2430_low_level_exit(musb);
otg_set_suspend(musb->xceiv, 1);
@@ -517,6 +566,9 @@
struct musb *musb = glue_to_musb(glue);
omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+
otg_set_suspend(musb->xceiv, 0);
return 0;
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index cfb5aa7..9f51eaf 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -63,9 +63,6 @@
#define USB_OTG_ADP_RISE 0x19
#define USB_OTG_REVISION 0x1A
-/* to be moved to LDO */
-#define TWL6030_MISC2 0xE5
-#define TWL6030_CFG_LDO_PD2 0xF5
#define TWL6030_BACKUP_REG 0xFA
#define STS_HW_CONDITIONS 0x21
@@ -95,11 +92,15 @@
struct regulator *usb3v3;
+ /* used to set vbus, in atomic path */
+ struct work_struct set_vbus_work;
+
int irq1;
int irq2;
u8 linkstat;
u8 asleep;
bool irq_enabled;
+ bool vbus_enable;
unsigned long features;
};
@@ -215,12 +216,6 @@
/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
- /* Program CFG_LDO_PD2 register and set VUSB bit */
- twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_CFG_LDO_PD2);
-
- /* Program MISC2 register and set bit VUSB_IN_VBAT */
- twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
-
twl->usb3v3 = regulator_get(twl->dev, regulator_name);
if (IS_ERR(twl->usb3v3))
return -ENODEV;
@@ -370,20 +365,28 @@
return 0;
}
-static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
+static void otg_set_vbus_work(struct work_struct *data)
{
- struct twl6030_usb *twl = xceiv_to_twl(x);
-
+ struct twl6030_usb *twl = container_of(data, struct twl6030_usb,
+ set_vbus_work);
/*
* Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1
* register. This enables boost mode.
*/
- if (enabled)
+ if (twl->vbus_enable)
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x40,
CHARGERUSB_CTRL1);
- else
+ else
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x00,
CHARGERUSB_CTRL1);
+}
+
+static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
+{
+ struct twl6030_usb *twl = xceiv_to_twl(x);
+
+ twl->vbus_enable = enabled;
+ schedule_work(&twl->set_vbus_work);
return 0;
}
@@ -444,6 +447,9 @@
ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+ INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+
+ twl->vbus_enable = false;
twl->irq_enabled = true;
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
@@ -494,6 +500,7 @@
regulator_put(twl->usb3v3);
pdata->phy_exit(twl->dev);
device_remove_file(twl->dev, &dev_attr_vbus);
+ cancel_work_sync(&twl->set_vbus_work);
kfree(twl);
return 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4c85a4b..1d04ca2f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/pvr/Kconfig"
+
source "drivers/gpu/ion/Kconfig"
config VGASTATE
@@ -2385,6 +2387,13 @@
Choose this option if you want to use the Unigfx device as a
framebuffer device. Without the support of PCI & AGP.
+config HDMI_TI_4XXX_IP
+ tristate
+ default n
+ help
+ HDMI library interface for TI OMAP4/Netra IP.
+ See http://www.hdmi.org/ for HDMI specification.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8b83129..22eaeb6 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -134,6 +134,7 @@
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
+obj-$(CONFIG_HDMI_TI_4XXX_IP) += hdmi_ti_4xxx_ip.o
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 4f57485..f3fa446 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -546,6 +546,9 @@
static void get_detailed_timing(unsigned char *block,
struct fb_videomode *mode)
{
+ int v_size = V_SIZE;
+ int h_size = H_SIZE;
+
mode->xres = H_ACTIVE;
mode->yres = V_ACTIVE;
mode->pixclock = PIXEL_CLOCK;
@@ -574,11 +577,18 @@
}
mode->flag = FB_MODE_IS_DETAILED;
+ /* get aspect ratio */
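+ /*
+ * 16:9 is 32:18, so accept h_size/v_size strictly between 31/18 and
+ * 33/18; likewise 4:3 is 24:18, accepted between 23/18 and 25/18.
+ * Using integer comparisons avoids any division.
+ */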
+ if (h_size * 18 > v_size * 31 && h_size * 18 < v_size * 33)
+ mode->flag |= FB_FLAG_RATIO_16_9;
+ if (h_size * 18 > v_size * 23 && h_size * 18 < v_size * 25)
+ mode->flag |= FB_FLAG_RATIO_4_3;
+
DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
+ DPRINTK("%dmm %dmm ", H_SIZE, V_SIZE);
DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
(VSYNC_POSITIVE) ? "+" : "-");
}
@@ -976,7 +986,7 @@
/**
* fb_edid_add_monspecs() - add monitor video modes from E-EDID data
* @edid: 128 byte array with an E-EDID block
- * @spacs: monitor specs to be extended
+ * @specs: monitor specs to be extended
*/
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
@@ -1001,14 +1011,23 @@
while (pos < edid[2]) {
u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;
pr_debug("Data block %u of %u bytes\n", type, len);
- if (type == 2)
+
+ pos++;
+ if (type == 2) {
for (i = pos; i < pos + len; i++) {
- u8 idx = edid[pos + i] & 0x7f;
+ u8 idx = edid[i] & 0x7f;
svd[svd_n++] = idx;
pr_debug("N%sative mode #%d\n",
- edid[pos + i] & 0x80 ? "" : "on-n", idx);
+ edid[i] & 0x80 ? "" : "on-n", idx);
}
- pos += len + 1;
+ } else if (type == 3 && len >= 3) {
+ u32 ieee_reg = edid[pos] | (edid[pos + 1] << 8) |
+ (edid[pos + 2] << 16);
+ if (ieee_reg == 0x000c03)
+ specs->misc |= FB_MISC_HDMI;
+ }
+
+ pos += len;
}
block = edid + edid[2];
@@ -1041,10 +1060,8 @@
for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
int idx = svd[i - specs->modedb_len - num];
- if (!idx || idx > 63) {
+ if (!idx || idx > (CEA_MODEDB_SIZE - 1)) {
pr_warning("Reserved SVD code %d\n", idx);
- } else if (idx > ARRAY_SIZE(cea_modes) || !cea_modes[idx].xres) {
- pr_warning("Unimplemented SVD code %d\n", idx);
} else {
memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
diff --git a/drivers/video/hdmi_ti_4xxx_ip.c b/drivers/video/hdmi_ti_4xxx_ip.c
new file mode 100644
index 0000000..ad5d5294
--- /dev/null
+++ b/drivers/video/hdmi_ti_4xxx_ip.c
@@ -0,0 +1,1368 @@
+/*
+ * hdmi_ti_4xxx_ip.c
+ *
+ * HDMI IP driver library for TI81xx, TI38xx, TI OMAP4, etc.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/omapfb.h>
+
+#include "hdmi_ti_4xxx_ip.h"
+
+static inline void hdmi_write_reg(void __iomem *base_addr,
+ const struct hdmi_reg idx, u32 val)
+{
+ __raw_writel(val, base_addr + idx.idx);
+}
+
+static inline u32 hdmi_read_reg(void __iomem *base_addr,
+ const struct hdmi_reg idx)
+{
+ return __raw_readl(base_addr + idx.idx);
+}
+
+static inline void __iomem *hdmi_wp_base(struct hdmi_ip_data *ip_data)
+{
+ return (void __iomem *) (ip_data->base_wp);
+}
+
+static inline void __iomem *hdmi_phy_base(struct hdmi_ip_data *ip_data)
+{
+ return (void __iomem *) (ip_data->base_wp + ip_data->hdmi_phy_offset);
+}
+
+static inline void __iomem *hdmi_pll_base(struct hdmi_ip_data *ip_data)
+{
+ return (void __iomem *) (ip_data->base_wp + ip_data->hdmi_pll_offset);
+}
+
+static inline void __iomem *hdmi_av_base(struct hdmi_ip_data *ip_data)
+{
+ return (void __iomem *)
+ (ip_data->base_wp + ip_data->hdmi_core_av_offset);
+}
+
+static inline void __iomem *hdmi_core_sys_base(struct hdmi_ip_data *ip_data)
+{
+ return (void __iomem *)
+ (ip_data->base_wp + ip_data->hdmi_core_sys_offset);
+}
+
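+/*
+ * Poll bits [b2:b1] of register 'idx' until they read back 'val',
+ * checking once per microsecond for up to ~10 ms. Returns 'val' on
+ * success and '!val' on timeout, so callers compare the result against
+ * the expected value.
+ */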
+static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
+ const struct hdmi_reg idx,
+ int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(base_addr, idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
+
+static int hdmi_pll_init(struct hdmi_ip_data *ip_data,
+ enum hdmi_clk_refsel refsel, int dcofreq,
+ struct hdmi_pll_info *fmt, u16 sd)
+{
+ u32 r;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1_PLL_REGN */
+
+ hdmi_write_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+
+ if (dcofreq) {
+ /* divider programming for frequencies beyond 1000 MHz */
+ REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_CFG3, sd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+
+ hdmi_write_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data), PLLCTRL_PLL_GO,
+ 0, 0, 1) != 1) {
+ pr_err("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
+ PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ pr_err("cannot lock PLL\n");
+ pr_err("CFG1 0x%x\n",
+ hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG1));
+ pr_err("CFG2 0x%x\n",
+ hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG2));
+ pr_err("CFG4 0x%x\n",
+ hdmi_read_reg(hdmi_pll_base(ip_data), PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("PLL locked!\n");
+
+ return 0;
+}
+static int hdmi_wait_for_audio_stop(struct hdmi_ip_data *ip_data)
+{
+ int count = 0;
+ /* wait for audio to stop before powering off the PHY */
+ while (REG_GET(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 31, 31) != 0) {
+ msleep(100);
+ if (count++ > 100) {
+ pr_err("Audio is not turned off "
+ "even after 10 seconds\n");
+ return -ETIMEDOUT;
+ }
+ }
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data,
+ enum hdmi_phy_pwr val, bool set_mode)
+{
+ /* FIXME audio driver should have already stopped, but not yet */
+ if (val == HDMI_PHYPWRCMD_OFF && !set_mode)
+ hdmi_wait_for_audio_stop(ip_data);
+
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
+ HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
+ pr_err("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+int hdmi_ti_4xxx_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS is set */
+ if (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL,
+ 1, 0, val) != val) {
+ pr_err("Failed to set PLL_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_set_pll_pwr);
+
+static int hdmi_pll_reset(struct hdmi_ip_data *ip_data)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(hdmi_pll_base(ip_data), PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* READ 0x0 reset is in progress */
+ if (hdmi_wait_for_bit_change(hdmi_pll_base(ip_data),
+ PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
+ pr_err("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int hdmi_ti_4xxx_pll_program(struct hdmi_ip_data *ip_data,
+ struct hdmi_pll_info *fmt)
+{
+ u16 r = 0;
+ enum hdmi_clk_refsel refsel;
+
+ r = hdmi_ti_4xxx_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_ti_4xxx_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset(ip_data);
+ if (r)
+ return r;
+
+ refsel = HDMI_REFSEL_SYSCLK;
+
+ r = hdmi_pll_init(ip_data, refsel, fmt->dcofreq, fmt, fmt->regsd);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int hdmi_ti_4xxx_phy_init(struct hdmi_ip_data *ip_data)
+{
+ u16 r = 0;
+
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON, false);
+ if (r)
+ return r;
+
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON, false);
+ if (r)
+ return r;
+
+ /*
+ * Read address 0 so that the SCP reset completes; this is a dummy
+ * access performed to make sure the reset is done.
+ */
+ hdmi_read_reg(hdmi_phy_base(ip_data), HDMI_TXPHY_TX_CTRL);
+
+ /*
+ * Write to PHY address 0 to configure the clock: select HFBITCLK
+ * by writing the HDMI_TXPHY_TX_CONTROL_FREQOUT field.
+ */
+ REG_FLD_MOD(hdmi_phy_base(ip_data), HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(hdmi_phy_base(ip_data),
+ HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(hdmi_phy_base(ip_data),
+ HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ return 0;
+}
+
+void hdmi_ti_4xxx_phy_off(struct hdmi_ip_data *ip_data, bool set_mode)
+{
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF, set_mode);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_phy_init);
+EXPORT_SYMBOL(hdmi_ti_4xxx_phy_off);
+
+static int hdmi_core_ddc_edid(struct hdmi_ip_data *ip_data,
+ u8 *pedid, int ext)
+{
+ u32 i, j;
+ char checksum = 0;
+ u32 offset = 0;
+
+ /* Turn on CLK for DDC */
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+ /*
+ * SW HACK: without this delay the DDC (I2C bus) reads zero or
+ * right-shifted values. The behavior is not consistent and is seen
+ * only with some TVs.
+ */
+ msleep(300);
+
+ if (!ext) {
+ /* Clk SCL Devices */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 4, 4, 0) != 0) {
+ pr_err("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Clear FIFO */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data)
+ , HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 4, 4, 0) != 0) {
+ pr_err("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ } else {
+ if (ext % 2 != 0)
+ offset = 0x80;
+ }
+
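+ /*
+ * E-DDC addressing: each 256-byte segment holds two 128-byte EDID
+ * blocks, so block 'ext' lives in segment ext/2 and odd-numbered
+ * blocks start at offset 0x80 within that segment.
+ */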
+ /* Load Segment Address Register */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_SEGM, ext/2, 7, 0);
+
+ /* Load Slave Address Register */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+ /* Load Offset Address Register */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+
+ /* Load Byte Count */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
+
+ /* Set DDC_CMD */
+ if (ext)
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_CMD, 0x4, 3, 0);
+ else
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_BUS_LOW */
+ if (REG_GET(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
+ pr_err("I2C Bus Low?\n");
+ return -EIO;
+ }
+ /* HDMI_CORE_DDC_STATUS_NO_ACK */
+ if (REG_GET(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
+ pr_err("I2C No Ack\n");
+ return -EIO;
+ }
+
+ i = ext * 128;
+ j = 0;
+ while (((REG_GET(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 4, 4) == 1) ||
+ (REG_GET(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_STATUS, 2, 2) == 0)) && j < 128) {
+
+ if (REG_GET(hdmi_core_sys_base(ip_data)
+ , HDMI_CORE_DDC_STATUS, 2, 2) == 0) {
+ /* FIFO not empty */
+ pedid[i++] = REG_GET(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_DDC_DATA, 7, 0);
+ j++;
+ }
+ }
+
+ for (j = 0; j < 128; j++)
+ checksum += pedid[j];
+
+ if (checksum != 0) {
+ pr_err("E-EDID checksum failed!!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int read_ti_4xxx_edid(struct hdmi_ip_data *ip_data, u8 *pedid, u16 max_length)
+{
+ int r = 0, n = 0, i = 0;
+ int max_ext_blocks = (max_length / 128) - 1;
+
+ r = hdmi_core_ddc_edid(ip_data, pedid, 0);
+ if (r) {
+ return r;
+ } else {
+ n = pedid[0x7e];
+
+ /*
+ * NOTE: we must comply with the max_length set by the caller.
+ * A better implementation would allocate the memory needed to
+ * store the EDID according to the extension-block count found
+ * in the first block.
+ */
+ if (n > max_ext_blocks)
+ n = max_ext_blocks;
+
+ for (i = 1; i <= n; i++) {
+ r = hdmi_core_ddc_edid(ip_data, pedid, i);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(read_ti_4xxx_edid);
+
+static void hdmi_core_init(enum hdmi_deep_color_mode deep_color,
+ struct hdmi_core_video_config *video_cfg,
+ struct hdmi_core_infoframe_avi *avi_cfg,
+ struct hdmi_core_packet_enable_repeat *repeat_cfg)
+{
+ pr_debug("Enter hdmi_core_init\n");
+
+ /* video core */
+ switch (deep_color) {
+ case HDMI_DEEP_COLOR_30BIT:
+ video_cfg->ip_bus_width = HDMI_INPUT_10BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_10BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTENABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODE30BITPERPIXEL;
+ break;
+ case HDMI_DEEP_COLOR_36BIT:
+ video_cfg->ip_bus_width = HDMI_INPUT_12BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_12BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTENABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODE36BITPERPIXEL;
+ break;
+ case HDMI_DEEP_COLOR_24BIT:
+ default:
+ video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
+ break;
+ }
+
+ video_cfg->hdmi_dvi = HDMI_DVI;
+ video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
+
+ /* info frame */
+ avi_cfg->db1_format = 0;
+ avi_cfg->db1_active_info = 0;
+ avi_cfg->db1_bar_info_dv = 0;
+ avi_cfg->db1_scan_info = 0;
+ avi_cfg->db2_colorimetry = 0;
+ avi_cfg->db2_aspect_ratio = 0;
+ avi_cfg->db2_active_fmt_ar = 0;
+ avi_cfg->db3_itc = 0;
+ avi_cfg->db3_ec = 0;
+ avi_cfg->db3_q_range = 0;
+ avi_cfg->db3_nup_scaling = 0;
+ avi_cfg->db4_videocode = 0;
+ avi_cfg->db5_pixel_repeat = 0;
+ avi_cfg->db6_7_line_eoftop = 0;
+ avi_cfg->db8_9_line_sofbottom = 0;
+ avi_cfg->db10_11_pixel_eofleft = 0;
+ avi_cfg->db12_13_pixel_sofright = 0;
+
+ /* packet enable and repeat */
+ repeat_cfg->audio_pkt = 0;
+ repeat_cfg->audio_pkt_repeat = 0;
+ repeat_cfg->avi_infoframe = 0;
+ repeat_cfg->avi_infoframe_repeat = 0;
+ repeat_cfg->gen_cntrl_pkt = 0;
+ repeat_cfg->gen_cntrl_pkt_repeat = 0;
+ repeat_cfg->generic_pkt = 0;
+ repeat_cfg->generic_pkt_repeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_powerdown_disable\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_release(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_swreset_release\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(struct hdmi_ip_data *ip_data)
+{
+ pr_debug("Enter hdmi_core_swreset_assert\n");
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+}
+
+/* HDMI_CORE_VIDEO_CONFIG */
+static void hdmi_core_video_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_video_config *cfg)
+{
+ u32 r = 0;
+
+ /* sys_ctrl1 default configuration not tunable */
+ r = hdmi_read_reg(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ /* PD bit has to be written to receive the interrupts */
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_POWER_DOWN, 0, 0);
+ hdmi_write_reg(hdmi_core_sys_base(ip_data), HDMI_CORE_CTRL1, r);
+
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
+
+ /* Vid_Mode */
+ r = hdmi_read_reg(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_VID_MODE);
+
+ /* dither truncation configuration */
+ if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
+ r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
+ r = FLD_MOD(r, 1, 5, 5);
+ } else {
+ r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
+ r = FLD_MOD(r, 0, 5, 5);
+ }
+ hdmi_write_reg(hdmi_core_sys_base(ip_data), HDMI_CORE_SYS_VID_MODE, r);
+
+ /* HDMI_Ctrl */
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL);
+ r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
+ r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
+ r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL, r);
+
+ /* TMDS_CTRL */
+ REG_FLD_MOD(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
+}
+
+static void hdmi_core_aux_infoframe_avi_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_avi info_avi)
+{
+ u32 val;
+ char sum = 0, checksum = 0;
+
+ sum += 0x82 + 0x002 + 0x00D;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_TYPE, 0x082);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_VERS, 0x002);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+ val = (info_avi.db1_format << 5) |
+ (info_avi.db1_active_info << 4) |
+ (info_avi.db1_bar_info_dv << 2) |
+ (info_avi.db1_scan_info);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(0), val);
+ sum += val;
+
+ val = (info_avi.db2_colorimetry << 6) |
+ (info_avi.db2_aspect_ratio << 4) |
+ (info_avi.db2_active_fmt_ar);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(1), val);
+ sum += val;
+
+ val = (info_avi.db3_itc << 7) |
+ (info_avi.db3_ec << 4) |
+ (info_avi.db3_q_range << 2) |
+ (info_avi.db3_nup_scaling);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(2), val);
+ sum += val;
+
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(3),
+ info_avi.db4_videocode);
+ sum += info_avi.db4_videocode;
+
+ val = info_avi.db5_pixel_repeat;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(4), val);
+ sum += val;
+
+ val = info_avi.db6_7_line_eoftop & 0x00FF;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(5), val);
+ sum += val;
+
+ val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(6), val);
+ sum += val;
+
+ val = info_avi.db8_9_line_sofbottom & 0x00FF;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(7), val);
+ sum += val;
+
+ val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(8), val);
+ sum += val;
+
+ val = info_avi.db10_11_pixel_eofleft & 0x00FF;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(9), val);
+ sum += val;
+
+ val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(10), val);
+ sum += val;
+
+ val = info_avi.db12_13_pixel_sofright & 0x00FF;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(11), val);
+ sum += val;
+
+ val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_DBYTE(12), val);
+ sum += val;
+
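+ /*
+ * Per CEA-861, the checksum byte is chosen so that the byte sum of
+ * the complete AVI infoframe (header, payload and checksum) is zero
+ * modulo 256.
+ */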
+ checksum = 0x100 - sum;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AVI_CHSUM, checksum);
+}
+
+static void hdmi_core_av_packet_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_packet_enable_repeat repeat_cfg)
+{
+ /* enable/repeat the infoframe */
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL1,
+ (repeat_cfg.audio_pkt << 5) |
+ (repeat_cfg.audio_pkt_repeat << 4) |
+ (repeat_cfg.avi_infoframe << 1) |
+ (repeat_cfg.avi_infoframe_repeat));
+
+ /* enable/repeat the packet */
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_PB_CTRL2,
+ (repeat_cfg.gen_cntrl_pkt << 3) |
+ (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
+ (repeat_cfg.generic_pkt << 1) |
+ (repeat_cfg.generic_pkt_repeat));
+}
+
+static void hdmi_wp_init(struct omap_video_timings *timings,
+ struct hdmi_video_format *video_fmt,
+ struct hdmi_video_interface *video_int)
+{
+ pr_debug("Enter hdmi_wp_init\n");
+
+ timings->hbp = 0;
+ timings->hfp = 0;
+ timings->hsw = 0;
+ timings->vbp = 0;
+ timings->vfp = 0;
+ timings->vsw = 0;
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = 0;
+ video_fmt->x_res = 0;
+
+ video_int->vsp = 0;
+ video_int->hsp = 0;
+
+ video_int->interlacing = 0;
+ video_int->tm = 0; /* HDMI_TIMING_SLAVE */
+
+}
+
+void hdmi_ti_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_wp_video_start);
+
+int hdmi_ti_4xxx_wp_get_video_state(struct hdmi_ip_data *ip_data)
+{
+ u32 status = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
+
+ return (status & 0x80000000) ? 1 : 0;
+}
+
+int hdmi_ti_4xxx_set_wait_soft_reset(struct hdmi_ip_data *ip_data)
+{
+ u8 count = 0;
+
+ /* reset W1 */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_SYSCONFIG, 0x1, 0, 0);
+
+ /* wait till SOFTRESET == 0 */
+ while (hdmi_wait_for_bit_change(hdmi_wp_base(ip_data),
+ HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
+ if (count++ > 10) {
+ pr_err("SYSCONFIG[SOFTRESET] bit not set to 0\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Make module smart and wakeup-capable */
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_SYSCONFIG, 0x3, 3, 2);
+
+ return 0;
+}
+
+
+static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ pr_debug("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->y_res = param->timings.yres;
+ video_fmt->x_res = param->timings.xres;
+
+ omapfb_fb2dss_timings(¶m->timings, timings);
+}
+
+static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG,
+ video_fmt->packing_mode, 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_SIZE, l);
+}
+
+static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data,
+ struct hdmi_video_interface *video_int)
+{
+ u32 r;
+ pr_debug("Enter hdmi_wp_video_config_interface\n");
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, video_int->vsp, 7, 7);
+ r = FLD_MOD(r, video_int->hsp, 6, 6);
+ r = FLD_MOD(r, video_int->interlacing, 3, 3);
+ r = FLD_MOD(r, video_int->tm, 1, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
+}
+
+static void hdmi_wp_video_config_timing(struct hdmi_ip_data *ip_data,
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ pr_debug("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+static void hdmi_wp_core_interrupt_set(struct hdmi_ip_data *ip_data, u32 val)
+{
+ u32 irqStatus;
+ irqStatus = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET);
+ pr_debug("[HDMI] WP_IRQENABLE_SET..currently reads as:%x\n", irqStatus);
+ irqStatus = irqStatus | val;
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_IRQENABLE_SET, irqStatus);
+ pr_debug("[HDMI]WP_IRQENABLE_SET..changed to :%x\n", irqStatus);
+}
+
+void hdmi_ti_4xxx_basic_configure(struct hdmi_ip_data *ip_data,
+ struct hdmi_config *cfg)
+{
+ /* HDMI */
+ struct omap_video_timings video_timing;
+ struct hdmi_video_format video_format;
+ struct hdmi_video_interface video_interface;
+ /* HDMI core */
+ struct hdmi_core_infoframe_avi avi_cfg;
+ struct hdmi_core_video_config v_core_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_cfg;
+
+ hdmi_wp_init(&video_timing, &video_format,
+ &video_interface);
+
+ hdmi_core_init(cfg->deep_color, &v_core_cfg,
+ &avi_cfg,
+ &repeat_cfg);
+
+ hdmi_wp_core_interrupt_set(ip_data, HDMI_WP_IRQENABLE_CORE |
+ HDMI_WP_AUDIO_FIFO_UNDERFLOW);
+
+ hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
+
+ hdmi_wp_video_config_timing(ip_data, &video_timing);
+
+ /* video config */
+ video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+ hdmi_wp_video_config_format(ip_data, &video_format);
+
+ video_interface.vsp = !!(cfg->timings.sync & FB_SYNC_VERT_HIGH_ACT);
+ video_interface.hsp = !!(cfg->timings.sync & FB_SYNC_HOR_HIGH_ACT);
+ video_interface.interlacing = cfg->timings.vmode & FB_VMODE_INTERLACED;
+ video_interface.tm = 1; /* HDMI_TIMING_MASTER_24BIT */
+
+ hdmi_wp_video_config_interface(ip_data, &video_interface);
+
+ /*
+ * configure core video part
+ * set software reset in the core
+ */
+ hdmi_core_swreset_assert(ip_data);
+
+ /* power down off */
+ hdmi_core_powerdown_disable(ip_data);
+
+ v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
+ v_core_cfg.hdmi_dvi = cfg->cm.mode;
+
+ hdmi_core_video_config(ip_data, &v_core_cfg);
+
+ /* release software reset in the core */
+ hdmi_core_swreset_release(ip_data);
+
+ /*
+ * configure packet
+ * info frame video see doc CEA861-D page 65
+ */
+ avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+ avi_cfg.db1_active_info =
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+ avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+ avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+ if (cfg->cm.mode == HDMI_HDMI && cfg->cm.code < CEA_MODEDB_SIZE) {
+ if (cea_modes[cfg->cm.code].flag & FB_FLAG_RATIO_16_9)
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_169;
+ else if (cea_modes[cfg->cm.code].flag & FB_FLAG_RATIO_4_3)
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_43;
+ }
+ avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+ avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+ avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+ avi_cfg.db4_videocode = cfg->cm.code;
+ avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+ avi_cfg.db6_7_line_eoftop = 0;
+ avi_cfg.db8_9_line_sofbottom = 0;
+ avi_cfg.db10_11_pixel_eofleft = 0;
+ avi_cfg.db12_13_pixel_sofright = 0;
+
+ hdmi_core_aux_infoframe_avi_config(ip_data, avi_cfg);
+
+ /* enable/repeat the infoframe */
+ repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+ repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+ /* wakeup */
+ repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+ repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+ hdmi_core_av_packet_config(ip_data, repeat_cfg);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_basic_configure);
+
+u32 hdmi_ti_4xxx_irq_handler(struct hdmi_ip_data *ip_data)
+{
+ u32 val, sys_stat = 0, core_state = 0;
+ u32 intr2 = 0, intr3 = 0, r = 0;
+ void __iomem *wp_base = hdmi_wp_base(ip_data);
+ void __iomem *core_base = hdmi_core_sys_base(ip_data);
+
+ pr_debug("Enter hdmi_ti_4xxx_irq_handler\n");
+
+ val = hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
+ if (val & HDMI_WP_IRQSTATUS_CORE) {
+ core_state = hdmi_read_reg(core_base, HDMI_CORE_SYS_INTR_STATE);
+ if (core_state & 0x1) {
+ sys_stat = hdmi_read_reg(core_base,
+ HDMI_CORE_SYS_SYS_STAT);
+ intr2 = hdmi_read_reg(core_base, HDMI_CORE_SYS_INTR2);
+ intr3 = hdmi_read_reg(core_base, HDMI_CORE_SYS_INTR3);
+
+ pr_debug("HDMI_CORE_SYS_SYS_STAT = 0x%x\n", sys_stat);
+ pr_debug("HDMI_CORE_SYS_INTR2 = 0x%x\n", intr2);
+ pr_debug("HDMI_CORE_SYS_INTR3 = 0x%x\n", intr3);
+
+ hdmi_write_reg(core_base, HDMI_CORE_SYS_INTR2, intr2);
+ hdmi_write_reg(core_base, HDMI_CORE_SYS_INTR3, intr3);
+
+ hdmi_read_reg(core_base, HDMI_CORE_SYS_INTR2);
+ hdmi_read_reg(core_base, HDMI_CORE_SYS_INTR3);
+ }
+ }
+
+ if (val & HDMI_WP_AUDIO_FIFO_UNDERFLOW)
+ pr_err("HDMI_WP_AUDIO_FIFO_UNDERFLOW\n");
+
+ pr_debug("HDMI_WP_IRQSTATUS = 0x%x\n", val);
+ pr_debug("HDMI_CORE_SYS_INTR_STATE = 0x%x\n", core_state);
+
+ if (intr2 & HDMI_CORE_SYSTEM_INTR2__BCAP)
+ r |= HDMI_BCAP;
+
+ if (intr3 & HDMI_CORE_SYSTEM_INTR3__RI_ERR)
+ r |= HDMI_RI_ERR;
+
+ /* Ack other interrupts if any */
+ hdmi_write_reg(wp_base, HDMI_WP_IRQSTATUS, val);
+ /* flush posted write */
+ hdmi_read_reg(wp_base, HDMI_WP_IRQSTATUS);
+ return r;
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_irq_handler);
+
+void hdmi_ti_4xxx_dump_regs(struct hdmi_ip_data *ip_data, struct seq_file *s)
+{
+#define DUMPREG(g, r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(g, r))
+
+ void __iomem *wp_base = hdmi_wp_base(ip_data);
+ void __iomem *core_sys_base = hdmi_core_sys_base(ip_data);
+ void __iomem *phy_base = hdmi_phy_base(ip_data);
+ void __iomem *pll_base = hdmi_pll_base(ip_data);
+ void __iomem *av_base = hdmi_av_base(ip_data);
+
+ /* wrapper registers */
+ DUMPREG(wp_base, HDMI_WP_REVISION);
+ DUMPREG(wp_base, HDMI_WP_SYSCONFIG);
+ DUMPREG(wp_base, HDMI_WP_IRQSTATUS_RAW);
+ DUMPREG(wp_base, HDMI_WP_IRQSTATUS);
+ DUMPREG(wp_base, HDMI_WP_PWR_CTRL);
+ DUMPREG(wp_base, HDMI_WP_IRQENABLE_SET);
+ DUMPREG(wp_base, HDMI_WP_VIDEO_SIZE);
+ DUMPREG(wp_base, HDMI_WP_VIDEO_TIMING_H);
+ DUMPREG(wp_base, HDMI_WP_VIDEO_TIMING_V);
+ DUMPREG(wp_base, HDMI_WP_WP_CLK);
+
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_VND_IDL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DEV_IDL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DEV_IDH);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DEV_REV);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_SRST);
+ DUMPREG(core_sys_base, HDMI_CORE_CTRL1);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_SYS_STAT);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_VID_ACEN);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_VID_MODE);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_INTR_STATE);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_INTR1);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_INTR2);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_INTR3);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_INTR4);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_UMASK1);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_TMDS_CTRL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_DLY);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_CTRL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_TOP);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_CNTL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_CNTH);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_LINL);
+ DUMPREG(core_sys_base, HDMI_CORE_SYS_DE_LINH_1);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_CMD);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_STATUS);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_ADDR);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_OFFSET);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_COUNT1);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_COUNT2);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_DATA);
+ DUMPREG(core_sys_base, HDMI_CORE_DDC_SEGM);
+
+ DUMPREG(av_base, HDMI_CORE_AV_HDMI_CTRL);
+ DUMPREG(av_base, HDMI_CORE_AV_AVI_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_DBYTE);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_AUD_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_DBYTE);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_GEN_DBYTE);
+ DUMPREG(av_base, HDMI_CORE_AV_GEN_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_GEN2_DBYTE);
+ DUMPREG(av_base, HDMI_CORE_AV_GEN2_DBYTE_NELEMS);
+ DUMPREG(av_base, HDMI_CORE_AV_ACR_CTRL);
+ DUMPREG(av_base, HDMI_CORE_AV_FREQ_SVAL);
+ DUMPREG(av_base, HDMI_CORE_AV_N_SVAL1);
+ DUMPREG(av_base, HDMI_CORE_AV_N_SVAL2);
+ DUMPREG(av_base, HDMI_CORE_AV_N_SVAL3);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_SVAL1);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_SVAL2);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_SVAL3);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_HVAL1);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_HVAL2);
+ DUMPREG(av_base, HDMI_CORE_AV_CTS_HVAL3);
+ DUMPREG(av_base, HDMI_CORE_AV_AUD_MODE);
+ DUMPREG(av_base, HDMI_CORE_AV_SPDIF_CTRL);
+ DUMPREG(av_base, HDMI_CORE_AV_HW_SPDIF_FS);
+ DUMPREG(av_base, HDMI_CORE_AV_SWAP_I2S);
+ DUMPREG(av_base, HDMI_CORE_AV_SPDIF_ERTH);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_IN_MAP);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_CHST0);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_CHST1);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_CHST2);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_CHST4);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_CHST5);
+ DUMPREG(av_base, HDMI_CORE_AV_ASRC);
+ DUMPREG(av_base, HDMI_CORE_AV_I2S_IN_LEN);
+ DUMPREG(av_base, HDMI_CORE_AV_AUDO_TXSTAT);
+ DUMPREG(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+ DUMPREG(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+ DUMPREG(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+ DUMPREG(av_base, HDMI_CORE_AV_TEST_TXCTRL);
+
+ DUMPREG(av_base, HDMI_CORE_AV_DPD);
+ DUMPREG(av_base, HDMI_CORE_AV_PB_CTRL1);
+ DUMPREG(av_base, HDMI_CORE_AV_PB_CTRL2);
+ DUMPREG(av_base, HDMI_CORE_AV_AVI_TYPE);
+ DUMPREG(av_base, HDMI_CORE_AV_AVI_VERS);
+ DUMPREG(av_base, HDMI_CORE_AV_AVI_LEN);
+ DUMPREG(av_base, HDMI_CORE_AV_AVI_CHSUM);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_TYPE);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_VERS);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_LEN);
+ DUMPREG(av_base, HDMI_CORE_AV_SPD_CHSUM);
+ DUMPREG(av_base, HDMI_CORE_AV_AUDIO_TYPE);
+ DUMPREG(av_base, HDMI_CORE_AV_AUDIO_VERS);
+ DUMPREG(av_base, HDMI_CORE_AV_AUDIO_LEN);
+ DUMPREG(av_base, HDMI_CORE_AV_AUDIO_CHSUM);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_TYPE);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_VERS);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_LEN);
+ DUMPREG(av_base, HDMI_CORE_AV_MPEG_CHSUM);
+ DUMPREG(av_base, HDMI_CORE_AV_CP_BYTE1);
+ DUMPREG(av_base, HDMI_CORE_AV_CEC_ADDR_ID);
+
+ DUMPREG(pll_base, PLLCTRL_PLL_CONTROL);
+ DUMPREG(pll_base, PLLCTRL_PLL_STATUS);
+ DUMPREG(pll_base, PLLCTRL_PLL_GO);
+ DUMPREG(pll_base, PLLCTRL_CFG1);
+ DUMPREG(pll_base, PLLCTRL_CFG2);
+ DUMPREG(pll_base, PLLCTRL_CFG3);
+ DUMPREG(pll_base, PLLCTRL_CFG4);
+
+ DUMPREG(phy_base, HDMI_TXPHY_TX_CTRL);
+ DUMPREG(phy_base, HDMI_TXPHY_DIGITAL_CTRL);
+ DUMPREG(phy_base, HDMI_TXPHY_POWER_CTRL);
+ DUMPREG(phy_base, HDMI_TXPHY_PAD_CFG_CTRL);
+
+#undef DUMPREG
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_dump_regs);
+
+int hdmi_ti_4xxx_config_audio_acr(struct hdmi_ip_data *ip_data,
+ u32 sample_freq, u32 *n, u32 *cts, u32 pclk)
+{
+ u32 r;
+ u32 deep_color = 0;
+
+
+ if (n == NULL || cts == NULL)
+ return -EINVAL;
+ /*
+ * Obtain the current deep color configuration. This is needed
+ * to calculate the TMDS clock based on the pixel clock.
+ */
+ r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
+
+
+ switch (r) {
+ case 1: /* No deep color selected */
+ deep_color = 100;
+ break;
+ case 2: /* 10-bit deep color selected */
+ deep_color = 125;
+ break;
+ case 3: /* 12-bit deep color selected */
+ deep_color = 150;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (sample_freq) {
+ case 32000:
+ if ((deep_color == 125) && ((pclk == 54054)
+ || (pclk == 74250)))
+ *n = 8192;
+ else
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ if ((deep_color == 125) && ((pclk == 54054)
+ || (pclk == 74250)))
+ *n = 8192;
+ else
+ *n = 6144;
+ break;
+ default:
+ *n = 0;
+ return -EINVAL;
+ }
+
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
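+ /*
+ * Illustrative example (no deep color, deep_color == 100): 48 kHz
+ * audio on a 74.250 MHz pixel clock (pclk == 74250) uses N = 6144,
+ * giving CTS = 74250 * (6144 / 128) * 100 / (48000 / 10) = 74250,
+ * the value recommended by the HDMI specification.
+ */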
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_config_audio_acr);
+
+
+void hdmi_ti_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_format *aud_fmt)
+{
+ u32 r, reset_wp;
+
+ pr_debug("Enter hdmi_wp_audio_config_format\n");
+
+ reset_wp = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
+ /* Reset HDMI wrapper */
+ if (reset_wp & 0x80000000)
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 0, 31, 31);
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG);
+ r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+ r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
+ r = FLD_MOD(r, aud_fmt->type, 4, 4);
+ r = FLD_MOD(r, aud_fmt->justification, 3, 3);
+ r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
+ r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
+ r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
+
+ if (r & 0x80000000)
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, 1, 31, 31);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_wp_audio_config_format);
+
+void hdmi_ti_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_dma *aud_dma)
+{
+ u32 r;
+
+ pr_debug("Enter hdmi_wp_audio_config_dma\n");
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2);
+ r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
+ r = FLD_MOD(r, aud_dma->block_size, 7, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG2, r);
+
+ r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL);
+ r = FLD_MOD(r, aud_dma->mode, 9, 9);
+ r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
+ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_wp_audio_config_dma);
+
+
+void hdmi_ti_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_audio_config *cfg)
+{
+ u32 r;
+
+ /* audio clock recovery parameters */
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_ACR_CTRL);
+ /*
+ * MCLK_EN: use TCLK for ACR packets. For devices that use
+ * the MCLK, this is the first part of the MCLK initialization
+ */
+ r = FLD_MOD(r, 0, 2, 2);
+ r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
+ r = FLD_MOD(r, cfg->cts_mode, 0, 0);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_ACR_CTRL, r);
+
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
+
+ if (cfg->use_mclk)
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+
+ if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
+ } else {
+ /* Configure clock for audio packets */
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
+ cfg->aud_par_busclk, 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
+ (cfg->aud_par_busclk >> 8), 7, 0);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
+ (cfg->aud_par_busclk >> 16), 7, 0);
+ }
+
+ /* For devices using MCLK, this completes its initialization. */
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_ACR_CTRL,
+ cfg->use_mclk, 2, 2);
+
+ /* Override of SPDIF sample frequency with value in I2S_CHST4 */
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_SPDIF_CTRL,
+ cfg->fs_override, 1, 1);
+
+ /* I2S parameters */
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_CHST4,
+ cfg->freq_sample, 3, 0);
+
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_IN_CTRL);
+ r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
+ r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
+ r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
+ r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
+ r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
+ r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
+ r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
+ r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_IN_CTRL, r);
+
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_CHST5);
+ r = FLD_MOD(r, cfg->freq_sample, 7, 4);
+ r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
+ r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_CHST5, r);
+
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_I2S_IN_LEN,
+ cfg->i2s_cfg.in_length_bits, 3, 0);
+
+ /* Audio channels and mode parameters */
+ REG_FLD_MOD(hdmi_av_base(ip_data), HDMI_CORE_AV_HDMI_CTRL,
+ cfg->layout, 2, 1);
+ r = hdmi_read_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_MODE);
+ r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
+ r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
+ r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
+ r = FLD_MOD(r, cfg->en_spdif, 1, 1);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_MODE, r);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_core_audio_config);
+
+void hdmi_ti_4xxx_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_audio *info_aud)
+{
+ u8 val;
+ u8 sum = 0, checksum = 0;
+
+ /*
+ * Set audio info frame type, version and length as
+ * described in HDMI 1.4a Section 8.2.2 specification.
+ * Checksum calculation is defined in Section 5.3.5.
+ */
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUDIO_TYPE, 0x84);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUDIO_VERS, 0x01);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUDIO_LEN, 0x0a);
+	sum += 0x84 + 0x01 + 0x0a;
+
+ val = (info_aud->db1_coding_type << 4)
+ | (info_aud->db1_channel_count - 1);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(0), val);
+ sum += val;
+
+ val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(1), val);
+ sum += val;
+
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+
+ val = info_aud->db4_channel_alloc;
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(3), val);
+ sum += val;
+
+ val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(4), val);
+ sum += val;
+
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
+ hdmi_write_reg(hdmi_av_base(ip_data), HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
+
+ checksum = 0x100 - sum;
+ hdmi_write_reg(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUDIO_CHSUM, checksum);
+
+ /*
+ * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
+ * is available.
+ */
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_core_audio_infoframe_config);
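The checksum written to HDMI_CORE_AV_AUDIO_CHSUM follows the generic HDMI/CEA infoframe rule: the header bytes (type 0x84, version 0x01, length 0x0a), every payload byte and the checksum itself must sum to zero modulo 256, so checksum = 0x100 - (sum of the other bytes). A minimal standalone sketch with illustrative payload bytes (not values produced by this driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* header (type, version, length) followed by illustrative payload bytes */
	uint8_t bytes[] = { 0x84, 0x01, 0x0a, 0x11, 0x0d, 0x00, 0x00, 0x00 };
	uint8_t sum = 0, checksum;
	size_t i;

	for (i = 0; i < sizeof(bytes); i++)
		sum += bytes[i];

	checksum = 0x100 - sum;		/* all bytes plus checksum wrap to 0 */
	printf("checksum = 0x%02x\n", checksum);
	return 0;
}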
+
+void hdmi_ti_4xxx_audio_transfer_en(struct hdmi_ip_data *ip_data,
+ bool enable)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_audio_transfer_en);
+
+
+void hdmi_ti_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+}
+EXPORT_SYMBOL(hdmi_ti_4xxx_wp_audio_enable);
+
+int hdmi_ti_4xx_check_aksv_data(struct hdmi_ip_data *ip_data)
+{
+ u32 aksv_data[5];
+ int i, j, ret;
+ int one = 0, zero = 0;
+	/* check if the HDCP AKSV registers are populated.
+	 * If not, load the keys and reset the wrapper.
+ */
+ for (i = 0; i < 5; i++) {
+ aksv_data[i] = hdmi_read_reg(hdmi_core_sys_base(ip_data),
+ HDMI_CORE_AKSV(i));
+ /* Count number of zero / one */
+ for (j = 0; j < 8; j++)
+ (aksv_data[i] & (0x01 << j)) ? one++ : zero++;
+ pr_debug("%x ", aksv_data[i] & 0xFF);
+ }
+
+ ret = (one == zero) ? HDMI_AKSV_VALID :
+ (one == 0) ? HDMI_AKSV_ZERO : HDMI_AKSV_ERROR;
+
+ return ret;
+
+}
+EXPORT_SYMBOL(hdmi_ti_4xx_check_aksv_data);
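The one/zero counting above relies on a property of HDCP keys: a valid 40-bit AKSV contains exactly twenty 1 bits and twenty 0 bits, so equal counts mean the key registers are populated, an all-zero result means the keys have not been loaded yet, and anything else is an error. A minimal sketch of the same test over five illustrative bytes (a real AKSV comes from the device's HDCP provisioning, not from this example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t aksv[5] = { 0xf0, 0xf0, 0xf0, 0xf0, 0x0f };	/* 20 ones, 20 zeros */
	int i, j, one = 0, zero = 0;

	for (i = 0; i < 5; i++)
		for (j = 0; j < 8; j++)
			(aksv[i] & (1 << j)) ? one++ : zero++;

	printf("%s\n", (one == zero) ? "valid" :
		       (one == 0) ? "zero" : "error");
	return 0;
}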
+
+static int __init hdmi_ti_4xxx_init(void)
+{
+ return 0;
+}
+
+static void __exit hdmi_ti_4xxx_exit(void)
+{
+
+}
+
+module_init(hdmi_ti_4xxx_init);
+module_exit(hdmi_ti_4xxx_exit);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("hdmi_ti_4xxx_ip module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/hdmi_ti_4xxx_ip.h b/drivers/video/hdmi_ti_4xxx_ip.h
new file mode 100644
index 0000000..15f2703
--- /dev/null
+++ b/drivers/video/hdmi_ti_4xxx_ip.h
@@ -0,0 +1,346 @@
+/*
+ * hdmi_ti_4xxx_ip.h
+ *
+ * HDMI header definitions for TI81xx, TI38xx, TI OMAP4 and similar TI processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI_TI_4xxx_H_
+#define _HDMI_TI_4xxx_H_
+
+#include <linux/string.h>
+#include <video/omapdss.h>
+#include <video/hdmi_ti_4xxx_ip.h>
+
+struct hdmi_reg { u16 idx; };
+
+#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
+
+/* HDMI Wrapper */
+#define HDMI_WP_REG(idx) HDMI_REG(idx)
+
+#define HDMI_WP_REVISION HDMI_WP_REG(0x0)
+#define HDMI_WP_SYSCONFIG HDMI_WP_REG(0x10)
+#define HDMI_WP_IRQSTATUS_RAW HDMI_WP_REG(0x24)
+#define HDMI_WP_IRQSTATUS HDMI_WP_REG(0x28)
+#define HDMI_WP_PWR_CTRL HDMI_WP_REG(0x40)
+#define HDMI_WP_IRQENABLE_SET HDMI_WP_REG(0x2C)
+#define HDMI_WP_VIDEO_CFG HDMI_WP_REG(0x50)
+#define HDMI_WP_VIDEO_SIZE HDMI_WP_REG(0x60)
+#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
+#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
+#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
+#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80)
+#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84)
+#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88)
+#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C)
+#define HDMI_WP_IRQSTATUS_CORE 0x1
+#define HDMI_WP_IRQENABLE_CORE 0x1
+#define HDMI_WP_AUDIO_FIFO_UNDERFLOW (0x1 << 8)
+
+/* HDMI IP Core System */
+#define HDMI_CORE_SYS_REG(idx) HDMI_REG(idx)
+
+#define HDMI_CORE_SYS_VND_IDL HDMI_CORE_SYS_REG(0x0)
+#define HDMI_CORE_SYS_DEV_IDL HDMI_CORE_SYS_REG(0x8)
+#define HDMI_CORE_SYS_DEV_IDH HDMI_CORE_SYS_REG(0xC)
+#define HDMI_CORE_SYS_DEV_REV HDMI_CORE_SYS_REG(0x10)
+#define HDMI_CORE_SYS_SRST HDMI_CORE_SYS_REG(0x14)
+#define HDMI_CORE_CTRL1 HDMI_CORE_SYS_REG(0x20)
+#define HDMI_CORE_SYS_SYS_STAT HDMI_CORE_SYS_REG(0x24)
+#define HDMI_CORE_SYS_VID_ACEN HDMI_CORE_SYS_REG(0x124)
+#define HDMI_CORE_SYS_VID_MODE HDMI_CORE_SYS_REG(0x128)
+#define HDMI_CORE_SYS_INTR_STATE HDMI_CORE_SYS_REG(0x1C0)
+#define HDMI_CORE_SYS_INTR1 HDMI_CORE_SYS_REG(0x1C4)
+#define HDMI_CORE_SYS_INTR2 HDMI_CORE_SYS_REG(0x1C8)
+#define HDMI_CORE_SYS_INTR3 HDMI_CORE_SYS_REG(0x1CC)
+#define HDMI_CORE_SYS_INTR4 HDMI_CORE_SYS_REG(0x1D0)
+#define HDMI_CORE_SYS_UMASK1 HDMI_CORE_SYS_REG(0x1D4)
+#define HDMI_CORE_SYS_TMDS_CTRL HDMI_CORE_SYS_REG(0x208)
+#define HDMI_CORE_SYS_DE_DLY HDMI_CORE_SYS_REG(0xC8)
+#define HDMI_CORE_SYS_DE_CTRL HDMI_CORE_SYS_REG(0xCC)
+#define HDMI_CORE_SYS_DE_TOP HDMI_CORE_SYS_REG(0xD0)
+#define HDMI_CORE_SYS_DE_CNTL HDMI_CORE_SYS_REG(0xD8)
+#define HDMI_CORE_SYS_DE_CNTH HDMI_CORE_SYS_REG(0xDC)
+#define HDMI_CORE_SYS_DE_LINL HDMI_CORE_SYS_REG(0xE0)
+#define HDMI_CORE_SYS_DE_LINH_1 HDMI_CORE_SYS_REG(0xE4)
+#define HDMI_CORE_AKSV(n) HDMI_CORE_SYS_REG(n*4 + 0x74)
+#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
+#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
+#define HDMI_CORE_CTRL1_POWER_DOWN 0x1
+#define HDMI_CORE_SYSTEM_INTR2__BCAP 0x80
+#define HDMI_CORE_SYSTEM_INTR3__RI_ERR 0xF0
+
+/* HDMI DDC E-DID */
+#define HDMI_CORE_DDC_CMD HDMI_CORE_SYS_REG(0x3CC)
+#define HDMI_CORE_DDC_STATUS HDMI_CORE_SYS_REG(0x3C8)
+#define HDMI_CORE_DDC_ADDR HDMI_CORE_SYS_REG(0x3B4)
+#define HDMI_CORE_DDC_OFFSET HDMI_CORE_SYS_REG(0x3BC)
+#define HDMI_CORE_DDC_COUNT1 HDMI_CORE_SYS_REG(0x3C0)
+#define HDMI_CORE_DDC_COUNT2 HDMI_CORE_SYS_REG(0x3C4)
+#define HDMI_CORE_DDC_DATA HDMI_CORE_SYS_REG(0x3D0)
+#define HDMI_CORE_DDC_SEGM HDMI_CORE_SYS_REG(0x3B8)
+
+/* HDMI IP Core Audio Video */
+#define HDMI_CORE_AV_REG(idx) HDMI_REG(idx)
+
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_AVI_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x110)
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
+#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210)
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10)
+#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_GEN2_DBYTE HDMI_CORE_AV_REG(0x380)
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_ACR_CTRL HDMI_CORE_AV_REG(0x4)
+#define HDMI_CORE_AV_FREQ_SVAL HDMI_CORE_AV_REG(0x8)
+#define HDMI_CORE_AV_N_SVAL1 HDMI_CORE_AV_REG(0xC)
+#define HDMI_CORE_AV_N_SVAL2 HDMI_CORE_AV_REG(0x10)
+#define HDMI_CORE_AV_N_SVAL3 HDMI_CORE_AV_REG(0x14)
+#define HDMI_CORE_AV_CTS_SVAL1 HDMI_CORE_AV_REG(0x18)
+#define HDMI_CORE_AV_CTS_SVAL2 HDMI_CORE_AV_REG(0x1C)
+#define HDMI_CORE_AV_CTS_SVAL3 HDMI_CORE_AV_REG(0x20)
+#define HDMI_CORE_AV_CTS_HVAL1 HDMI_CORE_AV_REG(0x24)
+#define HDMI_CORE_AV_CTS_HVAL2 HDMI_CORE_AV_REG(0x28)
+#define HDMI_CORE_AV_CTS_HVAL3 HDMI_CORE_AV_REG(0x2C)
+#define HDMI_CORE_AV_AUD_MODE HDMI_CORE_AV_REG(0x50)
+#define HDMI_CORE_AV_SPDIF_CTRL HDMI_CORE_AV_REG(0x54)
+#define HDMI_CORE_AV_HW_SPDIF_FS HDMI_CORE_AV_REG(0x60)
+#define HDMI_CORE_AV_SWAP_I2S HDMI_CORE_AV_REG(0x64)
+#define HDMI_CORE_AV_SPDIF_ERTH HDMI_CORE_AV_REG(0x6C)
+#define HDMI_CORE_AV_I2S_IN_MAP HDMI_CORE_AV_REG(0x70)
+#define HDMI_CORE_AV_I2S_IN_CTRL HDMI_CORE_AV_REG(0x74)
+#define HDMI_CORE_AV_I2S_CHST0 HDMI_CORE_AV_REG(0x78)
+#define HDMI_CORE_AV_I2S_CHST1 HDMI_CORE_AV_REG(0x7C)
+#define HDMI_CORE_AV_I2S_CHST2 HDMI_CORE_AV_REG(0x80)
+#define HDMI_CORE_AV_I2S_CHST4 HDMI_CORE_AV_REG(0x84)
+#define HDMI_CORE_AV_I2S_CHST5 HDMI_CORE_AV_REG(0x88)
+#define HDMI_CORE_AV_ASRC HDMI_CORE_AV_REG(0x8C)
+#define HDMI_CORE_AV_I2S_IN_LEN HDMI_CORE_AV_REG(0x90)
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_AUDO_TXSTAT HDMI_CORE_AV_REG(0xC0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 HDMI_CORE_AV_REG(0xCC)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 HDMI_CORE_AV_REG(0xD0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 HDMI_CORE_AV_REG(0xD4)
+#define HDMI_CORE_AV_TEST_TXCTRL HDMI_CORE_AV_REG(0xF0)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_SPD_TYPE HDMI_CORE_AV_REG(0x180)
+#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
+#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
+#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
+#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200)
+#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204)
+#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208)
+#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C)
+#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
+#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
+#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
+#define HDMI_CORE_AV_MPEG_CHSUM HDMI_CORE_AV_REG(0x28C)
+#define HDMI_CORE_AV_CP_BYTE1 HDMI_CORE_AV_REG(0x37C)
+#define HDMI_CORE_AV_CEC_ADDR_ID HDMI_CORE_AV_REG(0x3FC)
+#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
+
+/* PLL */
+#define HDMI_PLL_REG(idx) HDMI_REG(idx)
+
+#define PLLCTRL_PLL_CONTROL HDMI_PLL_REG(0x0)
+#define PLLCTRL_PLL_STATUS HDMI_PLL_REG(0x4)
+#define PLLCTRL_PLL_GO HDMI_PLL_REG(0x8)
+#define PLLCTRL_CFG1 HDMI_PLL_REG(0xC)
+#define PLLCTRL_CFG2 HDMI_PLL_REG(0x10)
+#define PLLCTRL_CFG3 HDMI_PLL_REG(0x14)
+#define PLLCTRL_CFG4 HDMI_PLL_REG(0x20)
+
+/* HDMI PHY */
+#define HDMI_PHY_REG(idx) HDMI_REG(idx)
+
+#define HDMI_TXPHY_TX_CTRL HDMI_PHY_REG(0x0)
+#define HDMI_TXPHY_DIGITAL_CTRL HDMI_PHY_REG(0x4)
+#define HDMI_TXPHY_POWER_CTRL HDMI_PHY_REG(0x8)
+#define HDMI_TXPHY_PAD_CFG_CTRL HDMI_PHY_REG(0xC)
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+#define REG_FLD_MOD(base, idx, val, start, end) \
+ hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\
+ val, start, end))
+#define REG_GET(base, idx, start, end) \
+ FLD_GET(hdmi_read_reg(base, idx), start, end)
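The FLD_* helpers above implement the usual read-modify-write pattern for register bit fields: FLD_MASK builds a mask covering bits start..end, FLD_VAL shifts a value into position under that mask, and FLD_MOD merges it into an existing register value, which REG_FLD_MOD then wraps with a register read and write. A minimal standalone illustration of how a sequence like the one in hdmi_ti_4xxx_core_audio_config() resolves (values are arbitrary):

#include <stdio.h>

#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	unsigned int r = 0xa5;		/* pretend register contents */

	r = FLD_MOD(r, 0, 2, 2);	/* clear bit 2 */
	r = FLD_MOD(r, 1, 1, 1);	/* set bit 1 */
	r = FLD_MOD(r, 0, 0, 0);	/* clear bit 0 */

	printf("0x%02x\n", r);		/* prints 0xa2 */
	return 0;
}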
+
+enum hdmi_phy_pwr {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_core_inputbus_width {
+ HDMI_INPUT_8BIT = 0,
+ HDMI_INPUT_10BIT = 1,
+ HDMI_INPUT_12BIT = 2
+};
+
+enum hdmi_core_dither_trunc {
+ HDMI_OUTPUTTRUNCATION_8BIT = 0,
+ HDMI_OUTPUTTRUNCATION_10BIT = 1,
+ HDMI_OUTPUTTRUNCATION_12BIT = 2,
+ HDMI_OUTPUTDITHER_8BIT = 3,
+ HDMI_OUTPUTDITHER_10BIT = 4,
+ HDMI_OUTPUTDITHER_12BIT = 5
+};
+
+enum hdmi_core_deepcolor_ed {
+ HDMI_DEEPCOLORPACKECTDISABLE = 0,
+ HDMI_DEEPCOLORPACKECTENABLE = 1
+};
+
+enum hdmi_core_packet_mode {
+ HDMI_PACKETMODERESERVEDVALUE = 0,
+ HDMI_PACKETMODE24BITPERPIXEL = 4,
+ HDMI_PACKETMODE30BITPERPIXEL = 5,
+ HDMI_PACKETMODE36BITPERPIXEL = 6,
+ HDMI_PACKETMODE48BITPERPIXEL = 7
+};
+
+enum hdmi_core_tclkselclkmult {
+ HDMI_FPLL05IDCK = 0,
+ HDMI_FPLL10IDCK = 1,
+ HDMI_FPLL20IDCK = 2,
+ HDMI_FPLL40IDCK = 3
+};
+
+enum hdmi_core_packet_ctrl {
+ HDMI_PACKETENABLE = 1,
+ HDMI_PACKETDISABLE = 0,
+ HDMI_PACKETREPEATON = 1,
+ HDMI_PACKETREPEATOFF = 0
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+
+struct hdmi_core_video_config {
+ enum hdmi_core_inputbus_width ip_bus_width;
+ enum hdmi_core_dither_trunc op_dither_truc;
+ enum hdmi_core_deepcolor_ed deep_color_pkt;
+ enum hdmi_core_packet_mode pkt_mode;
+ enum hdmi_core_hdmi_dvi hdmi_dvi;
+ enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
+};
+
+/*
+ * Refer to Section 8.2 of the HDMI 1.3 specification for
+ * details about infoframe data bytes
+ */
+struct hdmi_core_infoframe_avi {
+ u8 db1_format;
+ /* Y0, Y1 rgb,yCbCr */
+ u8 db1_active_info;
+ /* A0 Active information Present */
+ u8 db1_bar_info_dv;
+ /* B0, B1 Bar info data valid */
+ u8 db1_scan_info;
+ /* S0, S1 scan information */
+ u8 db2_colorimetry;
+ /* C0, C1 colorimetry */
+ u8 db2_aspect_ratio;
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
+ u8 db2_active_fmt_ar;
+ /* R0...R3 Active format aspect ratio */
+ u8 db3_itc;
+ /* ITC IT content. */
+ u8 db3_ec;
+ /* EC0, EC1, EC2 Extended colorimetry */
+ u8 db3_q_range;
+ /* Q1, Q0 Quantization range */
+ u8 db3_nup_scaling;
+ /* SC1, SC0 Non-uniform picture scaling */
+ u8 db4_videocode;
+ /* VIC0..6 Video format identification */
+ u8 db5_pixel_repeat;
+ /* PR0..PR3 Pixel repetition factor */
+ u16 db6_7_line_eoftop;
+ /* Line number end of top bar */
+ u16 db8_9_line_sofbottom;
+ /* Line number start of bottom bar */
+ u16 db10_11_pixel_eofleft;
+ /* Pixel number end of left bar */
+ u16 db12_13_pixel_sofright;
+ /* Pixel number start of right bar */
+};
+
+struct hdmi_core_packet_enable_repeat {
+ u32 audio_pkt;
+ u32 audio_pkt_repeat;
+ u32 avi_infoframe;
+ u32 avi_infoframe_repeat;
+ u32 gen_cntrl_pkt;
+ u32 gen_cntrl_pkt_repeat;
+ u32 generic_pkt;
+ u32 generic_pkt_repeat;
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packing_mode;
+	u32 y_res; /* Lines per panel */
+	u32 x_res; /* Pixels per line */
+};
+
+struct hdmi_video_interface {
+ int vsp; /* Vsync polarity */
+ int hsp; /* Hsync polarity */
+ int interlacing;
+ int tm; /* Timing mode */
+};
+
+#endif
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index cb175fe..4bffa63 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -292,64 +292,524 @@
};
#ifdef CONFIG_FB_MODE_HELPERS
-const struct fb_videomode cea_modes[64] = {
- /* #1: 640x480p@59.94/60Hz */
- [1] = {
- NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #3: 720x480p@59.94/60Hz */
- [3] = {
- NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #5: 1920x1080i@59.94/60Hz */
- [5] = {
- NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #7: 720(1440)x480iH@59.94/60Hz */
- [7] = {
- NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
- FB_VMODE_INTERLACED, 0,
- },
- /* #9: 720(1440)x240pH@59.94/60Hz */
- [9] = {
- NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #18: 720x576pH@50Hz */
- [18] = {
- NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #19: 1280x720p@50Hz */
- [19] = {
- NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #20: 1920x1080i@50Hz */
- [20] = {
- NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #32: 1920x1080p@23.98/24Hz */
- [32] = {
- NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #35: (2880)x480p4x@59.94/60Hz */
- [35] = {
- NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
+const struct fb_videomode cea_modes[CEA_MODEDB_SIZE] = {
+ {},
+ /* 1: 640x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 640, .yres = 480, .pixclock = 39721,
+ .left_margin = 48, .right_margin = 16,
+ .upper_margin = 33, .lower_margin = 10,
+ .hsync_len = 96, .vsync_len = 2,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 2: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 3: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 4: 1280x720p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 5: 1920x1080i @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 10: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 11: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 12: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 13: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 14: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 15: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 16: 1920x1080p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 17: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 18: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 19: 1280x720p @ 50Hz */
+ {.refresh = 50, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 20: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 21: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 22: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 23: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 24: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 25: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 26: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 27: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 28: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 29: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 30: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 31: 1920x1080p @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 32: 1920x1080p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 638,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 33: 1920x1080p @ 25Hz */
+ {.refresh = 25, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 34: 1920x1080p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 35: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 36: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 37: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 38: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 39: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13888,
+ .left_margin = 184, .right_margin = 32,
+ .upper_margin = 57, .lower_margin = 2,
+ .hsync_len = 168, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 40: 1920x1080i @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 41: 1280x720p @ 100Hz */
+ {.refresh = 100, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 42: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 43: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 44: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 45: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 46: 1920x1080i @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 47: 1280x720p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 48: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 49: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 50: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 51: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 52: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 53: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 54: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 55: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 56: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 57: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 58: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_4_3 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 59: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .flag = FB_FLAG_RATIO_16_9 | FB_FLAG_PIXEL_REPEAT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 60: 1280x720p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1280, .yres = 720, .pixclock = 16835,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 61: 1280x720p @ 25Hz */
+ {.refresh = 25, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 2420,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 62: 1280x720p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 63: 1920x1080p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 64: 1920x1080p @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED},
};
+EXPORT_SYMBOL(cea_modes);
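For orientation when reading the table above: in struct fb_videomode the pixclock field is the pixel period in picoseconds (the standard fbdev convention), and the margins and sync lengths are in pixels and lines, so each entry's nominal refresh rate can be recomputed from its own numbers. A quick check against CEA mode 4 (1280x720p, pixclock 13468):

#include <stdio.h>

int main(void)
{
	/* values copied from the mode 4 entry above */
	unsigned long pixclock = 13468;			/* picoseconds per pixel */
	unsigned long htotal = 1280 + 220 + 110 + 40;	/* xres + margins + hsync */
	unsigned long vtotal = 720 + 20 + 5 + 5;	/* yres + margins + vsync */
	double refresh = 1e12 / ((double)pixclock * htotal * vtotal);

	printf("%.2f Hz\n", refresh);			/* close to 60 Hz */
	return 0;
}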
-const struct fb_videomode vesa_modes[] = {
+const struct fb_videomode vesa_modes[VESA_MODEDB_SIZE] = {
/* 0 640x350-85 VESA */
{ NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3,
FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA},
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
index d877c36..c4ec41a 100644
--- a/drivers/video/omap2/Kconfig
+++ b/drivers/video/omap2/Kconfig
@@ -7,3 +7,5 @@
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
source "drivers/video/omap2/displays/Kconfig"
+source "drivers/video/omap2/dsscomp/Kconfig"
+source "drivers/video/omap2/hdcp/Kconfig"
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index 5ddef12..ceb1dd9 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -3,4 +3,6 @@
obj-$(CONFIG_OMAP2_DSS) += dss/
obj-$(CONFIG_FB_OMAP2) += omapfb/
+obj-$(CONFIG_OMAP4_HDCP) += hdcp/
obj-y += displays/
+obj-y += dsscomp/
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 9c90f75..611be7e 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -82,6 +82,30 @@
.name = "generic",
},
+ /* generic 720p */
+ {
+ {
+ .x_res = 1280,
+ .y_res = 720,
+
+ .pixel_clock = 74250,
+
+ .hfp = 110,
+ .hsw = 40,
+ .hbp = 20,
+
+ .vfp = 5,
+ .vsw = 5,
+ .vbp = 20,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "generic_720p",
+ },
+
/* Sharp LQ043T1DG01 */
{
{
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fdd5d4ae..b82bcc1 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -504,14 +504,18 @@
return 0;
r = omapdss_dsi_display_enable(dssdev);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DSI\n");
+ goto err1;
+ }
omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
r = _taal_enable_te(dssdev, true);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to re-enable TE");
+ goto err2;
+ }
enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
@@ -521,13 +525,15 @@
return 0;
-err:
- dev_err(&dssdev->dev, "exit ULPS failed");
+err2:
+ dev_err(&dssdev->dev, "failed to exit ULPS");
+
r = taal_panel_reset(dssdev);
-
- enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
- td->ulps_enabled = false;
-
+ if (!r) {
+ enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
+ td->ulps_enabled = false;
+ }
+err1:
taal_queue_ulps_work(dssdev);
return r;
@@ -1317,8 +1323,11 @@
dsi_bus_lock(dssdev);
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- taal_wake_up(dssdev);
- taal_power_off(dssdev);
+ int r;
+
+ r = taal_wake_up(dssdev);
+ if (!r)
+ taal_power_off(dssdev);
}
dsi_bus_unlock(dssdev);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 6b3e2da..8f9420e 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -63,6 +63,7 @@
config OMAP4_DSS_HDMI
bool "HDMI support"
depends on ARCH_OMAP4
+ select HDMI_TI_4XXX_IP
default y
help
HDMI Interface. This adds the High Definition Multimedia Interface.
@@ -117,18 +118,6 @@
Max FCK is 173MHz, so this doesn't work if your PCK
is very high.
-config OMAP2_DSS_SLEEP_BEFORE_RESET
- bool "Sleep 50ms before DSS reset"
- default y
- help
- For some unknown reason we may get SYNC_LOST errors from the display
- subsystem at initialization time if we don't sleep before resetting
- the DSS. See the source (dss.c) for more comments.
-
- However, 50ms is quite long time to sleep, and with some
- configurations the SYNC_LOST may never happen, so the sleep can
- be disabled here.
-
config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
bool "Sleep 20ms after VENC reset"
default y
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 10d9d3b..e67db6e 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,9 +1,9 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dss_features.o dispc.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dss_features.o dispc.o display.o manager.o overlay.o fifothreshold.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
- hdmi_omap4_panel.o
+ hdmi_panel.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 3da4267..2a1e6ed 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -145,6 +145,8 @@
debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
&venc_dump_regs, &dss_debug_fops);
#endif
+ debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
+ &hdmi_dump_regs, &dss_debug_fops);
return 0;
}
@@ -183,8 +185,11 @@
goto err_dss;
}
- /* keep clocks enabled to prevent context saves/restores during init */
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dispc_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize dispc platform driver\n");
+ goto err_dispc;
+ }
r = rfbi_init_platform_driver();
if (r) {
@@ -192,12 +197,6 @@
goto err_rfbi;
}
- r = dispc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize dispc platform driver\n");
- goto err_dispc;
- }
-
r = venc_init_platform_driver();
if (r) {
DSSERR("Failed to initialize venc platform driver\n");
@@ -238,8 +237,6 @@
pdata->default_device = dssdev;
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
-
return 0;
err_register:
@@ -424,6 +421,24 @@
return 0;
}
+static void omap_dss_driver_disable(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ blocking_notifier_call_chain(&dssdev->state_notifiers,
+ OMAP_DSS_DISPLAY_DISABLED, dssdev);
+ dssdev->driver->disable_orig(dssdev);
+ dssdev->first_vsync = false;
+}
+
+static int omap_dss_driver_enable(struct omap_dss_device *dssdev)
+{
+ int r = dssdev->driver->enable_orig(dssdev);
+ if (!r && dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ blocking_notifier_call_chain(&dssdev->state_notifiers,
+ OMAP_DSS_DISPLAY_ACTIVE, dssdev);
+ return r;
+}
+
int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
{
dssdriver->driver.bus = &dss_bus_type;
@@ -436,6 +451,11 @@
dssdriver->get_recommended_bpp =
omapdss_default_get_recommended_bpp;
+ dssdriver->disable_orig = dssdriver->disable;
+ dssdriver->disable = omap_dss_driver_disable;
+ dssdriver->enable_orig = dssdriver->enable;
+ dssdriver->enable = omap_dss_driver_enable;
+
return driver_register(&dssdriver->driver);
}
EXPORT_SYMBOL(omap_dss_register_driver);
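The enable/disable wrapping installed in omap_dss_register_driver() lets omapdss broadcast display state changes over each device's state_notifiers chain without touching individual panel drivers: the original ops are stashed in enable_orig/disable_orig and the wrappers call blocking_notifier_call_chain() with the dssdev as the payload. A hypothetical listener (everything except the standard notifier API and the fields shown in this patch is illustrative) would hook in roughly like this:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <video/omapdss.h>

static int my_dss_state_cb(struct notifier_block *nb,
			   unsigned long state, void *data)
{
	struct omap_dss_device *dssdev = data;

	pr_info("%s: display state %lu\n", dssdev->name, state);
	return NOTIFY_OK;
}

static struct notifier_block my_dss_nb = {
	.notifier_call = my_dss_state_cb,
};

/* during setup, with a dssdev already looked up:
 *	blocking_notifier_chain_register(&dssdev->state_notifiers, &my_dss_nb);
 */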
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7a9a2e7..c5eaf90 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,12 +33,17 @@
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <plat/sram.h>
#include <plat/clock.h>
-
+#include <mach/tiler.h>
+#include <plat/omap-pm.h>
#include <video/omapdss.h>
+#include "../clockdomain.h"
#include "dss.h"
#include "dss_features.h"
#include "dispc.h"
@@ -55,26 +60,20 @@
#define DISPC_MAX_NR_ISRS 8
+static struct clockdomain *l3_1_clkdm, *l3_2_clkdm;
+
struct omap_dispc_isr_data {
omap_dispc_isr_t isr;
void *arg;
u32 mask;
};
-struct dispc_h_coef {
- s8 hc4;
- s8 hc3;
- u8 hc2;
- s8 hc1;
- s8 hc0;
-};
-
-struct dispc_v_coef {
- s8 vc22;
- s8 vc2;
- u8 vc1;
- s8 vc0;
- s8 vc00;
+struct dispc_hv_coef {
+ s8 hc0_vc00;
+ s8 hc1_vc0;
+ u8 hc2_vc1;
+ s8 hc3_vc2;
+ s8 hc4_vc22;
};
#define REG_GET(idx, start, end) \
@@ -92,9 +91,17 @@
static struct {
struct platform_device *pdev;
void __iomem *base;
- int irq;
- u32 fifo_size[3];
+ int ctx_loss_cnt;
+ struct mutex runtime_lock;
+ int runtime_count;
+
+ int irq;
+ struct clk *dss_clk;
+
+ u32 fifo_size[MAX_DSS_OVERLAYS];
+
+	u32 channel_irq[3]; /* Max channels hardcoded to 3 */
spinlock_t irq_lock;
u32 irq_error_mask;
@@ -102,6 +109,7 @@
u32 error_irqs;
struct work_struct error_work;
+ bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -134,18 +142,34 @@
return __raw_readl(dispc.base + idx);
}
+static int dispc_get_ctx_loss_count(void)
+{
+ struct device *dev = &dispc.pdev->dev;
+ struct omap_display_platform_data *pdata = dev->platform_data;
+ struct omap_dss_board_info *board_data = pdata->board_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
-void dispc_save_context(void)
+static void dispc_save_context(void)
{
- int i;
- if (cpu_is_omap24xx())
- return;
+ int i, o;
- SR(SYSCONFIG);
+ DSSDBG("dispc_save_context\n");
+
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
@@ -158,7 +182,8 @@
SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- SR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ SR(GLOBAL_ALPHA);
SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -188,123 +213,108 @@
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
}
- SR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_GFX));
- /* VID1 */
- SR(OVL_BA0(OMAP_DSS_VIDEO1));
- SR(OVL_BA1(OMAP_DSS_VIDEO1));
- SR(OVL_POSITION(OMAP_DSS_VIDEO1));
- SR(OVL_SIZE(OMAP_DSS_VIDEO1));
- SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- SR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
- SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- SR(OVL_FIR(OMAP_DSS_VIDEO1));
- SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU0(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU1(OMAP_DSS_VIDEO1));
+ /* VID1-3 */
+ for (o = OMAP_DSS_VIDEO1; o <= OMAP_DSS_VIDEO3; o++) {
+ if (o == OMAP_DSS_VIDEO3 && !dss_has_feature(FEAT_OVL_VID3))
+ continue;
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 5; i++)
- SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
- SR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
- SR(OVL_FIR2(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- SR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
+ SR(OVL_BA0(o));
+ SR(OVL_BA1(o));
+ SR(OVL_POSITION(o));
+ SR(OVL_SIZE(o));
+ SR(OVL_ATTRIBUTES(o));
+ SR(OVL_FIFO_THRESHOLD(o));
+ SR(OVL_ROW_INC(o));
+ SR(OVL_PIXEL_INC(o));
+ SR(OVL_FIR(o));
+ SR(OVL_PICTURE_SIZE(o));
+ SR(OVL_ACCU0(o));
+ SR(OVL_ACCU1(o));
for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
+ SR(OVL_FIR_COEF_H(o, i));
for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
+ SR(OVL_FIR_COEF_HV(o, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
+ for (i = 0; i < 5; i++)
+ SR(OVL_CONV_COEF(o, i));
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(o, i));
+ }
+
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ SR(OVL_BA0_UV(o));
+ SR(OVL_BA1_UV(o));
+ SR(OVL_FIR2(o));
+ SR(OVL_ACCU2_0(o));
+ SR(OVL_ACCU2_1(o));
+
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_H2(o, i));
+
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_HV2(o, i));
+
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V2(o, i));
+ }
+ if (dss_has_feature(FEAT_ATTR2))
+ SR(OVL_ATTRIBUTES2(o));
+
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(o));
}
- if (dss_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
-
- /* VID2 */
- SR(OVL_BA0(OMAP_DSS_VIDEO2));
- SR(OVL_BA1(OMAP_DSS_VIDEO2));
- SR(OVL_POSITION(OMAP_DSS_VIDEO2));
- SR(OVL_SIZE(OMAP_DSS_VIDEO2));
- SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- SR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
- SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- SR(OVL_FIR(OMAP_DSS_VIDEO2));
- SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU0(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU1(OMAP_DSS_VIDEO2));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 5; i++)
- SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
- SR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
- SR(OVL_FIR2(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- SR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
- }
- if (dss_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
+
+ dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_valid = true;
+
+ DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
}
-void dispc_restore_context(void)
+static void dispc_restore_context(void)
{
- int i;
- RR(SYSCONFIG);
+ struct device *dev = &dispc.pdev->dev;
+ int i, o, ctx;
+
+ DSSDBG("dispc_restore_context\n");
+
+ if (!dispc.ctx_valid)
+ return;
+
+ ctx = dispc_get_ctx_loss_count();
+
+ if (!omap_pm_was_context_lost(dev))
+ return;
+
+ DSSDBG("ctx_loss_count: saved %d, current %d\n",
+ dispc.ctx_loss_cnt, ctx);
+
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
@@ -317,7 +327,8 @@
RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- RR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ RR(GLOBAL_ALPHA);
RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -347,114 +358,81 @@
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- RR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_GFX));
- /* VID1 */
- RR(OVL_BA0(OMAP_DSS_VIDEO1));
- RR(OVL_BA1(OMAP_DSS_VIDEO1));
- RR(OVL_POSITION(OMAP_DSS_VIDEO1));
- RR(OVL_SIZE(OMAP_DSS_VIDEO1));
- RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- RR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
- RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- RR(OVL_FIR(OMAP_DSS_VIDEO1));
- RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU0(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU1(OMAP_DSS_VIDEO1));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
+ /* VID1-3 */
+ for (o = OMAP_DSS_VIDEO1; o <= OMAP_DSS_VIDEO3; o++) {
+ if (o == OMAP_DSS_VIDEO3 && !dss_has_feature(FEAT_OVL_VID3))
+ continue;
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 5; i++)
- RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
- RR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
- RR(OVL_FIR2(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- RR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
+ RR(OVL_BA0(o));
+ RR(OVL_BA1(o));
+ RR(OVL_POSITION(o));
+ RR(OVL_SIZE(o));
+ RR(OVL_ATTRIBUTES(o));
+ RR(OVL_FIFO_THRESHOLD(o));
+ RR(OVL_ROW_INC(o));
+ RR(OVL_PIXEL_INC(o));
+ RR(OVL_FIR(o));
+ RR(OVL_PICTURE_SIZE(o));
+ RR(OVL_ACCU0(o));
+ RR(OVL_ACCU1(o));
for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
+ RR(OVL_FIR_COEF_H(o, i));
for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
+ RR(OVL_FIR_COEF_HV(o, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
+ for (i = 0; i < 5; i++)
+ RR(OVL_CONV_COEF(o, i));
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(o, i));
+ }
+
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ RR(OVL_BA0_UV(o));
+ RR(OVL_BA1_UV(o));
+ RR(OVL_FIR2(o));
+ RR(OVL_ACCU2_0(o));
+ RR(OVL_ACCU2_1(o));
+
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_H2(o, i));
+
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_HV2(o, i));
+
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V2(o, i));
+ }
+ if (dss_has_feature(FEAT_ATTR2))
+ RR(OVL_ATTRIBUTES2(o));
+
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(o));
}
- if (dss_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
-
- /* VID2 */
- RR(OVL_BA0(OMAP_DSS_VIDEO2));
- RR(OVL_BA1(OMAP_DSS_VIDEO2));
- RR(OVL_POSITION(OMAP_DSS_VIDEO2));
- RR(OVL_SIZE(OMAP_DSS_VIDEO2));
- RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- RR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
- RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- RR(OVL_FIR(OMAP_DSS_VIDEO2));
- RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU0(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU1(OMAP_DSS_VIDEO2));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 5; i++)
- RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
- RR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
- RR(OVL_FIR2(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- RR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
-
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
- }
- if (dss_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
@@ -471,19 +449,176 @@
* the context is fully restored
*/
RR(IRQENABLE);
+
+ DSSDBG("context restored\n");
}
#undef SR
#undef RR
-static inline void enable_clocks(bool enable)
+static u32 dispc_calculate_threshold(enum omap_plane plane, u32 paddr,
+ u32 puv_addr, u16 width, u16 height,
+ s32 row_inc, s32 pix_inc)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int shift;
+ u32 channel_no = plane;
+ u32 val, burstsize, doublestride;
+ u32 rotation, bursttype, color_mode;
+ struct dispc_config dispc_reg_config;
+
+ if (width >= 1920)
+ return 1500;
+
+ /* Get the burst size */
+ shift = (plane == OMAP_DSS_GFX) ? 6 : 14;
+ val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ burstsize = FLD_GET(val, shift + 1, shift);
+ doublestride = FLD_GET(val, 22, 22);
+ rotation = FLD_GET(val, 13, 12);
+ bursttype = FLD_GET(val, 29, 29);
+ color_mode = FLD_GET(val, 4, 1);
+
+ /* base address for frame (Luma frame in case of YUV420) */
+ dispc_reg_config.ba = paddr;
+ /* base address for Chroma frame in case of YUV420 */
+ dispc_reg_config.bacbcr = puv_addr;
+ /* OrgSizeX for frame */
+ dispc_reg_config.sizex = width - 1;
+ /* OrgSizeY for frame */
+ dispc_reg_config.sizey = height - 1;
+ /* burst size */
+ dispc_reg_config.burstsize = burstsize;
+ /* pixel increment */
+ dispc_reg_config.pixelinc = pix_inc;
+ /* row increment */
+ dispc_reg_config.rowinc = row_inc;
+ /* burst type: 1D/2D */
+ dispc_reg_config.bursttype = bursttype;
+ /* chroma DoubleStride when in YUV420 format */
+ dispc_reg_config.doublestride = doublestride;
+ /* Pixel format of the frame. */
+ dispc_reg_config.format = color_mode;
+ /* Rotation of frame */
+ dispc_reg_config.rotation = rotation;
+
+ /* DMA buffer allocations - assuming reset values */
+ dispc_reg_config.gfx_top_buffer = 0;
+ dispc_reg_config.gfx_bottom_buffer = 0;
+ dispc_reg_config.vid1_top_buffer = 1;
+ dispc_reg_config.vid1_bottom_buffer = 1;
+ dispc_reg_config.vid2_top_buffer = 2;
+ dispc_reg_config.vid2_bottom_buffer = 2;
+ dispc_reg_config.vid3_top_buffer = 3;
+ dispc_reg_config.vid3_bottom_buffer = 3;
+ dispc_reg_config.wb_top_buffer = 4;
+ dispc_reg_config.wb_bottom_buffer = 4;
+
+ /* antiFlicker is off */
+ dispc_reg_config.antiflicker = 0;
+
+ return sa_calc_wrap(&dispc_reg_config, channel_no);
}
+int dispc_runtime_get(void)
+{
+ int r;
+
+ mutex_lock(&dispc.runtime_lock);
+
+ if (dispc.runtime_count++ == 0) {
+ DSSDBG("dispc_runtime_get\n");
+
+ /*
+ * OMAP4 ERRATUM xxxx: Mstandby and disconnect protocol issue
+ * Impacts: all OMAP4 devices
+ * Simplified description:
+ * Issue #1: The handshake between IP modules on L3_1 and L3_2
+ * peripherals with the PRCM has a limitation in a certain time
+ * window of the L4 clock cycle. Because a wrong variant of the
+ * stall signal was used in the PRCM circuit, the
+ * initiator-interconnect protocol is broken when the time
+ * window is hit where the PRCM requires the interconnect to go
+ * idle while the initiator asks to wake up.
+ * Issue #2: DISPC asserts a sub-mstandby signal for a short
+ * period. In this time interval, the IP block requests
+ * disconnection of the master port, resulting in an Mstandby
+ * and wait request to the PRCM. If mstandby is de-asserted by
+ * DISPC at the same time, the interconnect requests a
+ * reconnect for one cycle alone, resulting in a disconnect
+ * protocol violation and a deadlock of the system.
+ *
+ * Workaround:
+ * L3_1 clock domain must not be programmed in HW_AUTO if
+ * Static dependency with DSS is enabled and DSS clock domain
+ * is ON. Same for L3_2.
+ */
+ if (cpu_is_omap44xx()) {
+ clkdm_deny_idle(l3_1_clkdm);
+ clkdm_deny_idle(l3_2_clkdm);
+ }
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_dss_get;
+
+ /* XXX dispc fclk can also come from DSI PLL */
+ clk_enable(dispc.dss_clk);
+
+ r = pm_runtime_get_sync(&dispc.pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err_runtime_get;
+
+ dispc_restore_context();
+ }
+
+ mutex_unlock(&dispc.runtime_lock);
+
+ return 0;
+
+err_runtime_get:
+ clk_disable(dispc.dss_clk);
+ dss_runtime_put();
+err_dss_get:
+ mutex_unlock(&dispc.runtime_lock);
+
+ return r;
+}
+
+void dispc_runtime_put(void)
+{
+ mutex_lock(&dispc.runtime_lock);
+
+ if (--dispc.runtime_count == 0) {
+ int r;
+
+ DSSDBG("dispc_runtime_put\n");
+
+ dispc_save_context();
+
+ r = pm_runtime_put_sync(&dispc.pdev->dev);
+ WARN_ON(r);
+
+ clk_disable(dispc.dss_clk);
+
+ dss_runtime_put();
+
+ /*
+ * OMAP4 ERRATUM xxxx: Mstandby and disconnect protocol issue
+ * Workaround:
+ * Restore L3_1 and L3_2 CDs to HW_AUTO when the DSS module idles.
+ */
+ if (cpu_is_omap44xx()) {
+ clkdm_allow_idle(l3_1_clkdm);
+ clkdm_allow_idle(l3_2_clkdm);
+ }
+
+ }
+
+ mutex_unlock(&dispc.runtime_lock);
+}
+
+
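The runtime get/put pair above replaces the scattered enable_clocks() calls removed throughout the rest of this patch. A minimal usage sketch (illustrative only, not part of the patch), mirroring how dispc_dump_clocks() is converted further down:

/* Illustrative only -- not part of the patch. */
static void example_dispc_register_access(void)
{
	if (dispc_runtime_get())
		return;		/* DISPC could not be powered up */

	/* DISPC register reads/writes are safe here: the clocks are on
	 * and dispc_restore_context() has run. */

	dispc_runtime_put();	/* saves context and re-allows idling */
}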
bool dispc_go_busy(enum omap_channel channel)
{
int bit;
@@ -505,8 +640,6 @@
int bit;
bool enable_bit, go_bit;
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
bit = 0; /* LCDENABLE */
@@ -520,7 +653,7 @@
enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
if (!enable_bit)
- goto end;
+ return;
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
@@ -535,7 +668,7 @@
if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
- goto end;
+ return;
}
DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
@@ -545,8 +678,6 @@
REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
else
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
-end:
- enable_clocks(0);
}
static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -585,105 +716,227 @@
dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
- int vscaleup, int five_taps,
+static const struct dispc_hv_coef *
+dispc_get_scaling_coef(u32 inc, bool five_taps)
+{
+ static const struct dispc_hv_coef coef3_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { 0, 2, 123, 3, 0 },
+ { 0, 5, 111, 12, 0 },
+ { 0, 7, 89, 32, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 32, 89, 7, 0 },
+ { 0, 12, 111, 5, 0 },
+ { 0, 3, 123, 2, 0 },
+ };
+
+ static const struct dispc_hv_coef coef3_M16[8] = {
+ { 0, 36, 56, 36, 0 },
+ { 0, 31, 57, 40, 0 },
+ { 0, 27, 56, 45, 0 },
+ { 0, 23, 55, 50, 0 },
+ { 0, 55, 55, 18, 0 },
+ { 0, 50, 55, 23, 0 },
+ { 0, 45, 56, 27, 0 },
+ { 0, 40, 57, 31, 0 },
+ };
+
+ static const struct dispc_hv_coef coef_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { 0, -8, 124, 13, -1 },
+ { -1, -11, 112, 30, -2 },
+ { -2, -11, 95, 51, -5 },
+ { -9, 73, 73, -9, 0 },
+ { -5, 51, 95, -11, -2 },
+ { -2, 30, 112, -11, -1 },
+ { -1, 13, 124, -8, 0 },
+ };
+
+ static const struct dispc_hv_coef coef_M9[8] = {
+ { 8, -8, 128, -8, 8 },
+ { 14, -21, 126, 8, 1 },
+ { 17, -27, 117, 30, -9 },
+ { 17, -30, 103, 56, -18 },
+ { -26, 83, 83, -26, 14 },
+ { -18, 56, 103, -30, 17 },
+ { -9, 30, 117, -27, 17 },
+ { 1, 8, 126, -21, 14 },
+ };
+
+ static const struct dispc_hv_coef coef_M10[8] = {
+ { -2, 2, 128, 2, -2 },
+ { 5, -12, 125, 20, -10 },
+ { 11, -22, 116, 41, -18 },
+ { 15, -27, 102, 62, -24 },
+ { -28, 83, 83, -28, 18 },
+ { -24, 62, 102, -27, 15 },
+ { -18, 41, 116, -22, 11 },
+ { -10, 20, 125, -12, 5 },
+ };
+
+ static const struct dispc_hv_coef coef_M11[8] = {
+ { -12, 12, 128, 12, -12 },
+ { -4, -3, 124, 30, -19 },
+ { 3, -15, 115, 49, -24 },
+ { 9, -22, 101, 67, -27 },
+ { -26, 83, 83, -26, 14 },
+ { -27, 67, 101, -22, 9 },
+ { -24, 49, 115, -15, 3 },
+ { -19, 30, 124, -3, -4 },
+ };
+
+ static const struct dispc_hv_coef coef_M12[8] = {
+ { -19, 21, 124, 21, -19 },
+ { -12, 6, 120, 38, -24 },
+ { -6, -7, 112, 55, -26 },
+ { 1, -16, 98, 70, -25 },
+ { -21, 82, 82, -21, 6 },
+ { -25, 70, 98, -16, 1 },
+ { -26, 55, 112, -7, -6 },
+ { -24, 38, 120, 6, -12 },
+ };
+
+ static const struct dispc_hv_coef coef_M13[8] = {
+ { -22, 27, 118, 27, -22 },
+ { -18, 13, 115, 43, -25 },
+ { -12, 0, 107, 58, -25 },
+ { -6, -10, 95, 71, -22 },
+ { -17, 81, 81, -17, 0 },
+ { -22, 71, 95, -10, -6 },
+ { -25, 58, 107, 0, -12 },
+ { -25, 43, 115, 13, -18 },
+ };
+
+ static const struct dispc_hv_coef coef_M14[8] = {
+ { -23, 32, 110, 32, -23 },
+ { -20, 18, 108, 46, -24 },
+ { -16, 6, 101, 59, -22 },
+ { -11, -4, 91, 70, -18 },
+ { -11, 78, 78, -11, -6 },
+ { -18, 70, 91, -4, -11 },
+ { -22, 59, 101, 6, -16 },
+ { -24, 46, 108, 18, -20 },
+ };
+
+ static const struct dispc_hv_coef coef_M16[8] = {
+ { -20, 37, 94, 37, -20 },
+ { -21, 26, 93, 48, -18 },
+ { -19, 15, 88, 58, -14 },
+ { -17, 6, 82, 66, -9 },
+ { -2, 73, 73, -2, -14 },
+ { -9, 66, 82, 6, -17 },
+ { -14, 58, 88, 15, -19 },
+ { -18, 48, 93, 26, -21 },
+ };
+
+ static const struct dispc_hv_coef coef_M19[8] = {
+ { -12, 38, 76, 38, -12 },
+ { -14, 31, 72, 47, -8 },
+ { -16, 22, 73, 53, -4 },
+ { -16, 15, 69, 59, 1 },
+ { 8, 64, 64, 8, -16 },
+ { 1, 59, 69, 15, -16 },
+ { -4, 53, 73, 22, -16 },
+ { -9, 47, 72, 31, -13 },
+ };
+
+ static const struct dispc_hv_coef coef_M22[8] = {
+ { -6, 37, 66, 37, -6 },
+ { -8, 32, 61, 44, -1 },
+ { -11, 25, 63, 48, 3 },
+ { -13, 19, 61, 53, 8 },
+ { 13, 58, 58, 13, -14 },
+ { 8, 53, 61, 19, -13 },
+ { 3, 48, 63, 25, -11 },
+ { -2, 44, 61, 32, -7 },
+ };
+
+ static const struct dispc_hv_coef coef_M26[8] = {
+ { 1, 36, 54, 36, 1 },
+ { -2, 31, 55, 40, 4 },
+ { -5, 27, 54, 44, 8 },
+ { -8, 22, 53, 48, 13 },
+ { 18, 51, 51, 18, -10 },
+ { 13, 48, 53, 22, -8 },
+ { 8, 44, 54, 27, -5 },
+ { 4, 40, 55, 31, -2 },
+ };
+
+ static const struct dispc_hv_coef coef_M32[8] = {
+ { 7, 34, 46, 34, 7 },
+ { 4, 31, 46, 37, 10 },
+ { 1, 27, 46, 39, 14 },
+ { -1, 24, 46, 42, 17 },
+ { 21, 45, 45, 21, -4 },
+ { 17, 42, 46, 24, -1 },
+ { 14, 39, 46, 28, 1 },
+ { 10, 37, 46, 31, 4 },
+ };
+
+ inc >>= 7; /* /= 128 */
+ if (five_taps) {
+ if (inc > 26)
+ return coef_M32;
+ if (inc > 22)
+ return coef_M26;
+ if (inc > 19)
+ return coef_M22;
+ if (inc > 16)
+ return coef_M19;
+ if (inc > 14)
+ return coef_M16;
+ if (inc > 13)
+ return coef_M14;
+ if (inc > 12)
+ return coef_M13;
+ if (inc > 11)
+ return coef_M12;
+ if (inc > 10)
+ return coef_M11;
+ if (inc > 9)
+ return coef_M10;
+ if (inc > 8)
+ return coef_M9;
+ /* reduce blockiness when upscaling heavily */
+ if (inc > 3)
+ return coef_M8;
+ if (inc > 2)
+ return coef_M11;
+ if (inc > 1)
+ return coef_M16;
+ return coef_M19;
+ } else {
+ if (inc > 14)
+ return coef3_M16;
+ /* reduce blockiness when upscaling heavily */
+ if (inc > 3)
+ return coef3_M8;
+ return coef3_M16;
+ }
+}
+
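For orientation (not part of the patch): the selector above keys the coefficient set on the FIR increment divided by 128. Assuming the increment is programmed as 1024 * in_size / out_size, the usual dispc convention, a few ratios map to the tables as follows:

/*
 * Worked mapping, assuming inc = 1024 * in / out (so inc >> 7 ~ 8 * in / out):
 *
 *   scaling          inc    inc >> 7   five-tap table chosen
 *   1:1             1024       8       coef_M8
 *   4:3 downscale   1365      10       coef_M10
 *   2:1 downscale   2048      16       coef_M16
 *   4:1 downscale   4096      32       coef_M32
 *   2:1 upscale      512       4       coef_M8
 */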
+static void _dispc_set_scale_coef(enum omap_plane plane, int hinc,
+ int vinc, bool five_taps,
enum omap_color_component color_comp)
{
- /* Coefficients for horizontal up-sampling */
- static const struct dispc_h_coef coef_hup[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for vertical up-sampling */
- static const struct dispc_v_coef coef_vup_3tap[8] = {
- { 0, 0, 128, 0, 0 },
- { 0, 3, 123, 2, 0 },
- { 0, 12, 111, 5, 0 },
- { 0, 32, 89, 7, 0 },
- { 0, 0, 64, 64, 0 },
- { 0, 7, 89, 32, 0 },
- { 0, 5, 111, 12, 0 },
- { 0, 2, 123, 3, 0 },
- };
-
- static const struct dispc_v_coef coef_vup_5tap[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for horizontal down-sampling */
- static const struct dispc_h_coef coef_hdown[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- /* Coefficients for vertical down-sampling */
- static const struct dispc_v_coef coef_vdown_3tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 0, 40, 57, 31, 0 },
- { 0, 45, 56, 27, 0 },
- { 0, 50, 55, 23, 0 },
- { 0, 18, 55, 55, 0 },
- { 0, 23, 55, 50, 0 },
- { 0, 27, 56, 45, 0 },
- { 0, 31, 57, 40, 0 },
- };
-
- static const struct dispc_v_coef coef_vdown_5tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- const struct dispc_h_coef *h_coef;
- const struct dispc_v_coef *v_coef;
+ const struct dispc_hv_coef *h_coef;
+ const struct dispc_hv_coef *v_coef;
int i;
- if (hscaleup)
- h_coef = coef_hup;
- else
- h_coef = coef_hdown;
-
- if (vscaleup)
- v_coef = five_taps ? coef_vup_5tap : coef_vup_3tap;
- else
- v_coef = five_taps ? coef_vdown_5tap : coef_vdown_3tap;
+ h_coef = dispc_get_scaling_coef(hinc, true);
+ v_coef = dispc_get_scaling_coef(vinc, five_taps);
for (i = 0; i < 8; i++) {
u32 h, hv;
- h = FLD_VAL(h_coef[i].hc0, 7, 0)
- | FLD_VAL(h_coef[i].hc1, 15, 8)
- | FLD_VAL(h_coef[i].hc2, 23, 16)
- | FLD_VAL(h_coef[i].hc3, 31, 24);
- hv = FLD_VAL(h_coef[i].hc4, 7, 0)
- | FLD_VAL(v_coef[i].vc0, 15, 8)
- | FLD_VAL(v_coef[i].vc1, 23, 16)
- | FLD_VAL(v_coef[i].vc2, 31, 24);
+ h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(h_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(h_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(h_coef[i].hc3_vc2, 31, 24);
+ hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0)
+ | FLD_VAL(v_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(v_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
_dispc_write_firh_reg(plane, i, h);
@@ -692,14 +945,13 @@
_dispc_write_firh2_reg(plane, i, h);
_dispc_write_firhv2_reg(plane, i, hv);
}
-
}
if (five_taps) {
for (i = 0; i < 8; i++) {
u32 v;
- v = FLD_VAL(v_coef[i].vc00, 7, 0)
- | FLD_VAL(v_coef[i].vc22, 15, 8);
+ v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
_dispc_write_firv_reg(plane, i, v);
else
@@ -708,49 +960,22 @@
}
}
-static void _dispc_setup_color_conv_coef(void)
+void _dispc_setup_color_conv_coef(enum omap_plane plane,
+ const struct omap_dss_cconv_coefs *ct)
{
- const struct color_conv_coef {
- int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
- int full_range;
- } ctbl_bt601_5 = {
- 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
- };
-
- const struct color_conv_coef *ct;
+ BUG_ON(plane < OMAP_DSS_VIDEO1 || plane > OMAP_DSS_VIDEO3);
#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
- ct = &ctbl_bt601_5;
-
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0),
- CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1),
- CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2),
- CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3),
- CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4),
- CVAL(0, ct->bcb));
-
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0),
- CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1),
- CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2),
- CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3),
- CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4),
- CVAL(0, ct->bcb));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
+ dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
#undef CVAL
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1),
- ct->full_range, 11, 11);
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2),
- ct->full_range, 11, 11);
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
}
@@ -825,8 +1050,12 @@
if (plane == OMAP_DSS_GFX)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
+ else if (plane == OMAP_DSS_VIDEO1)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 15, 8);
else if (plane == OMAP_DSS_VIDEO2)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
+ else if (plane == OMAP_DSS_VIDEO3)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 31, 24);
}
static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
@@ -847,11 +1076,11 @@
switch (color_mode) {
case OMAP_DSS_COLOR_NV12:
m = 0x0; break;
- case OMAP_DSS_COLOR_RGB12U:
+ case OMAP_DSS_COLOR_RGBX16:
m = 0x1; break;
case OMAP_DSS_COLOR_RGBA16:
m = 0x2; break;
- case OMAP_DSS_COLOR_RGBX16:
+ case OMAP_DSS_COLOR_RGB12U:
m = 0x4; break;
case OMAP_DSS_COLOR_ARGB16:
m = 0x5; break;
@@ -901,8 +1130,10 @@
case OMAP_DSS_COLOR_RGB24P:
m = 0x9; break;
case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_RGBX16:
m = 0xa; break;
case OMAP_DSS_COLOR_UYVY:
+ case OMAP_DSS_COLOR_RGBA16:
m = 0xb; break;
case OMAP_DSS_COLOR_ARGB32:
m = 0xc; break;
@@ -920,7 +1151,7 @@
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void _dispc_set_channel_out(enum omap_plane plane,
+void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel)
{
int shift;
@@ -933,6 +1164,7 @@
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
shift = 16;
break;
default:
@@ -973,14 +1205,13 @@
int shift;
u32 val;
- enable_clocks(1);
-
switch (plane) {
case OMAP_DSS_GFX:
shift = 6;
break;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
shift = 14;
break;
default:
@@ -991,8 +1222,6 @@
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
val = FLD_MOD(val, burst_size, shift+1, shift);
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
-
- enable_clocks(0);
}
void dispc_enable_gamma_table(bool enable)
@@ -1009,6 +1238,63 @@
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
+void dispc_set_zorder(enum omap_plane plane,
+ enum omap_overlay_zorder zorder)
+{
+ u32 val;
+
+ if (!dss_has_feature(FEAT_OVL_ZORDER))
+ return;
+ val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = FLD_MOD(val, zorder, 27, 26);
+ dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+}
+
+void dispc_enable_zorder(enum omap_plane plane, bool enable)
+{
+ u32 val;
+
+ if (!dss_has_feature(FEAT_OVL_ZORDER))
+ return;
+ val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = FLD_MOD(val, enable, 25, 25);
+ dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+}
+
+void dispc_enable_cpr(enum omap_channel channel, bool enable)
+{
+ u16 reg;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ reg = DISPC_CONFIG;
+ else if (channel == OMAP_DSS_CHANNEL_LCD2)
+ reg = DISPC_CONFIG2;
+ else
+ return;
+
+ REG_FLD_MOD(reg, enable, 15, 15);
+}
+
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs)
+{
+ u32 coef_r, coef_g, coef_b;
+
+ if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
+ return;
+
+ coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
+ FLD_VAL(coefs->rb, 9, 0);
+ coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
+ FLD_VAL(coefs->gb, 9, 0);
+ coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
+ FLD_VAL(coefs->bb, 9, 0);
+
+ dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+}
+
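As a packing example (illustrative only, not part of the patch), an identity CPR matrix, on the assumption that unity gain is encoded as 256 per signed 10-bit coefficient, would be written as:

/* Illustrative only -- assumes unity gain == 256 per coefficient. */
static const struct omap_dss_cpr_coefs cpr_identity = {
	.rr = 256, .rg = 0,   .rb = 0,
	.gr = 0,   .gg = 256, .gb = 0,
	.br = 0,   .bg = 0,   .bb = 256,
};
/*
 * With the field layout used by dispc_set_cpr_coef() this packs to:
 *   DISPC_CPR_COEF_R(ch) = 0x40000000   (rr in bits 31..22)
 *   DISPC_CPR_COEF_G(ch) = 0x00080000   (gg in bits 20..11)
 *   DISPC_CPR_COEF_B(ch) = 0x00000100   (bb in bits  9..0)
 */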
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1029,9 +1315,7 @@
else
bit = 10;
- enable_clocks(1);
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
- enable_clocks(0);
}
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
@@ -1039,9 +1323,7 @@
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
- enable_clocks(0);
}
void dispc_set_digit_size(u16 width, u16 height)
@@ -1049,9 +1331,7 @@
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
- enable_clocks(0);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1060,8 +1340,6 @@
int plane;
u8 start, end;
- enable_clocks(1);
-
dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
@@ -1069,8 +1347,6 @@
start, end);
dispc.fifo_size[plane] = size;
}
-
- enable_clocks(0);
}
u32 dispc_get_plane_fifo_size(enum omap_plane plane)
@@ -1085,8 +1361,6 @@
dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
- enable_clocks(1);
-
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
plane,
REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
@@ -1095,21 +1369,18 @@
hi_start, hi_end),
low, high);
+ /* preload to high threshold to avoid FIFO underflow */
+ dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
+
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
-
- enable_clocks(0);
}
void dispc_enable_fifomerge(bool enable)
{
- enable_clocks(1);
-
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
-
- enable_clocks(0);
}
static void _dispc_set_fir(enum omap_plane plane,
@@ -1209,8 +1480,9 @@
int accu0 = 0;
int accu1 = 0;
u32 l;
+ u16 y_adjust = color_mode == OMAP_DSS_COLOR_NV12 ? 2 : 0;
- _dispc_set_scale_param(plane, orig_width, orig_height,
+ _dispc_set_scale_param(plane, orig_width, orig_height - y_adjust,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_RGB_Y);
l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -1262,6 +1534,7 @@
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
+ u16 y_adjust = 0;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -1278,6 +1551,7 @@
orig_height >>= 1;
/* UV is subsampled by 2 horz.*/
orig_width >>= 1;
+ y_adjust = 1;
break;
case OMAP_DSS_COLOR_YUV2:
case OMAP_DSS_COLOR_UYVY:
@@ -1301,7 +1575,7 @@
if (out_height != orig_height)
scale_y = true;
- _dispc_set_scale_param(plane, orig_width, orig_height,
+ _dispc_set_scale_param(plane, orig_width, orig_height - y_adjust,
out_width, out_height, five_taps,
rotation, DISPC_COLOR_COMPONENT_UV);
@@ -1341,7 +1615,8 @@
}
static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
- bool mirroring, enum omap_color_mode color_mode)
+ bool mirroring, enum omap_color_mode color_mode,
+ enum omap_dss_rotation_type type)
{
bool row_repeat = false;
int vidrot = 0;
@@ -1391,6 +1666,16 @@
if (dss_has_feature(FEAT_ROWREPEATENABLE))
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
+
+ if (color_mode == OMAP_DSS_COLOR_NV12) {
+ /* this will never happen for GFX */
+ /* 1D NV12 buffer is always non-rotated or vert. mirrored */
+ bool doublestride = (rotation == OMAP_DSS_ROT_0 ||
+ rotation == OMAP_DSS_ROT_180) &&
+ type == OMAP_DSS_ROT_TILER;
+ /* DOUBLESTRIDE */
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+ }
}
static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -1439,6 +1724,28 @@
BUG();
}
+static void calc_tiler_row_rotation(struct tiler_view_t *view,
+ u16 width, int bpp, int y_decim,
+ s32 *row_inc, unsigned *offset1, bool ilace)
+{
+ /* assume TB. We worry about swapping top/bottom outside of this call */
+
+ if (ilace) {
+ /* even and odd frames are interleaved */
+
+ /* offset1 is always at an odd line */
+ *offset1 = view->v_inc * (y_decim | 1);
+ y_decim *= 2;
+ }
+ *row_inc = view->v_inc * y_decim + 1 - width * bpp;
+
+ DSSDBG(" ps: %d/%d, width: %d/%d, offset1: %d,"
+ " height: %d, row_inc:%d\n", view->bpp, bpp,
+ view->width, width, *offset1, view->height, *row_inc);
+
+ return;
+}
+
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
u16 screen_width,
u16 width, u16 height,
@@ -1529,7 +1836,7 @@
enum omap_color_mode color_mode, bool fieldmode,
unsigned int field_offset,
unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc)
+ s32 *row_inc, s32 *pix_inc, int x_decim, int y_decim)
{
u8 ps;
u16 fbw, fbh;
@@ -1542,6 +1849,15 @@
case OMAP_DSS_COLOR_CLUT8:
BUG();
return;
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ if (cpu_is_omap44xx()) {
+ /* on OMAP4 YUYV is handled as 32-bit data */
+ ps = 4;
+ screen_width /= 2;
+ break;
+ }
+ /* fall through */
default:
ps = color_mode_to_bpp(color_mode) / 8;
break;
@@ -1571,10 +1887,10 @@
*offset0 = *offset1 + field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(1 + (screen_width - fbw) +
+ *row_inc = pixinc(1 + (y_decim * screen_width - fbw * x_decim) +
(fieldmode ? screen_width : 0),
ps);
- *pix_inc = pixinc(1, ps);
+ *pix_inc = pixinc(x_decim, ps);
break;
case OMAP_DSS_ROT_90:
*offset1 = screen_width * (fbh - 1) * ps;
@@ -1671,6 +1987,18 @@
/* FIXME venc pclk? */
u64 tmp, pclk = dispc_pclk_rate(channel);
+ if (cpu_is_omap44xx()) {
+ /* use the conservative TRM value on OMAP4 ES1.0 */
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return pclk * DIV_ROUND_UP(width, out_width) *
+ DIV_ROUND_UP(height, out_height);
+
+ /* since 4430 ES2.0, fclk requirement only depends on width */
+ pclk *= max(width, out_width);
+ do_div(pclk, out_width);
+ return pclk;
+ }
+
if (height > out_height) {
/* FIXME get real display PPL */
unsigned int ppl = 800;
@@ -1706,6 +2034,11 @@
{
unsigned int hf, vf;
+ /* on OMAP4 three-tap and five-tap clock requirements are the same */
+ if (cpu_is_omap44xx())
+ return calc_fclk_five_taps(channel, width, height, out_width,
+ out_height, 0);
+
/*
* FIXME how to determine the 'A' factor
* for the no downscaling case ?
@@ -1729,27 +2062,188 @@
return dispc_pclk_rate(channel) * vf * hf;
}
-void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
+int dispc_scaling_decision(u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ enum omap_plane plane,
+ enum omap_color_mode color_mode,
+ enum omap_channel channel, u8 rotation,
+ enum omap_dss_rotation_type type,
+ u16 min_x_decim, u16 max_x_decim,
+ u16 min_y_decim, u16 max_y_decim,
+ u16 *x_decim, u16 *y_decim, bool *five_taps)
{
- enable_clocks(1);
- _dispc_set_channel_out(plane, channel_out);
- enable_clocks(0);
+ int maxdownscale = cpu_is_omap24xx() ? 2 : 4;
+ int bpp = color_mode_to_bpp(color_mode);
+
+ /*
+ * For now only whole byte formats on OMAP4 can be predecimated.
+ * Later SDMA decimation support may be added
+ */
+ bool can_decimate_x = cpu_is_omap44xx() && !(bpp & 7);
+ bool can_decimate_y = can_decimate_x;
+
+ bool can_scale = plane != OMAP_DSS_GFX;
+
+ u16 in_width, in_height;
+ unsigned long fclk = 0, fclk5 = 0;
+ int min_factor, max_factor; /* decimation search limits */
+ int x, y; /* decimation search variables */
+ unsigned long fclk_max = dispc_fclk_rate();
+ u16 y_decim_limit = type == OMAP_DSS_ROT_TILER ? 2 : 16;
+
+ /* No decimation for bitmap formats */
+ if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+ color_mode == OMAP_DSS_COLOR_CLUT2 ||
+ color_mode == OMAP_DSS_COLOR_CLUT4 ||
+ color_mode == OMAP_DSS_COLOR_CLUT8) {
+ *x_decim = 1;
+ *y_decim = 1;
+ *five_taps = false;
+ return 0;
+ }
+
+ /* restrict search region based on whether we can decimate */
+ if (!can_decimate_x) {
+ if (min_x_decim > 1)
+ return -EINVAL;
+ min_x_decim = max_x_decim = 1;
+ } else {
+ if (max_x_decim > 16)
+ max_x_decim = 16;
+ }
+
+ if (!can_decimate_y) {
+ if (min_y_decim > 1)
+ return -EINVAL;
+ min_y_decim = max_y_decim = 1;
+ } else {
+ if (max_y_decim > y_decim_limit)
+ max_y_decim = y_decim_limit;
+ }
+
+ /*
+ * Find the best supported quality. In the search algorithm, we make use
+ * of the fact that increased decimation in either direction will have
+ * lower quality. However, we do not differentiate horizontal and
+ * vertical decimation even though they may affect quality differently
+ * given the exact geometry involved.
+ *
+ * Also, since the clock calculations are abstracted, we cannot make
+ * assumptions on how decimation affects the clock rates in our search.
+ *
+ * We search the whole search region in increasing layers from
+ * min_factor to max_factor. In each layer we search in increasing
+ * factors, alternating between the x and y axes:
+ *
+ * x: 1 2 3
+ * y:
+ * 1 1st | 3rd | 6th |
+ * ----+ | |
+ * 2 2nd 4th | 8th |
+ * ------------+ |
+ * 3 5th 7th 9th |
+ * --------------------+
+ */
+ min_factor = min(min_x_decim, min_y_decim);
+ max_factor = max(max_x_decim, max_y_decim);
+ x = min_x_decim;
+ y = min_y_decim;
+ while (1) {
+ if (x < min_x_decim || x > max_x_decim ||
+ y < min_y_decim || y > max_y_decim)
+ goto loop;
+
+ in_width = DIV_ROUND_UP(width, x);
+ in_height = DIV_ROUND_UP(height, y);
+
+ if (in_width == out_width && in_height == out_height)
+ break;
+
+ if (!can_scale)
+ goto loop;
+
+ if (out_width * maxdownscale < in_width ||
+ out_height * maxdownscale < in_height)
+ goto loop;
+
+ /* Use the 5-tap filter unless the 3-tap one is required */
+ if (!cpu_is_omap44xx())
+ *five_taps = in_width <= 1024;
+ else if (omap_rev() == OMAP4430_REV_ES1_0)
+ *five_taps = in_width <= 1280;
+ else
+ *five_taps = true;
+
+ /*
+ * Predecimation on OMAP4 still fetches whole lines.
+ * :TODO: How does it affect the required clock speed?
+ */
+ fclk = calc_fclk(channel, in_width, in_height,
+ out_width, out_height);
+ fclk5 = *five_taps ?
+ calc_fclk_five_taps(channel, in_width, in_height,
+ out_width, out_height, color_mode) : 0;
+
+ DSSDBG("%d*%d,%d*%d->%d,%d requires %lu(3T), %lu(5T) Hz\n",
+ in_width, x, in_height, y, out_width, out_height,
+ fclk, fclk5);
+
+ /* for now we always use 5-tap unless 3-tap is required */
+ if (*five_taps)
+ fclk = fclk5;
+
+ /* OMAP2/3 has a scaler size limitation */
+ if (!cpu_is_omap44xx() && in_width > (1024 << !*five_taps))
+ goto loop;
+
+ DSSDBG("required fclk rate = %lu Hz\n", fclk);
+ DSSDBG("current fclk rate = %lu Hz\n", fclk_max);
+
+ if (fclk > fclk_max)
+ goto loop;
+ break;
+
+loop:
+ /* err if exhausted search region */
+ if (x == max_x_decim && y == max_y_decim) {
+ DSSERR("failed to set up scaling %u*%u to %u*%u, "
+ "required fclk rate = %lu Hz, "
+ "current = %lu Hz\n",
+ width, height, out_width, out_height,
+ fclk, fclk_max);
+ return -EINVAL;
+ }
+
+ /* get to next factor */
+ if (x == y) {
+ x = min_factor;
+ y++;
+ } else {
+ swap(x, y);
+ if (x < y)
+ x++;
+ }
+ }
+
+ *x_decim = x;
+ *y_decim = y;
+ return 0;
}
-static int _dispc_setup_plane(enum omap_plane plane,
+int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
u16 width, u16 height,
u16 out_width, u16 out_height,
enum omap_color_mode color_mode,
bool ilace,
+ int x_decim, int y_decim, bool five_taps,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, int mirror,
+ u8 rotation, bool mirror,
u8 global_alpha, u8 pre_mult_alpha,
enum omap_channel channel, u32 puv_addr)
{
- const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
- bool five_taps = 0;
+ const int maxdownscale = cpu_is_omap24xx() ? 2 : 4;
bool fieldmode = 0;
int cconv = 0;
unsigned offset0, offset1;
@@ -1757,6 +2251,18 @@
s32 pix_inc;
u16 frame_height = height;
unsigned int field_offset = 0;
+ int pixpg = (color_mode &
+ (OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY)) ? 2 : 1;
+ unsigned long tiler_width, tiler_height;
+ u32 fifo_high, fifo_low;
+
+ DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %d/%dx%d/%d -> "
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d %dtap\n",
+ plane, paddr, screen_width, pos_x, pos_y,
+ width, x_decim, height, y_decim,
+ out_width, out_height,
+ ilace, color_mode,
+ rotation, mirror, channel, five_taps ? 5 : 3);
if (paddr == 0)
return -EINVAL;
@@ -1778,59 +2284,41 @@
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
+ /* predecimate */
+
+ /* adjust for group-of-pixels */
+ if (rotation & 1)
+ height /= pixpg;
+ else
+ width /= pixpg;
+
+ /* remember tiler block's size as we are reconstructing it */
+ tiler_width = width;
+ tiler_height = height;
+
+ width = DIV_ROUND_UP(width, x_decim);
+ height = DIV_ROUND_UP(height, y_decim);
+
+ /* NV12 width has to be even (height apparently does not) */
+ if (color_mode == OMAP_DSS_COLOR_NV12)
+ width &= ~1;
+
if (plane == OMAP_DSS_GFX) {
if (width != out_width || height != out_height)
return -EINVAL;
} else {
/* video plane */
- unsigned long fclk = 0;
-
- if (out_width < width / maxdownscale ||
- out_width > width * 8)
+ if (out_width < width / maxdownscale)
return -EINVAL;
- if (out_height < height / maxdownscale ||
- out_height > height * 8)
+ if (out_height < height / maxdownscale)
return -EINVAL;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
color_mode == OMAP_DSS_COLOR_NV12)
cconv = 1;
-
- /* Must use 5-tap filter? */
- five_taps = height > out_height * 2;
-
- if (!five_taps) {
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
-
- /* Try 5-tap filter if 3-tap fclk is too high */
- if (cpu_is_omap34xx() && height > out_height &&
- fclk > dispc_fclk_rate())
- five_taps = true;
- }
-
- if (width > (2048 >> five_taps)) {
- DSSERR("failed to set up scaling, fclk too low\n");
- return -EINVAL;
- }
-
- if (five_taps)
- fclk = calc_fclk_five_taps(channel, width, height,
- out_width, out_height, color_mode);
-
- DSSDBG("required fclk rate = %lu Hz\n", fclk);
- DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
-
- if (!fclk || fclk > dispc_fclk_rate()) {
- DSSERR("failed to set up scaling, "
- "required fclk rate = %lu Hz, "
- "current fclk rate = %lu Hz\n",
- fclk, dispc_fclk_rate());
- return -EINVAL;
- }
}
if (ilace && !fieldmode) {
@@ -1851,17 +2339,69 @@
if (fieldmode)
field_offset = 1;
- if (rotation_type == OMAP_DSS_ROT_DMA)
+ /* default values */
+ row_inc = pix_inc = 0x1;
+ offset0 = offset1 = 0x0;
+
+ /*
+ * :HACK: we piggyback on the UV-separate feature for TILER to avoid
+ * having to keep rebasing our FEAT_ enum until they add TILER.
+ */
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ /* set BURSTTYPE */
+ bool use_tiler = rotation_type == OMAP_DSS_ROT_TILER;
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), use_tiler, 29, 29);
+ }
+
+ if (rotation_type == OMAP_DSS_ROT_TILER) {
+ struct tiler_view_t view = {0};
+ int bpp = color_mode_to_bpp(color_mode) / 8;
+ /* tiler needs 0-degree width & height */
+ if (rotation & 1)
+ swap(tiler_width, tiler_height);
+
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ tiler_width /= 2;
+
+ tilview_create(&view, paddr, tiler_width, tiler_height);
+ tilview_rotate(&view, rotation * 90);
+ tilview_flip(&view, mirror, false);
+ paddr = view.tsptr;
+
+ /* we cannot do TB field interlaced in rotated view */
+ pix_inc = 1 + (x_decim - 1) * bpp * pixpg;
+ calc_tiler_row_rotation(&view, width * x_decim, bpp * pixpg,
+ y_decim, &row_inc, &offset1, ilace);
+
+ DSSDBG("w, h = %ld %ld\n", tiler_width, tiler_height);
+
+ if (puv_addr) {
+ tilview_create(&view, puv_addr, tiler_width / 2,
+ tiler_height / 2);
+ tilview_rotate(&view, rotation * 90);
+ tilview_flip(&view, mirror, false);
+ puv_addr = view.tsptr;
+ }
+
+ } else if (rotation_type == OMAP_DSS_ROT_DMA) {
calc_dma_rotation_offset(rotation, mirror,
screen_width, width, frame_height, color_mode,
fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc);
- else
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_decim, y_decim);
+ } else {
calc_vrfb_rotation_offset(rotation, mirror,
screen_width, width, frame_height, color_mode,
fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc);
+ }
+ /* adjust back to pixels */
+ if (rotation & 1)
+ height *= pixpg;
+ else
+ width *= pixpg;
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
@@ -1879,8 +2419,8 @@
_dispc_set_row_inc(plane, row_inc);
_dispc_set_pix_inc(plane, pix_inc);
- DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height,
- out_width, out_height);
+ DSSDBG("%d,%d %d*%dx%d*%d -> %dx%d\n", pos_x, pos_y, width, x_decim,
+ height, y_decim, out_width, out_height);
_dispc_set_plane_pos(plane, pos_x, pos_y);
@@ -1895,17 +2435,30 @@
_dispc_set_vid_color_conv(plane, cconv);
}
- _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
+ _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode,
+ rotation_type);
_dispc_set_pre_mult_alpha(plane, pre_mult_alpha);
_dispc_setup_global_alpha(plane, global_alpha);
+ if (cpu_is_omap44xx()) {
+ fifo_low = dispc_calculate_threshold(plane, paddr + offset0,
+ puv_addr + offset0, width, height,
+ row_inc, pix_inc);
+ fifo_high = dispc_get_plane_fifo_size(plane) - 1;
+ dispc_setup_plane_fifo(plane, fifo_low, fifo_high);
+ }
+
return 0;
}
-static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+int dispc_enable_plane(enum omap_plane plane, bool enable)
{
+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+
+ return 0;
}
static void dispc_disable_isr(void *data, u32 mask)
@@ -1922,6 +2475,17 @@
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
}
+void omap_dispc_set_irq_type(int channel, enum omap_dispc_irq_type type)
+{
+ if (type == OMAP_DISPC_IRQ_TYPE_VSYNC) {
+ dispc.channel_irq[channel] = channel == OMAP_DSS_CHANNEL_LCD2 ?
+ DISPC_IRQ_VSYNC2 : DISPC_IRQ_VSYNC;
+ } else {
+ dispc.channel_irq[channel] = channel == OMAP_DSS_CHANNEL_LCD2 ?
+ DISPC_IRQ_FRAMEDONE2 : DISPC_IRQ_FRAMEDONE;
+ }
+}
+
static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
{
struct completion frame_done_completion;
@@ -1929,8 +2493,6 @@
int r;
u32 irq;
- enable_clocks(1);
-
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
@@ -1938,8 +2500,7 @@
REG_GET(DISPC_CONTROL2, 0, 0) :
REG_GET(DISPC_CONTROL, 0, 0);
- irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
- DISPC_IRQ_FRAMEDONE;
+ irq = dispc.channel_irq[channel];
if (!enable && is_on) {
init_completion(&frame_done_completion);
@@ -1964,8 +2525,6 @@
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
}
-
- enable_clocks(0);
}
static void _enable_digit_out(bool enable)
@@ -1973,17 +2532,13 @@
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
}
-static void dispc_enable_digit_out(bool enable)
+static void dispc_enable_digit_out(enum omap_display_type type, bool enable)
{
struct completion frame_done_completion;
int r;
- enable_clocks(1);
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
- enable_clocks(0);
+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
return;
- }
if (enable) {
unsigned long flags;
@@ -2002,7 +2557,8 @@
init_completion(&frame_done_completion);
r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
- DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_FRAMEDONETV);
if (r)
DSSERR("failed to register EVSYNC isr\n");
@@ -2015,13 +2571,17 @@
msecs_to_jiffies(100)))
DSSERR("timeout waiting for EVSYNC\n");
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for EVSYNC\n");
+ /* Don't wait for the odd field in the case of HDMI */
+ if (type != OMAP_DISPLAY_TYPE_HDMI) {
+ if (!wait_for_completion_timeout(&frame_done_completion,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for EVSYNC\n");
+ }
r = omap_dispc_unregister_isr(dispc_disable_isr,
&frame_done_completion,
- DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_FRAMEDONETV);
if (r)
DSSERR("failed to unregister EVSYNC isr\n");
@@ -2031,12 +2591,12 @@
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
if (dss_has_feature(FEAT_MGR_LCD2))
dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ if (dss_has_feature(FEAT_OVL_VID3))
+ dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
-
- enable_clocks(0);
}
bool dispc_is_channel_enabled(enum omap_channel channel)
@@ -2051,13 +2611,14 @@
BUG();
}
-void dispc_enable_channel(enum omap_channel channel, bool enable)
+void dispc_enable_channel(enum omap_channel channel,
+ enum omap_display_type type, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
dispc_enable_lcd_out(channel, enable);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_enable_digit_out(enable);
+ dispc_enable_digit_out(type, enable);
else
BUG();
}
@@ -2067,9 +2628,7 @@
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
- enable_clocks(0);
}
void dispc_lcd_enable_signal(bool enable)
@@ -2077,9 +2636,7 @@
if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
- enable_clocks(0);
}
void dispc_pck_free_enable(bool enable)
@@ -2087,19 +2644,15 @@
if (!dss_has_feature(FEAT_PCKFREEENABLE))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
- enable_clocks(0);
}
void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
else
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
- enable_clocks(0);
}
@@ -2122,27 +2675,21 @@
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
else
REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
- enable_clocks(0);
}
void dispc_set_loadmode(enum omap_dss_load_mode mode)
{
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
- enable_clocks(0);
}
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
- enable_clocks(1);
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
- enable_clocks(0);
}
u32 dispc_get_default_color(enum omap_channel channel)
@@ -2153,9 +2700,7 @@
channel != OMAP_DSS_CHANNEL_LCD &&
channel != OMAP_DSS_CHANNEL_LCD2);
- enable_clocks(1);
l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
- enable_clocks(0);
return l;
}
@@ -2164,7 +2709,6 @@
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2173,14 +2717,12 @@
REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
- enable_clocks(0);
}
void dispc_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
- enable_clocks(1);
if (type) {
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
@@ -2194,33 +2736,27 @@
if (trans_key)
*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
- enable_clocks(0);
}
void dispc_enable_trans_key(enum omap_channel ch, bool enable)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
- enable_clocks(0);
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
{
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
- enable_clocks(1);
+ /* :NOTE: compatibility mode is not supported on LCD2 */
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
- else /* OMAP_DSS_CHANNEL_LCD2 */
- REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
- enable_clocks(0);
}
bool dispc_alpha_blending_enabled(enum omap_channel ch)
{
@@ -2229,16 +2765,14 @@
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return false;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
enabled = REG_GET(DISPC_CONFIG, 19, 19);
else if (ch == OMAP_DSS_CHANNEL_LCD2)
- enabled = REG_GET(DISPC_CONFIG2, 18, 18);
+ enabled = false;
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2248,7 +2782,6 @@
{
bool enabled;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2257,7 +2790,6 @@
enabled = REG_GET(DISPC_CONFIG2, 10, 10);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2285,12 +2817,10 @@
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
else
REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
- enable_clocks(0);
}
void dispc_set_parallel_interface_mode(enum omap_channel channel,
@@ -2322,8 +2852,6 @@
return;
}
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD2) {
l = dispc_read_reg(DISPC_CONTROL2);
l = FLD_MOD(l, stallmode, 11, 11);
@@ -2335,8 +2863,6 @@
l = FLD_MOD(l, gpout1, 16, 16);
dispc_write_reg(DISPC_CONTROL, l);
}
-
- enable_clocks(0);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2389,10 +2915,8 @@
FLD_VAL(vbp, 31, 20);
}
- enable_clocks(1);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- enable_clocks(0);
}
/* change name to mode? */
@@ -2435,10 +2959,8 @@
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
- enable_clocks(1);
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- enable_clocks(0);
}
static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -2457,7 +2979,7 @@
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2487,7 +3009,7 @@
switch (dss_get_lcd_clk_source(channel)) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2510,13 +3032,23 @@
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2) {
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- pcd = FLD_GET(l, 7, 0);
+ pcd = FLD_GET(l, 7, 0);
- r = dispc_lclk_rate(channel);
+ r = dispc_lclk_rate(channel);
- return r / pcd;
+ return r / pcd;
+ } else {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(channel);
+ if (!mgr || !mgr->device)
+ return 0;
+
+ return mgr->device->panel.timings.pixel_clock * 1000;
+ }
}
void dispc_dump_clocks(struct seq_file *s)
@@ -2526,7 +3058,8 @@
enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
enum omap_dss_clk_source lcd_clk_src;
- enable_clocks(1);
+ if (dispc_runtime_get())
+ return;
seq_printf(s, "- DISPC -\n");
@@ -2574,7 +3107,8 @@
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
}
- enable_clocks(0);
+
+ dispc_runtime_put();
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -2612,6 +3146,10 @@
PIS(VID1_END_WIN);
PIS(VID2_FIFO_UNDERFLOW);
PIS(VID2_END_WIN);
+ if (dss_has_feature(FEAT_OVL_VID3)) {
+ PIS(VID3_FIFO_UNDERFLOW);
+ PIS(VID3_END_WIN);
+ }
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
PIS(WAKEUP);
@@ -2627,9 +3165,11 @@
void dispc_dump_regs(struct seq_file *s)
{
+ int i, o;
#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dispc_runtime_get())
+ return;
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2649,7 +3189,8 @@
DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -2680,188 +3221,82 @@
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
- DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO1));
+ for (o = OMAP_DSS_VIDEO1; o <= OMAP_DSS_VIDEO3; o++) {
+ if (o == OMAP_DSS_VIDEO3 && !dss_has_feature(FEAT_OVL_VID3))
+ continue;
- DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO2));
+ DUMPREG(DISPC_OVL_BA0(o));
+ DUMPREG(DISPC_OVL_BA1(o));
+ DUMPREG(DISPC_OVL_POSITION(o));
+ DUMPREG(DISPC_OVL_SIZE(o));
+ DUMPREG(DISPC_OVL_ATTRIBUTES(o));
+ DUMPREG(DISPC_OVL_FIFO_THRESHOLD(o));
+ DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(o));
+ DUMPREG(DISPC_OVL_ROW_INC(o));
+ DUMPREG(DISPC_OVL_PIXEL_INC(o));
+ DUMPREG(DISPC_OVL_FIR(o));
+ DUMPREG(DISPC_OVL_PICTURE_SIZE(o));
+ DUMPREG(DISPC_OVL_ACCU0(o));
+ DUMPREG(DISPC_OVL_ACCU1(o));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 7));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 7));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_H(o, i));
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO1));
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_HV(o, i));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 7));
+ for (i = 0; i < 5; i++)
+ DUMPREG(DISPC_OVL_CONV_COEF(o, i));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 7));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_V(o, i));
+ }
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 7));
+ if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(DISPC_OVL_BA0_UV(o));
+ DUMPREG(DISPC_OVL_BA1_UV(o));
+ DUMPREG(DISPC_OVL_FIR2(o));
+ DUMPREG(DISPC_OVL_ACCU2_0(o));
+ DUMPREG(DISPC_OVL_ACCU2_1(o));
+
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_H2(o, i));
+
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_HV2(o, i));
+
+ for (i = 0; i < 8; i++)
+ DUMPREG(DISPC_OVL_FIR_COEF_V2(o, i));
+ }
+ if (dss_has_feature(FEAT_ATTR2))
+ DUMPREG(DISPC_OVL_ATTRIBUTES2(o));
+
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(DISPC_OVL_PRELOAD(o));
}
- if (dss_has_feature(FEAT_ATTR2))
- DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
-
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 7));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 7));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
-
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO2));
-
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 7));
-
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 7));
- }
- if (dss_has_feature(FEAT_ATTR2))
- DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
-
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
-
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
#undef DUMPREG
}
@@ -2882,9 +3317,7 @@
l |= FLD_VAL(acbi, 11, 8);
l |= FLD_VAL(acb, 7, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
- enable_clocks(0);
}
void dispc_set_pol_freq(enum omap_channel channel,
@@ -3005,15 +3438,11 @@
mask |= isr_data->mask;
}
- enable_clocks(1);
-
old_mask = dispc_read_reg(DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
dispc_write_reg(DISPC_IRQENABLE, mask);
-
- enable_clocks(0);
}
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
@@ -3119,6 +3548,8 @@
PIS(OCP_ERR);
PIS(VID1_FIFO_UNDERFLOW);
PIS(VID2_FIFO_UNDERFLOW);
+ if (dss_has_feature(FEAT_OVL_VID3))
+ PIS(VID3_FIFO_UNDERFLOW);
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
if (dss_has_feature(FEAT_MGR_LCD2))
@@ -3218,6 +3649,8 @@
dispc.error_irqs = 0;
spin_unlock_irqrestore(&dispc.irq_lock, flags);
+ dispc_runtime_get();
+
if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
@@ -3272,6 +3705,24 @@
}
}
+ if (errors & DISPC_IRQ_VID3_FIFO_UNDERFLOW) {
+ DSSERR("VID3_FIFO_UNDERFLOW, disabling VID3\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id == 3) {
+ dispc_enable_plane(ovl->id, 0);
+ dispc_go(ovl->manager->id);
+ mdelay(50);
+ break;
+ }
+ }
+ }
+
if (errors & DISPC_IRQ_SYNC_LOST) {
struct omap_overlay_manager *manager = NULL;
bool enable = false;
@@ -3283,6 +3734,11 @@
mgr = omap_dss_get_overlay_manager(i);
if (mgr->id == OMAP_DSS_CHANNEL_LCD) {
+ if (!mgr->device->first_vsync) {
+ DSSERR("First SYNC_LOST... ignoring\n");
+ break;
+ }
+
manager = mgr;
enable = mgr->device->state ==
OMAP_DSS_DISPLAY_ACTIVE;
@@ -3315,17 +3771,23 @@
struct omap_overlay_manager *manager = NULL;
bool enable = false;
- DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
+ pr_err_ratelimited("SYNC_LOST_DIGIT, disabling TV\n");
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
struct omap_overlay_manager *mgr;
mgr = omap_dss_get_overlay_manager(i);
if (mgr->id == OMAP_DSS_CHANNEL_DIGIT) {
+ if (!mgr->device->first_vsync) {
+ DSSERR("First SYNC_LOST on TV... ignoring\n");
+ }
+
manager = mgr;
enable = mgr->device->state ==
OMAP_DSS_DISPLAY_ACTIVE;
+ mgr->device->sync_lost_error = 1;
mgr->device->driver->disable(mgr->device);
+ mgr->device->sync_lost_error = 0;
break;
}
}
@@ -3361,6 +3823,11 @@
mgr = omap_dss_get_overlay_manager(i);
if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
+ if (!mgr->device->first_vsync) {
+ DSSERR("First SYNC_LOST... ignoring\n");
+ break;
+ }
+
manager = mgr;
enable = mgr->device->state ==
OMAP_DSS_DISPLAY_ACTIVE;
@@ -3404,6 +3871,8 @@
dispc.irq_error_mask |= errors;
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ dispc_runtime_put();
}
int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
@@ -3446,11 +3915,15 @@
int r;
DECLARE_COMPLETION_ONSTACK(completion);
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
irqmask);
if (r)
- return r;
+ goto done;
timeout = wait_for_completion_interruptible_timeout(&completion,
timeout);
@@ -3458,12 +3931,14 @@
omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
if (timeout == 0)
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ else if (timeout == -ERESTARTSYS)
+ r = timeout;
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
+done:
+ dispc_runtime_put();
- return 0;
+ return r;
}
#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
@@ -3498,7 +3973,8 @@
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
if (dss_has_feature(FEAT_MGR_LCD2))
dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
-
+ if (dss_has_feature(FEAT_OVL_VID3))
+ dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
* so clear it */
dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
@@ -3522,13 +3998,6 @@
{
u32 l;
- l = dispc_read_reg(DISPC_SYSCONFIG);
- l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
- l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
- l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
- l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
- dispc_write_reg(DISPC_SYSCONFIG, l);
-
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
@@ -3538,83 +4007,47 @@
dispc_write_reg(DISPC_DIVISOR, l);
}
+ /* for OMAP4 ERRATUM xxxx: Mstandby and disconnect protocol issue */
+ if (cpu_is_omap44xx()) {
+ l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
+ l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
+ }
+
/* FUNCGATED */
if (dss_has_feature(FEAT_FUNCGATED))
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ REG_FLD_MOD(DISPC_CONFIG, 1, 17, 17);
+
/* L3 firewall setting: enable access to OCM RAM */
/* XXX this should be somewhere in plat-omap */
if (cpu_is_omap24xx())
__raw_writel(0x402000b0, OMAP2_L3_IO_ADDRESS(0x680050a0));
- _dispc_setup_color_conv_coef();
-
dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
dispc_read_plane_fifo_sizes();
}
-int dispc_enable_plane(enum omap_plane plane, bool enable)
-{
- DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
-
- enable_clocks(1);
- _dispc_enable_plane(plane, enable);
- enable_clocks(0);
-
- return 0;
-}
-
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha,
- u8 pre_mult_alpha, enum omap_channel channel,
- u32 puv_addr)
-{
- int r = 0;
-
- DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
- plane, paddr, screen_width, pos_x, pos_y,
- width, height,
- out_width, out_height,
- ilace, color_mode,
- rotation, mirror, channel);
-
- enable_clocks(1);
-
- r = _dispc_setup_plane(plane,
- paddr, screen_width,
- pos_x, pos_y,
- width, height,
- out_width, out_height,
- color_mode, ilace,
- rotation_type,
- rotation, mirror,
- global_alpha,
- pre_mult_alpha,
- channel, puv_addr);
-
- enable_clocks(0);
-
- return r;
-}
-
/* DISPC HW IP initialisation */
static int omap_dispchw_probe(struct platform_device *pdev)
{
u32 rev;
int r = 0;
struct resource *dispc_mem;
+ struct clk *clk;
dispc.pdev = pdev;
+ clk = clk_get(&pdev->dev, "dss_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get dss_clk\n");
+ r = PTR_ERR(clk);
+ goto err_get_clk;
+ }
+
+ dispc.dss_clk = clk;
+
spin_lock_init(&dispc.irq_lock);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -3628,51 +4061,65 @@
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
r = -EINVAL;
- goto fail0;
+ goto err_ioremap;
}
dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
if (!dispc.base) {
DSSERR("can't ioremap DISPC\n");
r = -ENOMEM;
- goto fail0;
+ goto err_ioremap;
}
dispc.irq = platform_get_irq(dispc.pdev, 0);
if (dispc.irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto fail1;
+ goto err_irq;
}
r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
"OMAP DISPC", dispc.pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto fail1;
+ goto err_irq;
}
- enable_clocks(1);
+ mutex_init(&dispc.runtime_lock);
+
+ pm_runtime_enable(&pdev->dev);
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_runtime_get;
_omap_dispc_initial_config();
_omap_dispc_initialize_irq();
- dispc_save_context();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- enable_clocks(0);
+ dispc_runtime_put();
return 0;
-fail1:
+
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
+ free_irq(dispc.irq, dispc.pdev);
+err_irq:
iounmap(dispc.base);
-fail0:
+err_ioremap:
+ clk_put(dispc.dss_clk);
+err_get_clk:
return r;
}
static int omap_dispchw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(dispc.dss_clk);
+
free_irq(dispc.irq, dispc.pdev);
iounmap(dispc.base);
return 0;
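
The dispc.c hunks above replace the old enable_clocks(1)/enable_clocks(0) pairs with dispc_runtime_get()/dispc_runtime_put(), the same refcounted get/put idiom the dsi and dss hunks later in this series spell out with a mutex and a runtime_count. Below is a minimal user-space sketch of that idiom only; the struct, helper names and the fake power on/off calls are illustrative, not the driver's actual symbols.

#include <pthread.h>
#include <stdio.h>

struct runtime_ctx {
	pthread_mutex_t lock;
	int count;			/* how many users currently hold the block */
};

static int hw_power_on(void)   { puts("power on");  return 0; }
static void hw_power_off(void) { puts("power off"); }

static int runtime_get(struct runtime_ctx *ctx)
{
	int r = 0;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->count++ == 0)		/* first user powers the block up */
		r = hw_power_on();
	if (r)
		ctx->count--;		/* roll back the count on failure */
	pthread_mutex_unlock(&ctx->lock);
	return r;
}

static void runtime_put(struct runtime_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (--ctx->count == 0)		/* last user powers it back down */
		hw_power_off();
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct runtime_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	runtime_get(&ctx);		/* powers on */
	runtime_get(&ctx);		/* nested user, no extra power-on */
	runtime_put(&ctx);
	runtime_put(&ctx);		/* powers off */
	return 0;
}
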
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 6c9ee0a..ee08b69 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -174,7 +174,8 @@
case OMAP_DSS_CHANNEL_LCD:
return 0x0070;
case OMAP_DSS_CHANNEL_DIGIT:
- BUG();
+ /* FIXME venc pclk? */
+ return 0x0070;
case OMAP_DSS_CHANNEL_LCD2:
return 0x040C;
default:
@@ -291,6 +292,8 @@
return 0x00BC;
case OMAP_DSS_VIDEO2:
return 0x014C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0300;
default:
BUG();
}
@@ -304,6 +307,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0000;
+ case OMAP_DSS_VIDEO3:
+ return 0x0008;
default:
BUG();
}
@@ -316,6 +321,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0004;
+ case OMAP_DSS_VIDEO3:
+ return 0x000C;
default:
BUG();
}
@@ -330,6 +337,8 @@
return 0x0544;
case OMAP_DSS_VIDEO2:
return 0x04BC;
+ case OMAP_DSS_VIDEO3:
+ return 0x0310;
default:
BUG();
}
@@ -344,6 +353,8 @@
return 0x0548;
case OMAP_DSS_VIDEO2:
return 0x04C0;
+ case OMAP_DSS_VIDEO3:
+ return 0x0314;
default:
BUG();
}
@@ -356,6 +367,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0008;
+ case OMAP_DSS_VIDEO3:
+ return 0x009C;
default:
BUG();
}
@@ -368,6 +381,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x000C;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A8;
default:
BUG();
}
@@ -381,6 +396,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0010;
+ case OMAP_DSS_VIDEO3:
+ return 0x0070;
default:
BUG();
}
@@ -395,6 +412,8 @@
return 0x0568;
case OMAP_DSS_VIDEO2:
return 0x04DC;
+ case OMAP_DSS_VIDEO3:
+ return 0x032C;
default:
BUG();
}
@@ -408,6 +427,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0014;
+ case OMAP_DSS_VIDEO3:
+ return 0x008C;
default:
BUG();
}
@@ -421,6 +442,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0018;
+ case OMAP_DSS_VIDEO3:
+ return 0x0088;
default:
BUG();
}
@@ -434,6 +457,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x001C;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A4;
default:
BUG();
}
@@ -447,6 +472,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0020;
+ case OMAP_DSS_VIDEO3:
+ return 0x0098;
default:
BUG();
}
@@ -459,6 +486,7 @@
return 0x0034;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
BUG();
default:
BUG();
@@ -472,6 +500,7 @@
return 0x0038;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
BUG();
default:
BUG();
@@ -486,6 +515,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0024;
+ case OMAP_DSS_VIDEO3:
+ return 0x0090;
default:
BUG();
}
@@ -500,6 +531,8 @@
return 0x0580;
case OMAP_DSS_VIDEO2:
return 0x055C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0424;
default:
BUG();
}
@@ -513,6 +546,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0028;
+ case OMAP_DSS_VIDEO3:
+ return 0x0094;
default:
BUG();
}
@@ -527,6 +562,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x002C;
+ case OMAP_DSS_VIDEO3:
+ return 0x0000;
default:
BUG();
}
@@ -541,6 +578,8 @@
return 0x0584;
case OMAP_DSS_VIDEO2:
return 0x0560;
+ case OMAP_DSS_VIDEO3:
+ return 0x0428;
default:
BUG();
}
@@ -554,6 +593,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0030;
+ case OMAP_DSS_VIDEO3:
+ return 0x0004;
default:
BUG();
}
@@ -568,6 +609,8 @@
return 0x0588;
case OMAP_DSS_VIDEO2:
return 0x0564;
+ case OMAP_DSS_VIDEO3:
+ return 0x042C;
default:
BUG();
}
@@ -582,6 +625,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0034 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0010 + i * 0x8;
default:
BUG();
}
@@ -597,6 +642,8 @@
return 0x058C + i * 0x8;
case OMAP_DSS_VIDEO2:
return 0x0568 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0430 + i * 0x8;
default:
BUG();
}
@@ -611,6 +658,8 @@
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0038 + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0014 + i * 0x8;
default:
BUG();
}
@@ -626,6 +675,8 @@
return 0x0590 + i * 8;
case OMAP_DSS_VIDEO2:
return 0x056C + i * 0x8;
+ case OMAP_DSS_VIDEO3:
+ return 0x0434 + i * 0x8;
default:
BUG();
}
@@ -639,6 +690,7 @@
BUG();
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
return 0x0074 + i * 0x4;
default:
BUG();
@@ -655,6 +707,8 @@
return 0x0124 + i * 0x4;
case OMAP_DSS_VIDEO2:
return 0x00B4 + i * 0x4;
+ case OMAP_DSS_VIDEO3:
+ return 0x0050 + i * 0x4;
default:
BUG();
}
@@ -670,6 +724,8 @@
return 0x05CC + i * 0x4;
case OMAP_DSS_VIDEO2:
return 0x05A8 + i * 0x4;
+ case OMAP_DSS_VIDEO3:
+ return 0x0470 + i * 0x4;
default:
BUG();
}
@@ -684,6 +740,8 @@
return 0x0174;
case OMAP_DSS_VIDEO2:
return 0x00E8;
+ case OMAP_DSS_VIDEO3:
+ return 0x00A0;
default:
BUG();
}
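
The dispc.h hunks above extend each per-plane register-offset helper with an OMAP_DSS_VIDEO3 case. The standalone sketch below shows just that dispatch shape; the enum and function names are illustrative, and the two offsets are taken from one of the coefficient helpers in the hunks above.

#include <stdio.h>
#include <stdlib.h>

enum plane { PLANE_GFX, PLANE_VIDEO1, PLANE_VIDEO2, PLANE_VIDEO3 };

static unsigned coef_offset(enum plane p, int i)
{
	switch (p) {
	case PLANE_VIDEO1:
	case PLANE_VIDEO2:
		return 0x0034 + i * 0x8;	/* VID1/VID2 share a layout */
	case PLANE_VIDEO3:
		return 0x0010 + i * 0x8;	/* VID3 sits in its own block */
	default:
		abort();			/* GFX has no such register */
	}
}

int main(void)
{
	printf("VID3 coef 2 at 0x%04x\n", coef_offset(PLANE_VIDEO3, 2));
	return 0;
}
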
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index c2dfc8c..8b3b360 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -339,6 +339,18 @@
*fifo_low = fifo_size - burst_size_bytes;
}
+void omapdss_display_get_dimensions(struct omap_dss_device *dssdev,
+ u32 *width_in_um, u32 *height_in_um)
+{
+ if (dssdev->driver->get_dimensions) {
+ dssdev->driver->get_dimensions(dssdev,
+ width_in_um, height_in_um);
+ } else {
+ *width_in_um = dssdev->panel.width_in_um;
+ *height_in_um = dssdev->panel.height_in_um;
+ }
+}
+
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
{
switch (dssdev->type) {
@@ -446,6 +458,8 @@
return;
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dssdev->state_notifiers);
+
/* create device sysfs files */
i = 0;
while ((attr = display_sysfs_attrs[i++]) != NULL) {
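diff context aside, omapdss_display_get_dimensions() above follows an "optional panel callback with a static fallback" shape: use the driver's get_dimensions() hook when the panel provides one, otherwise fall back to the fixed panel data. The sketch below models only that control flow; the struct layout and numbers are invented for illustration.

#include <stdio.h>

struct panel {
	unsigned width_um, height_um;
	/* optional: panels that can report their size at runtime set this */
	void (*get_dimensions)(struct panel *p, unsigned *w, unsigned *h);
};

static void get_dimensions(struct panel *p, unsigned *w, unsigned *h)
{
	if (p->get_dimensions) {
		p->get_dimensions(p, w, h);	/* driver knows best */
	} else {
		*w = p->width_um;		/* static panel data */
		*h = p->height_um;
	}
}

int main(void)
{
	struct panel p = { 108000, 64800, NULL };	/* 108.0 x 64.8 mm */
	unsigned w, h;

	get_dimensions(&p, &w, &h);
	printf("%u x %u um\n", w, h);
	return 0;
}
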
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index ff6bd30..f053b18 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -23,7 +23,6 @@
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -130,8 +129,6 @@
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
-
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -144,7 +141,7 @@
r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
if (r)
- goto err0;
+ return r;
pck = fck / lck_div / pck_div / 1000;
@@ -158,12 +155,10 @@
dispc_set_lcd_timings(dssdev->manager->id, t);
-err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
- return r;
+ return 0;
}
-static int dpi_basic_init(struct omap_dss_device *dssdev)
+static void dpi_basic_init(struct omap_dss_device *dssdev)
{
bool is_tft;
@@ -175,8 +170,6 @@
OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
dispc_set_tft_data_lines(dssdev->manager->id,
dssdev->phy.dpi.data_lines);
-
- return 0;
}
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -186,31 +179,38 @@
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
if (cpu_is_omap34xx()) {
r = regulator_enable(dpi.vdds_dsi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
-
- r = dpi_basic_init(dssdev);
+ r = dss_runtime_get();
if (r)
- goto err2;
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ dpi_basic_init(dssdev);
if (dpi_use_dsi_pll(dssdev)) {
- dss_clk_enable(DSS_CLK_SYSCK);
+ r = dsi_runtime_get(dpi.dsidev);
+ if (r)
+ goto err_get_dsi;
+
r = dsi_pll_init(dpi.dsidev, 0, 1);
if (r)
- goto err3;
+ goto err_dsi_pll_init;
}
r = dpi_set_mode(dssdev);
if (r)
- goto err4;
+ goto err_set_mode;
mdelay(2);
@@ -218,19 +218,22 @@
return 0;
-err4:
+err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
-err3:
+err_dsi_pll_init:
if (dpi_use_dsi_pll(dssdev))
- dss_clk_disable(DSS_CLK_SYSCK);
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dpi.dsidev);
+err_get_dsi:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_dpi_display_enable);
@@ -242,10 +245,11 @@
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
- dss_clk_disable(DSS_CLK_SYSCK);
+ dsi_runtime_put(dpi.dsidev);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -257,11 +261,26 @@
void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
+ int r;
+
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = dss_runtime_get();
+ if (r)
+ return;
+
+ r = dispc_runtime_get();
+ if (r) {
+ dss_runtime_put();
+ return;
+ }
+
dpi_set_mode(dssdev);
dispc_go(dssdev->manager->id);
+
+ dispc_runtime_put();
+ dss_runtime_put();
}
}
EXPORT_SYMBOL(dpi_set_timings);
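
dpi_set_timings() now brackets the reconfiguration with dss_runtime_get() and dispc_runtime_get() and unwinds them in reverse order when the inner get fails. The sketch below models only that bracketing; the stub functions stand in for the real runtime hooks, and the reconfigure step is a placeholder for dpi_set_mode() plus dispc_go().

#include <stdio.h>

static int dss_get(void)    { puts("dss get");    return 0; }
static void dss_put(void)   { puts("dss put"); }
static int dispc_get(void)  { puts("dispc get");  return 0; }
static void dispc_put(void) { puts("dispc put"); }

static void timings_update(void)
{
	if (dss_get())
		return;				/* nothing held yet */
	if (dispc_get()) {
		dss_put();			/* undo only the outer get */
		return;
	}

	puts("reconfigure (dpi_set_mode + dispc_go)");

	dispc_put();				/* release in reverse order */
	dss_put();
}

int main(void) { timings_update(); return 0; }
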
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 345757c..93b52f6 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -205,6 +206,7 @@
#define DSI_DT_DCS_LONG_WRITE 0x39
#define DSI_DT_RX_ACK_WITH_ERR 0x02
+#define DSI_DT_RX_LONG_READ 0x1a
#define DSI_DT_RX_DCS_LONG_READ 0x1c
#define DSI_DT_RX_SHORT_READ_1 0x21
#define DSI_DT_RX_SHORT_READ_2 0x22
@@ -267,8 +269,15 @@
struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+
+ struct mutex runtime_lock;
+ int runtime_count;
+
int irq;
+ struct clk *dss_clk;
+ struct clk *sys_clk;
+
void (*dsi_mux_pads)(bool enable);
struct dsi_clock_info current_cinfo;
@@ -389,15 +398,6 @@
return __raw_readl(dsi->base + idx.idx);
}
-
-void dsi_save_context(void)
-{
-}
-
-void dsi_restore_context(void)
-{
-}
-
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -493,9 +493,18 @@
total_bytes * 1000 / total_us);
}
#else
-#define dsi_perf_mark_setup(x)
-#define dsi_perf_mark_start(x)
-#define dsi_perf_show(x, y)
+static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_show(struct platform_device *dsidev,
+ const char *name)
+{
+}
#endif
static void print_irq_status(u32 status)
@@ -1039,13 +1048,69 @@
return e;
}
-/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
-static inline void enable_clocks(bool enable)
+int dsi_runtime_get(struct platform_device *dsidev)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ mutex_lock(&dsi->runtime_lock);
+
+ if (dsi->runtime_count++ == 0) {
+ DSSDBG("dsi_runtime_get\n");
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ /* XXX dsi fclk can also come from DSI PLL */
+ clk_enable(dsi->dss_clk);
+
+ r = pm_runtime_get_sync(&dsi->pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err_runtime_get;
+ }
+
+ mutex_unlock(&dsi->runtime_lock);
+
+ return 0;
+
+err_runtime_get:
+ clk_disable(dsi->dss_clk);
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ mutex_unlock(&dsi->runtime_lock);
+
+ return r;
+}
+
+void dsi_runtime_put(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ mutex_lock(&dsi->runtime_lock);
+
+ if (--dsi->runtime_count == 0) {
+ int r;
+
+ DSSDBG("dsi_runtime_put\n");
+
+ r = pm_runtime_put_sync(&dsi->pdev->dev);
+ WARN_ON(r);
+
+ clk_disable(dsi->dss_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+ }
+
+ mutex_unlock(&dsi->runtime_lock);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1055,9 +1120,9 @@
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enable)
- dss_clk_enable(DSS_CLK_SYSCK);
+ clk_enable(dsi->sys_clk);
else
- dss_clk_disable(DSS_CLK_SYSCK);
+ clk_disable(dsi->sys_clk);
if (enable && dsi->pll_locked) {
if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1150,10 +1215,11 @@
{
unsigned long r;
int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
@@ -1262,7 +1328,7 @@
return -EINVAL;
if (cinfo->use_sys_clk) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
/* XXX it is unclear if highfreq should be used
* with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
@@ -1311,7 +1377,7 @@
int match = 0;
unsigned long dss_sys_clk, max_dss_fck;
- dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
+ dss_sys_clk = clk_get_rate(dsi->sys_clk);
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
@@ -1539,6 +1605,9 @@
l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
+
+ if (cpu_is_omap44xx())
+ l = FLD_MOD(l, 3, 22, 21); /* DSI_REF_SEL */
dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
@@ -1601,7 +1670,6 @@
dsi->vdds_dsi_reg = vdds_dsi;
}
- enable_clocks(1);
dsi_enable_pll_clock(dsidev, 1);
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
@@ -1653,7 +1721,6 @@
}
err0:
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
return r;
}
@@ -1671,7 +1738,6 @@
}
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
DSSDBG("PLL uninit done\n");
@@ -1688,7 +1754,8 @@
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
- enable_clocks(1);
+ if (dsi_runtime_get(dsidev))
+ return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1731,7 +1798,7 @@
seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
}
void dsi_dump_clocks(struct seq_file *s)
@@ -1873,7 +1940,8 @@
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dsi_runtime_get(dsidev))
+ return;
dsi_enable_scp_clk(dsidev);
DUMPREG(DSI_REVISION);
@@ -1947,7 +2015,7 @@
DUMPREG(DSI_PLL_CONFIGURATION2);
dsi_disable_scp_clk(dsidev);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dsidev);
#undef DUMPREG
}
@@ -1994,6 +2062,10 @@
/* PWR_CMD */
REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
+ if (cpu_is_omap44xx())
+ /* bit 30 must be set to 1 for GO on OMAP4 */
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, 1, 30, 30);
+
/* PWR_STATUS */
while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
26, 25) != state) {
@@ -2354,6 +2426,13 @@
if (dsi->dsi_mux_pads)
dsi->dsi_mux_pads(true);
+ if (cpu_is_omap44xx()) {
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+ /* HS_AUTO_STOP_ENABLE */
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 18, 18);
+ }
+
dsi_enable_scp_clk(dsidev);
/* A dummy read using the SCP interface to any DSIPHY register is
@@ -2463,28 +2542,6 @@
dsi->dsi_mux_pads(false);
}
-static int _dsi_wait_reset(struct platform_device *dsidev)
-{
- int t = 0;
-
- while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
- if (++t > 5) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _dsi_reset(struct platform_device *dsidev)
-{
- /* Soft reset */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
- return _dsi_wait_reset(dsidev);
-}
-
static void dsi_config_tx_fifo(struct platform_device *dsidev,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
@@ -2722,6 +2779,8 @@
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
+ if (channel == 0)
+ r = FLD_MOD(r, 1, 11, 10); /* OCP_WIDTH = 32 bit */
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
@@ -2885,6 +2944,10 @@
} else if (dt == DSI_DT_RX_SHORT_READ_2) {
DSSERR("\tDCS short response, 2 byte: %#x\n",
FLD_GET(val, 23, 8));
+ } else if (dt == DSI_DT_RX_LONG_READ) {
+ DSSERR("\tlong response, len %d\n",
+ FLD_GET(val, 23, 8));
+ dsi_vc_flush_long_data(dsidev, channel);
} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
@@ -3229,7 +3292,7 @@
buf[1] = (data >> 8) & 0xff;
return 2;
- } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+ } else if (dt == DSI_DT_RX_DCS_LONG_READ || dt == DSI_DT_RX_LONG_READ) {
int w;
int len = FLD_GET(val, 23, 8);
if (dsi->debug_read)
@@ -3386,6 +3449,10 @@
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ /* Reset LANEx_ULPS_SIG2 */
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
+ 7, 5);
+
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
dsi_if_enable(dsidev, false);
@@ -3401,7 +3468,7 @@
}
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+ unsigned ticks, bool x4, bool x16, bool to)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3413,7 +3480,7 @@
fck = dsi_fclk_rate(dsidev);
r = dsi_read_reg(dsidev, DSI_TIMING2);
- r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
+ r = FLD_MOD(r, to ? 1 : 0, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
@@ -3428,7 +3495,7 @@
}
static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
- bool x8, bool x16)
+ bool x8, bool x16, bool to)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3440,7 +3507,7 @@
fck = dsi_fclk_rate(dsidev);
r = dsi_read_reg(dsidev, DSI_TIMING1);
- r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
+ r = FLD_MOD(r, to ? 1 : 0, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
@@ -3455,7 +3522,8 @@
}
static void dsi_set_stop_state_counter(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+ unsigned ticks, bool x4, bool x16,
+ bool stop_mode)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3467,7 +3535,7 @@
fck = dsi_fclk_rate(dsidev);
r = dsi_read_reg(dsidev, DSI_TIMING1);
- r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
+ r = FLD_MOD(r, stop_mode ? 1 : 0, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
@@ -3482,7 +3550,7 @@
}
static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+ unsigned ticks, bool x4, bool x16, bool to)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3494,7 +3562,7 @@
fck = dsi_get_txbyteclkhs(dsidev);
r = dsi_read_reg(dsidev, DSI_TIMING2);
- r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
+ r = FLD_MOD(r, to ? 1 : 0, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
@@ -3507,7 +3575,8 @@
ticks, x4 ? " x4" : "", x16 ? " x16" : "",
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static int dsi_proto_config(struct omap_dss_device *dssdev)
+
+static int dsi_cmd_proto_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
u32 r;
@@ -3524,10 +3593,10 @@
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
- dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
- dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
- dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
- dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
+ dsi_set_stop_state_counter(dsidev, 0x1000, false, false, true);
+ dsi_set_ta_timeout(dsidev, 0x1fff, true, true, true);
+ dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true, true);
+ dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true, true);
switch (dssdev->ctrl.pixel_size) {
case 16:
@@ -3569,6 +3638,165 @@
return 0;
}
+static int dispc_to_dsi_clock(int val, int bytes_per_pixel, int lanes)
+{
+ return (val * bytes_per_pixel + lanes / 2) / lanes;
+}
+static int dsi_video_proto_config(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct omap_video_timings *timings = &dssdev->panel.timings;
+ int buswidth = 0;
+ u32 r;
+ int bytes_per_pixel;
+ int hbp, hfp, hsa, tl;
+
+ dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32);
+
+ dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32);
+
+ dsi_set_stop_state_counter(dsidev, 0x1fff, true, true, false);
+ dsi_set_ta_timeout(dsidev, 0x1fff, true, true, true);
+ dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true, false);
+ dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true, true);
+
+ switch (dssdev->ctrl.pixel_size) {
+ case 16:
+ buswidth = 0;
+ bytes_per_pixel = 2;
+ break;
+ case 18:
+ buswidth = 1;
+ bytes_per_pixel = 3;
+ break;
+ case 24:
+ buswidth = 2;
+ bytes_per_pixel = 3;
+ break;
+ default:
+ BUG();
+ }
+
+ r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
+ r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata */
+ r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
+ r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
+ r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
+ r = FLD_MOD(r, 0, 10, 10); /* VP_HSYNC_POL */
+ r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
+ r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER */
+ r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
+ r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
+ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
+ r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
+ r = FLD_MOD(r, 1, 20, 20); /* BLANKING_MODE */
+ r = FLD_MOD(r, 1, 21, 21); /* HFP_BLANKING */
+ r = FLD_MOD(r, 1, 22, 22); /* HBP_BLANKING */
+ r = FLD_MOD(r, 1, 23, 23); /* HSA_BLANKING */
+ dsi_write_reg(dsidev, DSI_CTRL, r);
+
+ if (!dssdev->skip_init) {
+ dsi_vc_initial_config(dsidev, 0);
+ dsi_vc_initial_config(dsidev, 1);
+ dsi_vc_initial_config(dsidev, 2);
+ dsi_vc_initial_config(dsidev, 3);
+ }
+
+ hbp = dispc_to_dsi_clock(timings->hbp, bytes_per_pixel, 4);
+ hfp = dispc_to_dsi_clock(timings->hfp, bytes_per_pixel, 4);
+ hsa = dispc_to_dsi_clock(timings->hsw, bytes_per_pixel, 4);
+ tl = hbp + hfp + hsa +
+ dispc_to_dsi_clock(timings->x_res, bytes_per_pixel, 4);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = FLD_MOD(r, hbp - 1, 11, 0); /* HBP */
+ r = FLD_MOD(r, hfp - 1, 23, 12); /* HFP */
+ r = FLD_MOD(r, hsa - 1, 31, 24); /* HSA */
+ dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
+ r = FLD_MOD(r, timings->vbp, 7, 0); /* VBP */
+ r = FLD_MOD(r, timings->vfp, 15, 8); /* VFP */
+ r = FLD_MOD(r, timings->vsw, 23, 16); /* VSA */
+ r = FLD_MOD(r, 4, 27, 24); /* WINDOW_SYNC */
+ dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
+ r = FLD_MOD(r, timings->y_res, 14, 0);
+ r = FLD_MOD(r, tl - 1, 31, 16);
+ dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
+
+ /* TODO: either calculate these values or make them configurable */
+ r = FLD_VAL(72, 23, 16) | /* HSA_HS_INTERLEAVING */
+ FLD_VAL(114, 15, 8) | /* HFB_HS_INTERLEAVING */
+ FLD_VAL(150, 7, 0); /* HBB_HS_INTERLEAVING */
+ dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+ r = FLD_VAL(130, 23, 16) | /* HSA_LP_INTERLEAVING */
+ FLD_VAL(223, 15, 8) | /* HFB_LP_INTERLEAVING */
+ FLD_VAL(59, 7, 0); /* HBB_LP_INTERLEAVING */
+ dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+ r = FLD_VAL(0x7A67, 31, 16) | /* BL_HS_INTERLEAVING */
+ FLD_VAL(0x31D1, 15, 0); /* BL_LP_INTERLEAVING */
+ dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+
+ r = FLD_VAL(18, 31, 16) | /* ENTER_HS_MODE_LATENCY */
+ FLD_VAL(15, 15, 0); /* EXIT_HS_MODE_LATENCY */
+ dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
+
+ return 0;
+}
+
+int dsi_video_mode_enable(struct omap_dss_device *dssdev, u8 data_type)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ u16 word_count;
+ u32 r;
+ u32 header;
+
+ dsi_if_enable(dsidev, 0);
+ dsi_vc_enable(dsidev, 1, 0);
+ dsi_vc_enable(dsidev, 0, 0);
+
+ r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
+ dsi_write_reg(dsidev, DSI_TIMING1, r);
+
+ if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 15, 0) != 0)
+ BUG();
+
+ r = dsi_read_reg(dsidev, DSI_VC_CTRL(0));
+ r = FLD_MOD(r, 1, 4, 4);
+ r = FLD_MOD(r, 1, 9, 9);
+ dsi_write_reg(dsidev, DSI_VC_CTRL(0), r);
+
+ r = dsi_read_reg(dsidev, DSI_VC_CTRL(1));
+ r = FLD_MOD(r, 0, 4, 4);
+ r = FLD_MOD(r, 1, 9, 9);
+ dsi_write_reg(dsidev, DSI_VC_CTRL(1), r);
+
+ word_count = dssdev->panel.timings.x_res * 3;
+ header = FLD_VAL(0, 31, 24) | /* ECC */
+ FLD_VAL(word_count, 23, 8) | /* WORD_COUNT */
+ FLD_VAL(0, 7, 6) | /* VC_ID */
+ FLD_VAL(data_type, 5, 0);
+ dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(0), header);
+
+ dsi_vc_enable(dsidev, 1, 1);
+ dsi_vc_enable(dsidev, 0, 1);
+ dsi_if_enable(dsidev, 1);
+
+ return 0;
+}
+
static void dsi_proto_timings(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3579,6 +3807,7 @@
unsigned ddr_clk_pre, ddr_clk_post;
unsigned enter_hs_mode_lat, exit_hs_mode_lat;
unsigned ths_eot;
+ unsigned offset_ddr_clk;
u32 r;
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
@@ -3603,9 +3832,13 @@
ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
+ /* DDR PRE & DDR POST increased to keep LP-11 under 10 usec */
+ offset_ddr_clk = dssdev->clocks.dsi.offset_ddr_clk;
+
ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
- 4);
- ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
+ 4) + offset_ddr_clk;
+ ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot
+ + offset_ddr_clk;
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
@@ -3807,6 +4040,11 @@
DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
x, y, w, h);
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_VIDEO_MODE) {
+ dss_start_update(dssdev);
+ return;
+ }
+
dsi_vc_config_vp(dsidev, channel);
bytespp = dssdev->ctrl.pixel_size / 8;
@@ -4017,23 +4255,30 @@
irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
- r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
- irq);
- if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
- return r;
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_CMD_MODE) {
+ r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
+ irq);
+ if (r) {
+ DSSERR("can't get FRAMEDONE irq\n");
+ return r;
+ }
+
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_DSI);
+ dispc_enable_fifohandcheck(dssdev->manager->id, 1);
+ } else {
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_enable_fifohandcheck(dssdev->manager->id, 0);
}
dispc_set_lcd_display_type(dssdev->manager->id,
OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(dssdev->manager->id,
- OMAP_DSS_PARALLELMODE_DSI);
- dispc_enable_fifohandcheck(dssdev->manager->id, 1);
dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
- {
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_CMD_MODE) {
struct omap_video_timings timings = {
.hsw = 1,
.hfp = 1,
@@ -4044,6 +4289,9 @@
};
dispc_set_lcd_timings(dssdev->manager->id, &timings);
+ } else {
+ dispc_set_lcd_timings(dssdev->manager->id,
+ &dssdev->panel.timings);
}
return 0;
@@ -4056,8 +4304,9 @@
irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
- omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
- irq);
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_CMD_MODE)
+ omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
+ irq);
}
static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
@@ -4120,13 +4369,20 @@
int dsi_module = dsi_get_dsidev_id(dsidev);
int r;
+ /* The SCPClk is required for PLL and complexio registers on OMAP4 */
+ if (cpu_is_omap44xx())
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14);
+
r = dsi_pll_init(dsidev, true, true);
+
if (r)
goto err0;
- r = dsi_configure_dsi_clocks(dssdev);
- if (r)
- goto err1;
+ if (!dssdev->skip_init) {
+ r = dsi_configure_dsi_clocks(dssdev);
+ if (r)
+ goto err1;
+ }
dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
@@ -4135,13 +4391,19 @@
DSSDBG("PLL OK\n");
- r = dsi_configure_dispc_clocks(dssdev);
- if (r)
- goto err2;
+ if (!dssdev->skip_init) {
+ r = dsi_configure_dispc_clocks(dssdev);
+ if (r)
+ goto err2;
+ }
- r = dsi_cio_init(dssdev);
- if (r)
- goto err2;
+ if (!dssdev->skip_init) {
+ r = dsi_cio_init(dssdev);
+ if (r)
+ goto err2;
+ }
+ else
+ dsi_enable_scp_clk(dsidev);
_dsi_print_reset_status(dsidev);
@@ -4151,17 +4413,23 @@
if (1)
_dsi_print_reset_status(dsidev);
- r = dsi_proto_config(dssdev);
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_CMD_MODE)
+ r = dsi_cmd_proto_config(dssdev);
+ else
+ r = dsi_video_proto_config(dssdev);
+
if (r)
goto err3;
/* enable interface */
- dsi_vc_enable(dsidev, 0, 1);
- dsi_vc_enable(dsidev, 1, 1);
- dsi_vc_enable(dsidev, 2, 1);
- dsi_vc_enable(dsidev, 3, 1);
- dsi_if_enable(dsidev, 1);
- dsi_force_tx_stop_mode_io(dsidev);
+ if (!dssdev->skip_init) {
+ dsi_vc_enable(dsidev, 0, 1);
+ dsi_vc_enable(dsidev, 1, 1);
+ dsi_vc_enable(dsidev, 2, 1);
+ dsi_vc_enable(dsidev, 3, 1);
+ dsi_if_enable(dsidev, 1);
+ dsi_force_tx_stop_mode_io(dsidev);
+ }
return 0;
err3:
@@ -4198,22 +4466,6 @@
dsi_pll_uninit(dsidev, disconnect_lanes);
}
-static int dsi_core_init(struct platform_device *dsidev)
-{
- /* Autoidle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
-
- /* ENWAKEUP */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
-
- /* SIDLEMODE smart-idle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
-
- _dsi_initialize_irq(dsidev);
-
- return 0;
-}
-
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4229,37 +4481,40 @@
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
- enable_clocks(1);
- dsi_enable_pll_clock(dsidev, 1);
-
- r = _dsi_reset(dsidev);
+ r = dsi_runtime_get(dsidev);
if (r)
- goto err1;
+ goto err_get_dsi;
- dsi_core_init(dsidev);
+ if (!dssdev->skip_init)
+ dsi_enable_pll_clock(dsidev, 1);
- r = dsi_display_init_dispc(dssdev);
- if (r)
- goto err1;
+ _dsi_initialize_irq(dsidev);
+
+ if (!dssdev->skip_init) {
+ r = dsi_display_init_dispc(dssdev);
+ if (r)
+ goto err_init_dispc;
+ }
r = dsi_display_init_dsi(dssdev);
if (r)
- goto err2;
+ goto err_init_dsi;
mutex_unlock(&dsi->lock);
return 0;
-err2:
+err_init_dsi:
dsi_display_uninit_dispc(dssdev);
-err1:
- enable_clocks(0);
+err_init_dispc:
dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
+err_get_dsi:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
@@ -4282,7 +4537,7 @@
dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
omap_dss_stop_device(dssdev);
@@ -4322,9 +4577,12 @@
DSSDBG("DSI init\n");
- /* XXX these should be figured out dynamically */
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
+ if (dssdev->phy.dsi.type == OMAP_DSS_DSI_TYPE_CMD_MODE) {
+ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+ } else {
+ dssdev->caps = 0;
+ }
if (dsi->vdds_dsi_reg == NULL) {
struct regulator *vdds_dsi;
@@ -4437,7 +4695,44 @@
dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-static int dsi_init(struct platform_device *dsidev)
+static int dsi_get_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct clk *clk;
+
+ clk = clk_get(&dsidev->dev, "dss_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get dss_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ dsi->dss_clk = clk;
+
+ clk = clk_get(&dsidev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ clk_put(dsi->dss_clk);
+ dsi->dss_clk = NULL;
+ return PTR_ERR(clk);
+ }
+
+ dsi->sys_clk = clk;
+
+ return 0;
+}
+
+static void dsi_put_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ if (dsi->dss_clk)
+ clk_put(dsi->dss_clk);
+ if (dsi->sys_clk)
+ clk_put(dsi->sys_clk);
+}
+
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *dsidev)
{
struct omap_display_platform_data *dss_plat_data;
struct omap_dss_board_info *board_info;
@@ -4449,7 +4744,7 @@
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
r = -ENOMEM;
- goto err0;
+ goto err_alloc;
}
dsi->pdev = dsidev;
@@ -4472,6 +4767,14 @@
mutex_init(&dsi->lock);
sema_init(&dsi->bus_lock, 1);
+ r = dsi_get_clocks(dsidev);
+ if (r)
+ goto err_get_clk;
+
+ mutex_init(&dsi->runtime_lock);
+
+ pm_runtime_enable(&dsidev->dev);
+
INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
@@ -4484,26 +4787,26 @@
if (!dsi_mem) {
DSSERR("can't get IORESOURCE_MEM DSI\n");
r = -EINVAL;
- goto err1;
+ goto err_ioremap;
}
dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi->base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
- goto err1;
+ goto err_ioremap;
}
dsi->irq = platform_get_irq(dsi->pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err2;
+ goto err_get_irq;
}
r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
dev_name(&dsidev->dev), dsi->pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto err2;
+ goto err_get_irq;
}
/* DSI VCs initialization */
@@ -4515,7 +4818,9 @@
dsi_calc_clock_param_ranges(dsidev);
- enable_clocks(1);
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ goto err_get_dsi;
rev = dsi_read_reg(dsidev, DSI_REVISION);
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
@@ -4523,21 +4828,32 @@
dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
return 0;
-err2:
+
+err_get_dsi:
+ free_irq(dsi->irq, dsi->pdev);
+err_get_irq:
iounmap(dsi->base);
-err1:
+err_ioremap:
+ pm_runtime_disable(&dsidev->dev);
+err_get_clk:
kfree(dsi);
-err0:
+err_alloc:
return r;
}
-static void dsi_exit(struct platform_device *dsidev)
+static int omap_dsi1hw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ pm_runtime_disable(&dsidev->dev);
+
+ dsi_put_clocks(dsidev);
+
if (dsi->vdds_dsi_reg != NULL) {
if (dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -4553,29 +4869,6 @@
kfree(dsi);
- DSSDBG("omap_dsi_exit\n");
-}
-
-/* DSI1 HW IP initialisation */
-static int omap_dsi1hw_probe(struct platform_device *dsidev)
-{
- int r;
-
- r = dsi_init(dsidev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
-err_dsi:
- return r;
-}
-
-static int omap_dsi1hw_remove(struct platform_device *dsidev)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- dsi_exit(dsidev);
- WARN_ON(dsi->scp_clk_refcount > 0);
return 0;
}
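
dsi_video_proto_config() above converts the DISPC horizontal timings into DSI byte clocks with dispc_to_dsi_clock(), rounding to the nearest lane multiple, and programs HBP/HFP/HSA and the total line length TL from the results. The worked example below reproduces that arithmetic in isolation; the timing numbers are arbitrary samples, not values from this patch.

#include <stdio.h>

static int dispc_to_dsi_clock(int val, int bytes_per_pixel, int lanes)
{
	/* val * bpp / lanes, rounded to nearest by adding lanes/2 first */
	return (val * bytes_per_pixel + lanes / 2) / lanes;
}

int main(void)
{
	int hbp = 32, hfp = 48, hsw = 4, x_res = 864;	/* sample timings */
	int bpp = 3, lanes = 4;				/* 24-bit pixels, 4 lanes */

	int hbp_clk = dispc_to_dsi_clock(hbp, bpp, lanes);
	int hfp_clk = dispc_to_dsi_clock(hfp, bpp, lanes);
	int hsa_clk = dispc_to_dsi_clock(hsw, bpp, lanes);
	int tl = hbp_clk + hfp_clk + hsa_clk +
		 dispc_to_dsi_clock(x_res, bpp, lanes);

	printf("HBP=%d HFP=%d HSA=%d TL=%d (byte clocks)\n",
	       hbp_clk, hfp_clk, hsa_clk, tl);
	return 0;
}
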
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d9489d5..964eb08 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -28,6 +28,8 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -59,15 +61,12 @@
static struct {
struct platform_device *pdev;
void __iomem *base;
- int ctx_id;
+
+ struct mutex runtime_lock;
+ int runtime_count;
struct clk *dpll4_m4_ck;
- struct clk *dss_ick;
- struct clk *dss_fck;
- struct clk *dss_sys_clk;
- struct clk *dss_tv_fck;
- struct clk *dss_video_fck;
- unsigned num_clks_enabled;
+ struct clk *dss_clk;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -78,6 +77,7 @@
enum omap_dss_clk_source dispc_clk_source;
enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
@@ -87,13 +87,6 @@
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
};
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
-static int _omap_dss_wait_reset(void);
-
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss.base + idx.idx);
@@ -109,12 +102,10 @@
#define RR(reg) \
dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
-void dss_save_context(void)
+static void dss_save_context(void)
{
- if (cpu_is_omap24xx())
- return;
+ DSSDBG("dss_save_context\n");
- SR(SYSCONFIG);
SR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -122,14 +113,19 @@
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
+
+ dss.ctx_valid = true;
+
+ DSSDBG("context saved\n");
}
-void dss_restore_context(void)
+static void dss_restore_context(void)
{
- if (_omap_dss_wait_reset())
- DSSERR("DSS not coming out of reset after sleep\n");
+ DSSDBG("dss_restore_context\n");
- RR(SYSCONFIG);
+ if (!dss.ctx_valid)
+ return;
+
RR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -137,6 +133,8 @@
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
+
+ DSSDBG("context restored\n");
}
#undef SR
@@ -234,6 +232,7 @@
return dss_generic_clk_source_names[clk_src];
}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
@@ -241,13 +240,14 @@
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
+ fclk_rate = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -273,14 +273,15 @@
fclk_rate);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
@@ -294,7 +295,7 @@
DUMPREG(DSS_SDI_STATUS);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
#undef DUMPREG
}
@@ -437,7 +438,7 @@
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
}
return 0;
@@ -467,7 +468,7 @@
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
unsigned long prate;
@@ -512,7 +513,7 @@
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -539,7 +540,7 @@
if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -616,28 +617,6 @@
return 0;
}
-static int _omap_dss_wait_reset(void)
-{
- int t = 0;
-
- while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- if (++t > 1000) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _omap_dss_reset(void)
-{
- /* Soft reset */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
- return _omap_dss_wait_reset();
-}
-
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
@@ -663,48 +642,152 @@
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
-static int dss_init(void)
+static int dss_get_clocks(void)
+{
+ struct clk *clk;
+ int r;
+
+ clk = clk_get(&dss.pdev->dev, "dss_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock dss_clk\n");
+ r = PTR_ERR(clk);
+ goto err;
+ }
+
+ dss.dss_clk = clk;
+
+ if (cpu_is_omap34xx()) {
+ clk = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get dpll4_m4_ck\n");
+ r = PTR_ERR(clk);
+ goto err;
+ }
+ } else if (cpu_is_omap44xx()) {
+ clk = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get dpll_per_m5x2_ck\n");
+ r = PTR_ERR(clk);
+ goto err;
+ }
+ } else { /* omap24xx */
+ clk = NULL;
+ }
+
+ dss.dpll4_m4_ck = clk;
+
+ return 0;
+
+err:
+ if (dss.dss_clk)
+ clk_put(dss.dss_clk);
+ if (dss.dpll4_m4_ck)
+ clk_put(dss.dpll4_m4_ck);
+
+ return r;
+}
+
+static void dss_put_clocks(void)
+{
+ if (dss.dpll4_m4_ck)
+ clk_put(dss.dpll4_m4_ck);
+ clk_put(dss.dss_clk);
+}
+
+int dss_runtime_get(void)
{
int r;
- u32 rev;
+
+ mutex_lock(&dss.runtime_lock);
+
+ if (dss.runtime_count++ == 0) {
+ DSSDBG("dss_runtime_get\n");
+
+ clk_enable(dss.dss_clk);
+
+ r = pm_runtime_get_sync(&dss.pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err;
+
+ dss_restore_context();
+ }
+
+ mutex_unlock(&dss.runtime_lock);
+
+ return 0;
+
+err:
+ clk_disable(dss.dss_clk);
+ mutex_unlock(&dss.runtime_lock);
+ return r;
+}
+
+void dss_runtime_put(void)
+{
+ mutex_lock(&dss.runtime_lock);
+
+ if (--dss.runtime_count == 0) {
+ int r;
+
+ DSSDBG("dss_runtime_put\n");
+
+ dss_save_context();
+
+ r = pm_runtime_put_sync(&dss.pdev->dev);
+ WARN_ON(r);
+
+ clk_disable(dss.dss_clk);
+ }
+
+ mutex_unlock(&dss.runtime_lock);
+}
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s)
+{
+ dss_dump_clocks(s);
+ dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+}
+#endif
+
+/* DSS HW IP initialisation */
+static int omap_dsshw_probe(struct platform_device *pdev)
+{
struct resource *dss_mem;
- struct clk *dpll4_m4_ck;
+ u32 rev;
+ int r;
+
+ dss.pdev = pdev;
dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
if (!dss_mem) {
DSSERR("can't get IORESOURCE_MEM DSS\n");
r = -EINVAL;
- goto fail0;
+ goto err_ioremap;
}
dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
r = -ENOMEM;
- goto fail0;
+ goto err_ioremap;
}
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+ r = dss_get_clocks();
+ if (r)
+ goto err_clocks;
-#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- *
- * This bug was at least present on OMAP3430. It's unknown
- * if it happens on OMAP2 or OMAP3630.
- */
- msleep(50);
-#endif
+ mutex_init(&dss.runtime_lock);
- _omap_dss_reset();
+ pm_runtime_enable(&pdev->dev);
- /* autoidle */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
+ r = dss_runtime_get();
+ if (r)
+ goto err_runtime_get;
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
@@ -714,404 +797,12 @@
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
- if (cpu_is_omap34xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dpll4_m4_ck)) {
- DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
- }
- } else if (cpu_is_omap44xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
- if (IS_ERR(dpll4_m4_ck)) {
- DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
- }
- } else { /* omap24xx */
- dpll4_m4_ck = NULL;
- }
-
- dss.dpll4_m4_ck = dpll4_m4_ck;
-
dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss_save_context();
-
- rev = dss_read_reg(DSS_REVISION);
- printk(KERN_INFO "OMAP DSS rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- return 0;
-
-fail1:
- iounmap(dss.base);
-fail0:
- return r;
-}
-
-static void dss_exit(void)
-{
- if (dss.dpll4_m4_ck)
- clk_put(dss.dpll4_m4_ck);
-
- iounmap(dss.base);
-}
-
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
- int r;
-
- if (!pdata->board_data->get_last_off_on_transaction_id)
- return 0;
- r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
- if (r < 0) {
- dev_err(&dss.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != dss.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- dss.ctx_id, id);
- dss.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&dss.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
-
- dss.dss_ick = NULL;
- dss.dss_fck = NULL;
- dss.dss_sys_clk = NULL;
- dss.dss_tv_fck = NULL;
- dss.dss_video_fck = NULL;
-
- r = dss_get_clock(&dss.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&dss.dss_fck, "fck");
- if (r)
- goto err;
-
- if (!pdata->opt_clock_available) {
- r = -ENODEV;
- goto err;
- }
-
- if (pdata->opt_clock_available("sys_clk")) {
- r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("tv_clk")) {
- r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("video_clk")) {
- r = dss_get_clock(&dss.dss_video_fck, "video_clk");
- if (r)
- goto err;
- }
-
- return 0;
-
-err:
- if (dss.dss_ick)
- clk_put(dss.dss_ick);
- if (dss.dss_fck)
- clk_put(dss.dss_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
-
- return r;
-}
-
-static void dss_put_clocks(void)
-{
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- clk_put(dss.dss_fck);
- clk_put(dss.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(dss.dss_ick);
- case DSS_CLK_FCK:
- return clk_get_rate(dss.dss_fck);
- case DSS_CLK_SYSCK:
- return clk_get_rate(dss.dss_sys_clk);
- case DSS_CLK_TVFCK:
- return clk_get_rate(dss.dss_tv_fck);
- case DSS_CLK_VIDFCK:
- return clk_get_rate(dss.dss_video_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK)
- ++num_clks;
- if (clks & DSS_CLK_SYSCK)
- ++num_clks;
- if (clks & DSS_CLK_TVFCK)
- ++num_clks;
- if (clks & DSS_CLK_VIDFCK)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_enable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_enable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_enable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_enable(dss.dss_video_fck);
-
- dss.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = dss.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- /*
- * HACK: On omap4 the registers may not be accessible right after
- * enabling the clocks. At some point this will be handled by
- * pm_runtime, but for the time begin this should make things work.
- */
- if (cpu_is_omap44xx() && check_ctx)
- udelay(10);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
-}
-
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_disable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_disable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_disable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_disable(dss.dss_video_fck);
-
- dss.num_clks_enabled -= num_clks;
-}
-
-void dss_clk_disable(enum dss_clock clks)
-{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(dss.num_clks_enabled < num_clks);
-
- if (dss.num_clks_enabled == num_clks)
- save_all_ctx();
- }
-
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_enable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_disable_no_ctx(clks);
-}
-
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- dss.dss_ick,
- dss.dss_fck,
- dss.dss_sys_clk,
- dss.dss_tv_fck,
- dss.dss_video_fck
- };
-
- const char *names[5] = {
- "ick",
- "fck",
- "sys_clk",
- "tv_fck",
- "video_fck"
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
- names[i],
- clocks[i]->name,
- 24 - strlen(names[i]) - strlen(clocks[i]->name),
- "",
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
-}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
-
-/* DEBUGFS */
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-void dss_debug_dump_clocks(struct seq_file *s)
-{
- core_dump_clocks(s);
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
-}
-#endif
-
-
-/* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
-{
- int r;
-
- dss.pdev = pdev;
-
- r = dss_get_clocks();
- if (r)
- goto err_clocks;
-
- dss_clk_enable_all_no_ctx();
-
- dss.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", dss.ctx_id);
-
- r = dss_init();
- if (r) {
- DSSERR("Failed to initialize DSS\n");
- goto err_dss;
- }
-
r = dpi_init();
if (r) {
DSSERR("Failed to initialize DPI\n");
@@ -1124,33 +815,37 @@
goto err_sdi;
}
- dss_clk_disable_all_no_ctx();
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put();
+
return 0;
err_sdi:
dpi_exit();
err_dpi:
- dss_exit();
-err_dss:
- dss_clk_disable_all_no_ctx();
+ dss_runtime_put();
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
err_clocks:
+ iounmap(dss.base);
+err_ioremap:
return r;
}
static int omap_dsshw_remove(struct platform_device *pdev)
{
+ dpi_exit();
+ sdi_exit();
- dss_exit();
+ iounmap(dss.base);
- /*
- * As part of hwmod changes, DSS is not the only controller of dss
- * clocks; hwmod framework itself will also enable clocks during hwmod
- * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
- * need to disable clocks if their usecounts > 1.
- */
- WARN_ON(dss.num_clks_enabled > 0);
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
+
return 0;
}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8ab6d43..22cd979 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -109,14 +109,6 @@
OMAP_DSS_PARALLELMODE_DSI,
};
-enum dss_clock {
- DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
- DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
- DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
- DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
- DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
-};
-
enum dss_hdmi_venc_clk_source_select {
DSS_VENC_TV_CLK = 0,
DSS_HDMI_M_PCLK = 1,
@@ -164,14 +156,28 @@
bool use_sys_clk;
};
-/* HDMI PLL structure */
-struct hdmi_pll_info {
- u16 regn;
- u16 regm;
- u32 regmf;
- u16 regm2;
- u16 regsd;
- u16 dcofreq;
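+/*
+ * Snapshot of the per-pipeline DISPC register settings consumed by the
+ * DMA FIFO threshold calculation (see sa_calc_wrap() in fifothreshold.c,
+ * prototyped below).
+ */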
+struct dispc_config {
+ u32 sizex, sizey;
+ u32 burstsize;
+ u32 pixelinc;
+ u32 rowinc;
+ u32 bursttype;
+ u32 antiflicker;
+ u32 doublestride;
+ u32 ba;
+ u32 bacbcr;
+ u32 format;
+ u32 rotation;
+ u32 gfx_top_buffer;
+ u32 gfx_bottom_buffer;
+ u32 vid1_top_buffer;
+ u32 vid1_bottom_buffer;
+ u32 vid2_top_buffer;
+ u32 vid2_bottom_buffer;
+ u32 vid3_top_buffer;
+ u32 vid3_bottom_buffer;
+ u32 wb_top_buffer;
+ u32 wb_bottom_buffer;
};
struct seq_file;
@@ -220,13 +226,10 @@
int dss_init_platform_driver(void);
void dss_uninit_platform_driver(void);
+int dss_runtime_get(void);
+void dss_runtime_put(void);
+
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-void dss_save_context(void);
-void dss_restore_context(void);
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
@@ -283,15 +286,15 @@
int dsi_init_platform_driver(void);
void dsi_uninit_platform_driver(void);
+int dsi_runtime_get(struct platform_device *dsidev);
+void dsi_runtime_put(struct platform_device *dsidev);
+
void dsi_dump_clocks(struct seq_file *s);
void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
-void dsi_save_context(void);
-void dsi_restore_context(void);
-
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
@@ -317,6 +320,13 @@
static inline void dsi_uninit_platform_driver(void)
{
}
+static inline int dsi_runtime_get(struct platform_device *dsidev)
+{
+ return 0;
+}
+static inline void dsi_runtime_put(struct platform_device *dsidev)
+{
+}
static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -384,8 +394,8 @@
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
-void dispc_save_context(void);
-void dispc_restore_context(void);
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
@@ -402,6 +412,14 @@
void dispc_enable_fifomerge(bool enable);
void dispc_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size);
+void dispc_set_zorder(enum omap_plane plane,
+ enum omap_overlay_zorder zorder);
+void dispc_enable_zorder(enum omap_plane plane, bool enable);
+void dispc_enable_cpr(enum omap_channel channel, bool enable);
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs);
+void _dispc_setup_color_conv_coef(enum omap_plane plane,
+ const struct omap_dss_cconv_coefs *ct);
void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
@@ -418,15 +436,26 @@
u16 out_width, u16 out_height,
enum omap_color_mode color_mode,
bool ilace,
+ int x_decim, int y_decim, bool five_taps,
enum omap_dss_rotation_type rotation_type,
u8 rotation, bool mirror,
u8 global_alpha, u8 pre_mult_alpha,
enum omap_channel channel,
u32 puv_addr);
+int dispc_scaling_decision(u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ enum omap_plane plane,
+ enum omap_color_mode color_mode,
+ enum omap_channel channel, u8 rotation,
+ enum omap_dss_rotation_type type,
+ u16 min_x_decim, u16 max_x_decim,
+ u16 min_y_decim, u16 max_y_decim,
+ u16 *x_decim, u16 *y_decim, bool *three_tap);
bool dispc_go_busy(enum omap_channel channel);
void dispc_go(enum omap_channel channel);
-void dispc_enable_channel(enum omap_channel channel, bool enable);
+void dispc_enable_channel(enum omap_channel channel,
+ enum omap_display_type type, bool enable);
bool dispc_is_channel_enabled(enum omap_channel channel);
int dispc_enable_plane(enum omap_plane plane, bool enable);
void dispc_enable_replication(enum omap_plane plane, bool enable);
@@ -467,7 +496,7 @@
struct dispc_clock_info *cinfo);
int dispc_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
-
+u32 sa_calc_wrap(struct dispc_config *dispc_reg_config, u32 channel_no);
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
@@ -508,8 +537,26 @@
void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev);
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings);
+int omapdss_hdmi_display_set_mode(struct omap_dss_device *dssdev,
+ struct fb_videomode *mode);
+void omapdss_hdmi_restart(void);
+int hdmi_panel_hpd_handler(int hpd);
+int omapdss_hdmi_get_pixel_clock(void);
+int omapdss_hdmi_get_mode(void);
+int omapdss_hdmi_get_deepcolor(void);
+void omapdss_hdmi_set_deepcolor(int val);
+int hdmi_get_current_hpd(void);
+void hdmi_get_monspecs(struct fb_monspecs *specs);
+u8 *hdmi_read_edid(struct omap_video_timings *);
+
int hdmi_panel_init(void);
void hdmi_panel_exit(void);
+void hdmi_dump_regs(struct seq_file *s);
+int omapdss_hdmi_register_hdcp_callbacks(void (*hdmi_start_frame_cb)(void),
+ void (*hdmi_irq_cb)(int status),
+ bool (*hdmi_power_on_cb)(void));
+int omap_dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
/* RFBI */
#ifdef CONFIG_OMAP2_DSS_RFBI
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 1c18888..aba2250 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -197,7 +197,17 @@
OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
OMAP_DSS_COLOR_RGBX32,
- /* OMAP_DSS_VIDEO2 */
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
+ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
+ OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
+ OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
+ OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
+ OMAP_DSS_COLOR_RGBX32,
+
+ /* OMAP_DSS_VIDEO3 */
OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
@@ -286,7 +296,9 @@
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
- FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
+ FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
+ FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -306,7 +318,8 @@
FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
- FEAT_DSI_PLL_FREQSEL,
+ FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -327,10 +340,13 @@
FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
- FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
+ FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V |
+ FEAT_ALPHA_OMAP3_COMPAT | FEAT_OVL_VID3 |
+ FEAT_OVL_ZORDER,
.num_mgrs = 3,
- .num_ovls = 3,
+ .num_ovls = 4,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
@@ -348,10 +364,13 @@
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
- FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
+ FEAT_PRELOAD | FEAT_FIR_COEF_V |
+ FEAT_ALPHA_OMAP3_COMPAT | FEAT_OVL_VID3 |
+ FEAT_OVL_ZORDER,
.num_mgrs = 3,
- .num_ovls = 3,
+ .num_ovls = 4,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 07b346f..28f44ed 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -21,7 +21,7 @@
#define __OMAP2_DSS_FEATURES_H
#define MAX_DSS_MANAGERS 3
-#define MAX_DSS_OVERLAYS 3
+#define MAX_DSS_OVERLAYS 4
#define MAX_DSS_LCD_MANAGERS 2
#define MAX_NUM_DSI 2
@@ -51,6 +51,13 @@
FEAT_HDMI_CTS_SWMODE = 1 << 19,
FEAT_HANDLE_UV_SEPARATE = 1 << 20,
FEAT_ATTR2 = 1 << 21,
+ FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22,
+ FEAT_CPR = 1 << 23,
+ FEAT_PRELOAD = 1 << 24,
+ FEAT_FIR_COEF_V = 1 << 25,
+ FEAT_ALPHA_OMAP3_COMPAT = 1 << 26,
+ FEAT_OVL_VID3 = 1 << 27,
+ FEAT_OVL_ZORDER = 1 << 28,
};
/* DSS register field id */
diff --git a/drivers/video/omap2/dss/fifothreshold.c b/drivers/video/omap2/dss/fifothreshold.c
new file mode 100644
index 0000000..431f4c1
--- /dev/null
+++ b/drivers/video/omap2/dss/fifothreshold.c
@@ -0,0 +1,422 @@
+/*
+ * linux/drivers/video/omap2/dss/fifothreshold.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <video/omapdss.h>
+#include "dss.h"
+
+#define YUV422_UYVY 10
+#define YUV422_YUV2 11
+
+struct sa_struct {
+ u32 min_sa;
+ u32 max_lt;
+ u32 min_lt;
+};
+
+struct ddma_config {
+ u16 twomode;
+ u16 antifckr;
+ u16 double_stride;
+ u16 bpp;
+ u16 bitmap;
+ u16 pixel_inc;
+ u16 max_burst;
+ u16 gballoc;
+ u16 vballoc;
+ u16 yuv420;
+ u32 rowincr;
+ u32 ba;
+ u32 size_x;
+ u32 size_y;
+};
+
+/*
+ * bitpk : returns the bit field [left:right] of a larger bit
+ * vector, as needed at various points in the algorithm.
+ * Ex: bitpk(BaseAddress,28,27) = BaseAddress[28:27]
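+ *     e.g. bitpk(0xC0000000, 31, 30) = (0xC0000000 >> 30) & 0x3 = 0x3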
+ */
+static inline u32 bitpk(unsigned long a, u32 left, u32 right)
+{
+ return (a >> right) & ((2 << (left - right)) - 1);
+}
+
+/*
+ * dispc_reg_to_ddma converts the DISPC register values into the
+ * information used by the DDMA. Ex: format = 9 => BytesPerPixel = 3
+ * dispc_reg_config :: DISPC register information
+ * channel_no :: channel number
+ * y_nuv :: 1->Luma frame parameters and calculation;
+ * 0->Chroma frame parameters and calculation
+ * bh_config :: output struct with the information used by the algorithm
+ */
+static void dispc_reg_to_ddma(struct dispc_config *dispc_reg_config,
+ u32 channel_no, u32 y_nuv, struct ddma_config *bh_config)
+{
+ u16 i;
+ /* GFX pipe specific conversions */
+ if (channel_no == 0) {
+ /*
+ * For bitmap formats the pixel information is stored in bits.
+ * This needs to be divided by 8 to convert into bytes.
+ */
+ bh_config->bitmap = (dispc_reg_config->format <= 2) ? 8 : 1;
+ /*
+ * In case of GFX there is no YUV420 mode:
+ * yuv420: 1->nonYUV420 2-> YUV420
+ */
+ bh_config->yuv420 = 1;
+ bh_config->pixel_inc = dispc_reg_config->pixelinc;
+ switch (dispc_reg_config->format) {
+ /* LUT for format <--> bytes per pixel */
+ case 0:
+ case 3:
+ i = 1;
+ break;
+ case 1:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 10:
+ case 11:
+ case 15:
+ i = 2;
+ break;
+ case 9:
+ i = 3;
+ break;
+ default:
+ i = 4;
+ break;
+ }
+ bh_config->bpp = i;
+ i = 0;
+ /*
+ * Chroma double_stride value of DISPC registers is invalid
+ * for GFX where there is no YUV420 format.
+ */
+ bh_config->double_stride = 0;
+ bh_config->antifckr = dispc_reg_config->antiflicker;
+ bh_config->ba = dispc_reg_config->ba;
+ bh_config->size_x = dispc_reg_config->sizex;
+ } else {
+ /*
+ * For all Video channels
+ *
+ * In Video there is no bitmap format; all formats store pixels
+ * in whole bytes.
+ */
+ bh_config->bitmap = 1;
+ /* No antiflicker mode for Video channels */
+ bh_config->antifckr = 0;
+ /*
+ * 1->nonYUV420 2-> YUV420 : Used in breaking up the buffer
+ * allocation:: Top:Luma, Bottom:Chroma
+ */
+ bh_config->yuv420 = (dispc_reg_config->format == 0) ? 2 : 1;
+
+ switch (dispc_reg_config->format) {
+ /* LUT for format <--> bytes per pixel */
+ /* bpp:1 for Luma bpp:2 for Chroma */
+ case 0:
+ i = (y_nuv ? 1 : 2);
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 15:
+ i = 2;
+ break;
+ case 9:
+ i = 3;
+ break;
+ case 10:
+ case 11:
+ i = (dispc_reg_config->rotation == 1 ||
+ dispc_reg_config->rotation == 3) ? 4 : 2;
+ break;
+
+ /* Formats 10,11 => YUV422. YUV422 + no rotation: bpp = bpp/2 */
+ default:
+ i = 4;
+ break;
+ }
+
+ /*
+ * PixelIncrement = numberOfPixelsInterleaved * BytesPerPixel + 1.
+ * For Chroma the pixel increment is doubled so that the same
+ * number of Chroma pixels are skipped as Luma pixels.
+ */
+ bh_config->pixel_inc =
+ (dispc_reg_config->format == 0 && y_nuv == 0) ?
+ (dispc_reg_config->pixelinc - 1) * 2 + 1 :
+ dispc_reg_config->pixelinc;
+
+ /*
+ * For YUV422 with no rotation bpp = bpp/2; correct the pixel
+ * increment accordingly for use in the stride calculation.
+ */
+ bh_config->pixel_inc =
+ ((dispc_reg_config->format == YUV422_UYVY ||
+ dispc_reg_config->format == YUV422_YUV2) &&
+ (dispc_reg_config->rotation == 0 ||
+ dispc_reg_config->rotation == 2)) ?
+ (dispc_reg_config->pixelinc - 1) / 2 + 1 :
+ bh_config->pixel_inc;
+ bh_config->bpp = i;
+ bh_config->double_stride =
+ (dispc_reg_config->format == 0 && y_nuv == 0) ?
+ dispc_reg_config->doublestride : 0;
+
+ /* Conditions in which SizeY is halved, recorded in i */
+ i = (((dispc_reg_config->rotation == 1 ||
+ dispc_reg_config->rotation == 3) &&
+ (dispc_reg_config->format == YUV422_UYVY ||
+ dispc_reg_config->format == YUV422_YUV2)) ||
+ ((dispc_reg_config->rotation == 1 ||
+ dispc_reg_config->rotation == 3 ||
+ bh_config->double_stride == 1) &&
+ dispc_reg_config->format == 0 && y_nuv == 0)) ? 1 : 0;
+
+ /* Choosing between BA_Y and BA_CbCr */
+ bh_config->ba =
+ (dispc_reg_config->format == 0 && y_nuv == 0) ?
+ dispc_reg_config->bacbcr :
+ dispc_reg_config->ba;
+
+ /* SizeX halved for the Chroma frame */
+ bh_config->size_x =
+ (dispc_reg_config->format == 0 && y_nuv == 0) ?
+ (dispc_reg_config->sizex + 1) / 2 - 1 :
+ dispc_reg_config->sizex;
+ }
+
+ bh_config->twomode = dispc_reg_config->bursttype;
+ bh_config->size_y = ((dispc_reg_config->sizey + 1) >> i) - 1;
+ bh_config->rowincr = dispc_reg_config->rowinc;
+ bh_config->max_burst = 1 << (dispc_reg_config->burstsize + 1);
+
+ /* Count how many line buffers (GFX and VID/WB) are allocated to this channel */
+ bh_config->gballoc =
+ (dispc_reg_config->gfx_bottom_buffer == channel_no) +
+ (dispc_reg_config->gfx_top_buffer == channel_no);
+ bh_config->vballoc =
+ (dispc_reg_config->vid1_bottom_buffer == channel_no) +
+ (dispc_reg_config->vid1_top_buffer == channel_no) +
+ (dispc_reg_config->vid2_bottom_buffer == channel_no) +
+ (dispc_reg_config->vid2_top_buffer == channel_no) +
+ (dispc_reg_config->vid3_bottom_buffer == channel_no) +
+ (dispc_reg_config->vid3_top_buffer == channel_no) +
+ (dispc_reg_config->wb_bottom_buffer == channel_no) +
+ (dispc_reg_config->wb_top_buffer == channel_no);
+}
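+
+/*
+ * Example of the conversion above: a video pipe in format 0 (the
+ * YUV420/NV12 case) yields yuv420 = 2 and, for the Luma pass (y_nuv = 1),
+ * bpp = 1; the Chroma pass (y_nuv = 0) uses bpp = 2, BA_CbCr, half the
+ * x-size and a doubled pixel increment.
+ */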
+
+/*
+ * sa_calc calculates SA and LT values for one set of DISPC reg inputs
+ * dispc_reg_config :: DISPC register information
+ * channel_no :: channel number
+ * y_nuv :: 1->Luma frame parameters and calculation
+ * 0->Chroma frame parameters and calculation
+ * sa_info :: Output struct having information of SA and LT values
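+ *
+ * Worked example of the line-buffer arithmetic below: one GFX buffer
+ * (gballoc = 1, vballoc = 0, non-YUV420) gives Tot_mem = 640 / 4 = 160
+ * sixteen-byte words; a 1280-pixel RGB16 line occupies 2560 bytes, i.e.
+ * exactly 160 such words, so a single full line fits (i = 1).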
+ */
+static void sa_calc(struct dispc_config *dispc_reg_config, u32 channel_no,
+ u32 y_nuv, struct sa_struct *sa_info)
+{
+ u32 Sorientation, mode, mode_0, mode_1;
+ int blkh_opt;
+ int pagemode;
+ long int sizeX_nopred, sizeX_pred;
+ u32 pict_16word;
+ long int pict_16word_ceil;
+ long int stride;
+ int stride_8k;
+ int stride_16k;
+ int stride_32k;
+ int stride_64k;
+ int stride_ok, stride_val;
+ int linesRem;
+ u32 BA_bhbit, bh_max;
+ int burstHeight;
+ int i;
+ int bh2d_cond;
+ int C1, c1flag, C2;
+ long int Tot_mem;
+ struct ddma_config bh_config;
+
+ dispc_reg_to_ddma(dispc_reg_config, channel_no, y_nuv, &bh_config);
+
+ mode = bitpk(bh_config.ba, 28, 27);
+ mode_1 = bitpk(bh_config.ba, 28, 28);
+ mode_0 = bitpk(bh_config.ba, 27, 27);
+ Sorientation = bitpk(bh_config.ba, 31, 31);
+
+ pagemode = (mode == 3);
+ blkh_opt = mode_1 ? 2 : 4;
+
+ bh_config.double_stride = (bh_config.double_stride == 1
+ && bh_config.twomode == 1) ? 2 : 1;
+
+ /* SizeX in frame = number of pixels * BytesPerPixel */
+ sizeX_nopred = ((bh_config.size_x + 1) * bh_config.bpp) /
+ bh_config.bitmap;
+ /* Size including skipped pixels */
+ sizeX_pred = ((bh_config.size_x + 1) *
+ (bh_config.pixel_inc - 1 + bh_config.bpp)) / bh_config.bitmap;
+ stride = ((bh_config.rowincr - 1) + sizeX_pred) *
+ bh_config.double_stride;
+ stride_8k = stride == 8192 && mode_1 == 0 && Sorientation;
+ stride_16k = stride == 16384 && !(mode_0 != mode_1 && !Sorientation);
+ stride_32k = stride == 32768 && (mode_1 == 1 || !Sorientation);
+ stride_64k = stride == 65536 && !(mode_0 == mode_1) && !Sorientation;
+ stride_ok = (stride_8k || stride_16k || stride_32k || stride_64k);
+ stride_val = stride_64k ? 16 : stride_32k ? 15 : stride_16k ? 14 : 13;
+
+ linesRem = bh_config.size_y + 1;
+
+ /* Condition that enables 2D fetch on the OCP */
+ bh2d_cond = (bh_config.twomode && (pagemode == 0)
+ && stride_ok && (linesRem > 0));
+
+ /*
+ * BH calculation algorithm, depending on stride, the number of lines
+ * in the frame, and the Tiler alignment of the base address.
+ */
+ C1 = C2 = c1flag = 0;
+ for (i = 1; i <= 5 && linesRem > 0 && c1flag == 0; i++) {
+ if (bh2d_cond) {
+ /* 2D transfer */
+ BA_bhbit = bitpk(bh_config.ba,
+ stride_val + (mode_1 == 0),
+ stride_val);
+ bh_max = blkh_opt - BA_bhbit;
+
+ burstHeight = min(linesRem,
+ min((int) bh_config.max_burst, (int) bh_max));
+
+ if (burstHeight == 3 ||
+ (burstHeight == 4 && bh_config.antifckr == 1))
+ burstHeight = 2;
+ } else {
+ burstHeight = 1;
+ }
+ if ((C1 + burstHeight) <= 4 && c1flag == 0) {
+ /*
+ * C1 is incremented until it reaches 4; it tracks how many
+ * full lines are requested just before SA is reached
+ */
+ C1 += burstHeight;
+ } else {
+ if (c1flag == 0)
+ /*
+ * After C1 saturated to 4, next burstHeight
+ * decides C2: the number of partially filled
+ * lines when SA condition is reached
+ */
+ C2 = burstHeight;
+ c1flag = 1;
+ }
+ linesRem -= burstHeight;
+ bh_config.ba += stride * burstHeight;
+ }
+
+ /*
+ * Total line buffer memory (GFX buffers + VID/WB buffers) allocated,
+ * in terms of 16-byte word locations
+ */
+ Tot_mem = (640 * bh_config.gballoc + 1024 * bh_config.vballoc) /
+ (4 * bh_config.yuv420);
+ /*
+ * Ceil(rounded to higher integer) of Number of 16Byte Word locations
+ * used by single line of frame.
+ */
+ pict_16word_ceil = DIV_ROUND_UP(sizeX_nopred, 16);
+
+ /* Exact Number of 16Byte Word locations used by single line of frame */
+ pict_16word = sizeX_nopred / 16;
+
+ /*
+ * Number of sets of 4 lines that can fully fit into the memory
+ * buffers allocated.
+ */
+ i = Tot_mem / pict_16word_ceil;
+
+ if (i == 0) {
+ /* LineSize > MemoryLineBufferSize (Valid only for 1D) */
+ sa_info->min_sa = Tot_mem - 8;
+ } else if (i == 1) {
+ /*
+ * When MemoryLineBufferSize > LineSize >
+ * (MemoryLineBufferSize/2)
+ */
+ sa_info->min_sa = pict_16word + C2 * (Tot_mem -
+ pict_16word_ceil - 8);
+ } else {
+ /* All other cases */
+ sa_info->min_sa = i * pict_16word + C1 * pict_16word + C2 *
+ (Tot_mem - (pict_16word_ceil * i) - 8);
+ }
+
+ /* C2 == 0: no partially filled lines, so min_lt = 0 */
+ if (C2 == 0) {
+ sa_info->min_lt = 0;
+ } else if (bh_config.antifckr == 1) {
+ if (C1 == 3)
+ sa_info->min_lt = 3 * pict_16word_ceil + C2 * (Tot_mem -
+ (pict_16word_ceil*i));
+ else if (C1 == 4)
+ sa_info->min_lt = 2 * pict_16word_ceil + C2 * (Tot_mem -
+ (pict_16word_ceil*i));
+ } else {
+ sa_info->min_lt = C2 * (Tot_mem - (pict_16word_ceil*i));
+ }
+
+ sa_info->max_lt = max(sa_info->min_sa - 8, sa_info->min_lt + 1);
+}
+
+/*
+ * sa_calc_wrap calculates the final SA/LT value for one set of DISPC
+ * register inputs. It calls sa_calc() once for the Luma frame and, for
+ * the YUV420 format on non-GFX channels, a second time for the Chroma
+ * frame, and combines the results.
+ * dispc_reg_config :: DISPC register information
+ * channel_no :: channel number
+ */
+u32 sa_calc_wrap(struct dispc_config *dispc_reg_config, u32 channel_no)
+{
+ struct sa_struct sa_info_y;
+ struct sa_struct sa_info_uv;
+
+ /* SA values calculated for Luma frame */
+ sa_calc(dispc_reg_config, channel_no, 1, &sa_info_y);
+
+ /* Taken only for the YUV420 format and channels other than GFX */
+ if (dispc_reg_config->format == 0 && channel_no > 0) {
+ /* SA values calculated for Chroma Frame */
+ sa_calc(dispc_reg_config, channel_no, 0, &sa_info_uv);
+ return 2 * max(max(sa_info_y.min_sa - 8, sa_info_y.min_lt + 1),
+ max(sa_info_uv.min_sa - 8, sa_info_uv.min_lt + 1));
+ } else {
+ return sa_info_y.max_lt;
+ }
+}
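+
+/*
+ * The value returned above is intended for programming the DISPC FIFO
+ * threshold of the given channel; the caller (presumably dispc, via the
+ * sa_calc_wrap() prototype exported from dss.h) builds the dispc_config
+ * snapshot from the pipeline's register settings.
+ */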
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index fadd6a0..cfb82b5 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -26,147 +26,124 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
-#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
+#include <video/hdmi_ti_4xxx_ip.h>
+#include <linux/gpio.h>
+#include <linux/fb.h>
+#include <linux/omapfb.h>
#include "dss.h"
-#include "hdmi.h"
#include "dss_features.h"
-#define HDMI_DEFAULT_REGN 15
-#define HDMI_DEFAULT_REGM2 1
+#define HDMI_WP 0x0
+#define HDMI_CORE_SYS 0x400
+#define HDMI_CORE_AV 0x900
+#define HDMI_PLLCTRL 0x200
+#define HDMI_PHY 0x300
+
+/* HDMI EDID length (TODO: move this elsewhere) */
+#define HDMI_EDID_MAX_LENGTH 512
+#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
+#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
+#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
+#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
+#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
+
+#define OMAP_HDMI_TIMINGS_NB 34
static struct {
struct mutex lock;
struct omap_display_platform_data *pdata;
struct platform_device *pdev;
- void __iomem *base_wp; /* HDMI wrapper */
+ struct omap_dss_device *dssdev;
+ struct hdmi_ip_data hdmi_data;
int code;
int mode;
u8 edid[HDMI_EDID_MAX_LENGTH];
u8 edid_set;
+ bool can_do_hdmi;
+
bool custom_set;
+ enum hdmi_deep_color_mode deep_color;
struct hdmi_config cfg;
+ struct regulator *hdmi_reg;
- int hpd_gpio;
- bool phy_tx_enabled;
+ int hdmi_irq;
+ struct clk *sys_clk;
+ struct clk *hdmi_clk;
+
+ int runtime_count;
+ int enabled;
+ bool set_mode;
+ bool wp_reset_done;
+
+ void (*hdmi_start_frame_cb)(void);
+ void (*hdmi_irq_cb)(int);
+ bool (*hdmi_power_on_cb)(void);
} hdmi;
-/*
- * Logic for the below structure :
- * user enters the CEA or VESA timings by specifying the HDMI/DVI code.
- * There is a correspondence between CEA/VESA timing and code, please
- * refer to section 6.3 in HDMI 1.3 specification for timing code.
- *
- * In the below structure, cea_vesa_timings corresponds to all OMAP4
- * supported CEA and VESA timing values.code_cea corresponds to the CEA
- * code, It is used to get the timing from cea_vesa_timing array.Similarly
- * with code_vesa. Code_index is used for back mapping, that is once EDID
- * is read from the TV, EDID is parsed to find the timing values and then
- * map it to corresponding CEA or VESA index.
- */
-
-static const struct hdmi_timings cea_vesa_timings[OMAP_HDMI_TIMINGS_NB] = {
- { {640, 480, 25200, 96, 16, 48, 2, 10, 33} , 0 , 0},
- { {1280, 720, 74250, 40, 440, 220, 5, 5, 20}, 1, 1},
- { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1},
- { {720, 480, 27027, 62, 16, 60, 6, 9, 30}, 0, 0},
- { {2880, 576, 108000, 256, 48, 272, 5, 5, 39}, 0, 0},
- { {1440, 240, 27027, 124, 38, 114, 3, 4, 15}, 0, 0},
- { {1440, 288, 27000, 126, 24, 138, 3, 2, 19}, 0, 0},
- { {1920, 540, 74250, 44, 528, 148, 5, 2, 15}, 1, 1},
- { {1920, 540, 74250, 44, 88, 148, 5, 2, 15}, 1, 1},
- { {1920, 1080, 148500, 44, 88, 148, 5, 4, 36}, 1, 1},
- { {720, 576, 27000, 64, 12, 68, 5, 5, 39}, 0, 0},
- { {1440, 576, 54000, 128, 24, 136, 5, 5, 39}, 0, 0},
- { {1920, 1080, 148500, 44, 528, 148, 5, 4, 36}, 1, 1},
- { {2880, 480, 108108, 248, 64, 240, 6, 9, 30}, 0, 0},
- { {1920, 1080, 74250, 44, 638, 148, 5, 4, 36}, 1, 1},
- /* VESA From Here */
- { {640, 480, 25175, 96, 16, 48, 2 , 11, 31}, 0, 0},
- { {800, 600, 40000, 128, 40, 88, 4 , 1, 23}, 1, 1},
- { {848, 480, 33750, 112, 16, 112, 8 , 6, 23}, 1, 1},
- { {1280, 768, 79500, 128, 64, 192, 7 , 3, 20}, 1, 0},
- { {1280, 800, 83500, 128, 72, 200, 6 , 3, 22}, 1, 0},
- { {1360, 768, 85500, 112, 64, 256, 6 , 3, 18}, 1, 1},
- { {1280, 960, 108000, 112, 96, 312, 3 , 1, 36}, 1, 1},
- { {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38}, 1, 1},
- { {1024, 768, 65000, 136, 24, 160, 6, 3, 29}, 0, 0},
- { {1400, 1050, 121750, 144, 88, 232, 4, 3, 32}, 1, 0},
- { {1440, 900, 106500, 152, 80, 232, 6, 3, 25}, 1, 0},
- { {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30}, 1, 0},
- { {1366, 768, 85500, 143, 70, 213, 3, 3, 24}, 1, 1},
- { {1920, 1080, 148500, 44, 148, 80, 5, 4, 36}, 1, 1},
- { {1280, 768, 68250, 32, 48, 80, 7, 3, 12}, 0, 1},
- { {1400, 1050, 101000, 32, 48, 80, 4, 3, 23}, 0, 1},
- { {1680, 1050, 119000, 32, 48, 80, 6, 3, 21}, 0, 1},
- { {1280, 800, 79500, 32, 48, 80, 6, 3, 14}, 0, 1},
- { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1}
-};
-
-/*
- * This is a static mapping array which maps the timing values
- * with corresponding CEA / VESA code
- */
-static const int code_index[OMAP_HDMI_TIMINGS_NB] = {
- 1, 19, 4, 2, 37, 6, 21, 20, 5, 16, 17, 29, 31, 35, 32,
- /* <--15 CEA 17--> vesa*/
- 4, 9, 0xE, 0x17, 0x1C, 0x27, 0x20, 0x23, 0x10, 0x2A,
- 0X2F, 0x3A, 0X51, 0X52, 0x16, 0x29, 0x39, 0x1B
-};
-
-/*
- * This is reverse static mapping which maps the CEA / VESA code
- * to the corresponding timing values
- */
-static const int code_cea[39] = {
- -1, 0, 3, 3, 2, 8, 5, 5, -1, -1,
- -1, -1, -1, -1, -1, -1, 9, 10, 10, 1,
- 7, 6, 6, -1, -1, -1, -1, -1, -1, 11,
- 11, 12, 14, -1, -1, 13, 13, 4, 4
-};
-
-static const int code_vesa[85] = {
- -1, -1, -1, -1, 15, -1, -1, -1, -1, 16,
- -1, -1, -1, -1, 17, -1, 23, -1, -1, -1,
- -1, -1, 29, 18, -1, -1, -1, 32, 19, -1,
- -1, -1, 21, -1, -1, 22, -1, -1, -1, 20,
- -1, 30, 24, -1, -1, -1, -1, 25, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 31, 26, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 27, 28, -1, 33};
-
static const u8 edid_header[8] = {0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0};
-static inline void hdmi_write_reg(const struct hdmi_reg idx, u32 val)
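+/*
+ * hdmi_runtime_get()/hdmi_runtime_put() are reference counted: the first
+ * get takes the DSS and DISPC runtime PM references and enables the HDMI
+ * sys/functional clocks, and the last put releases them in reverse order.
+ */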
+static int hdmi_runtime_get(void)
{
- __raw_writel(val, hdmi.base_wp + idx.idx);
-}
+ int r;
-static inline u32 hdmi_read_reg(const struct hdmi_reg idx)
-{
- return __raw_readl(hdmi.base_wp + idx.idx);
-}
+ DSSDBG("hdmi_runtime_get\n");
-static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
- int b2, int b1, u32 val)
-{
- u32 t = 0;
- while (val != REG_GET(idx, b2, b1)) {
- udelay(1);
- if (t++ > 10000)
- return !val;
+ if (hdmi.runtime_count++ == 0) {
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ clk_enable(hdmi.sys_clk);
+ clk_enable(hdmi.hdmi_clk);
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err_runtime_get;
}
- return val;
+
+ return 0;
+
+err_runtime_get:
+ clk_disable(hdmi.sys_clk);
+ clk_disable(hdmi.hdmi_clk);
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ if (--hdmi.runtime_count == 0) {
+ r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ WARN_ON(r);
+
+ clk_disable(hdmi.sys_clk);
+ clk_disable(hdmi.hdmi_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+ }
}
int hdmi_init_display(struct omap_dss_device *dssdev)
@@ -176,949 +153,141 @@
return 0;
}
-static int hdmi_pll_init(enum hdmi_clk_refsel refsel, int dcofreq,
- struct hdmi_pll_info *fmt, u16 sd)
+static int relaxed_fb_mode_is_equal(const struct fb_videomode *mode1,
+ const struct fb_videomode *mode2)
{
- u32 r;
+ u32 ratio1 = mode1->flag & (FB_FLAG_RATIO_4_3 | FB_FLAG_RATIO_16_9);
+ u32 ratio2 = mode2->flag & (FB_FLAG_RATIO_4_3 | FB_FLAG_RATIO_16_9);
- /* PLL start always use manual mode */
- REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
- r = hdmi_read_reg(PLLCTRL_CFG1);
- r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
- r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1_PLL_REGN */
-
- hdmi_write_reg(PLLCTRL_CFG1, r);
-
- r = hdmi_read_reg(PLLCTRL_CFG2);
-
- r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
- r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
- r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
-
- if (dcofreq) {
- /* divider programming for frequency beyond 1000Mhz */
- REG_FLD_MOD(PLLCTRL_CFG3, sd, 17, 10);
- r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
- } else {
- r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
- }
-
- hdmi_write_reg(PLLCTRL_CFG2, r);
-
- r = hdmi_read_reg(PLLCTRL_CFG4);
- r = FLD_MOD(r, fmt->regm2, 24, 18);
- r = FLD_MOD(r, fmt->regmf, 17, 0);
-
- hdmi_write_reg(PLLCTRL_CFG4, r);
-
- /* go now */
- REG_FLD_MOD(PLLCTRL_PLL_GO, 0x1, 0, 0);
-
- /* wait for bit change */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_GO, 0, 0, 1) != 1) {
- DSSERR("PLL GO bit not set\n");
- return -ETIMEDOUT;
- }
-
- /* Wait till the lock bit is set in PLL status */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- DSSWARN("cannot lock PLL\n");
- DSSWARN("CFG1 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG1));
- DSSWARN("CFG2 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG2));
- DSSWARN("CFG4 0x%x\n",
- hdmi_read_reg(PLLCTRL_CFG4));
- return -ETIMEDOUT;
- }
-
- DSSDBG("PLL locked!\n");
-
- return 0;
+ return (mode1->xres == mode2->xres &&
+ mode1->yres == mode2->yres &&
+ mode1->pixclock <= mode2->pixclock * 201 / 200 &&
+ mode1->pixclock >= mode2->pixclock * 200 / 201 &&
+ mode1->hsync_len + mode1->left_margin + mode1->right_margin ==
+ mode2->hsync_len + mode2->left_margin + mode2->right_margin &&
+ mode1->vsync_len + mode1->upper_margin + mode1->lower_margin ==
+ mode2->vsync_len + mode2->upper_margin + mode2->lower_margin &&
+ (!ratio1 || !ratio2 || ratio1 == ratio2) &&
+ (mode1->vmode & FB_VMODE_INTERLACED) ==
+ (mode2->vmode & FB_VMODE_INTERLACED));
}
-/* PHY_PWR_CMD */
-static int hdmi_set_phy_pwr(enum hdmi_phy_pwr val)
+static int hdmi_set_timings(struct fb_videomode *vm, bool check_only)
{
- /* Command for power control of HDMI PHY */
- REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 7, 6);
-
- /* Status of the power control of HDMI PHY */
- if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
- DSSERR("Failed to set PHY power mode to %d\n", val);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* PLL_PWR_CMD */
-static int hdmi_set_pll_pwr(enum hdmi_pll_pwr val)
-{
- /* Command for power control of HDMI PLL */
- REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 3, 2);
-
- /* wait till PHY_PWR_STATUS is set */
- if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 1, 0, val) != val) {
- DSSERR("Failed to set PHY_PWR_STATUS\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_pll_reset(void)
-{
- /* SYSRESET controlled by power FSM */
- REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
-
- /* READ 0x0 reset is in progress */
- if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
- DSSERR("Failed to sysreset PLL\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_check_hpd_state(void)
-{
- unsigned long flags;
- bool hpd;
- int r;
- /* this should be in ti_hdmi_4xxx_ip private data */
- static DEFINE_SPINLOCK(phy_tx_lock);
-
- spin_lock_irqsave(&phy_tx_lock, flags);
-
- hpd = gpio_get_value(hdmi.hpd_gpio);
-
- if (hpd == hdmi.phy_tx_enabled) {
- spin_unlock_irqrestore(&phy_tx_lock, flags);
- return 0;
- }
-
- if (hpd)
- r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
- else
- r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
-
- if (r) {
- DSSERR("Failed to %s PHY TX power\n",
- hpd ? "enable" : "disable");
- goto err;
- }
-
- hdmi.phy_tx_enabled = hpd;
-err:
- spin_unlock_irqrestore(&phy_tx_lock, flags);
- return r;
-}
-
-static irqreturn_t hpd_irq_handler(int irq, void *data)
-{
- hdmi_check_hpd_state();
-
- return IRQ_HANDLED;
-}
-
-static int hdmi_phy_init(void)
-{
- u16 r = 0;
-
- r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
- if (r)
- return r;
-
- /*
- * Read address 0 in order to get the SCP reset done completed
- * Dummy access performed to make sure reset is done
- */
- hdmi_read_reg(HDMI_TXPHY_TX_CTRL);
-
- /*
- * Write to phy address 0 to configure the clock
- * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field
- */
- REG_FLD_MOD(HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
-
- /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
- hdmi_write_reg(HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
-
- /* Setup max LDO voltage */
- REG_FLD_MOD(HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
-
- /* Write to phy address 3 to change the polarity control */
- REG_FLD_MOD(HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
-
- r = request_threaded_irq(gpio_to_irq(hdmi.hpd_gpio),
- NULL, hpd_irq_handler,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "hpd", NULL);
- if (r) {
- DSSERR("HPD IRQ request failed\n");
- hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
- return r;
- }
-
- r = hdmi_check_hpd_state();
- if (r) {
- free_irq(gpio_to_irq(hdmi.hpd_gpio), NULL);
- hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
- return r;
- }
-
- return 0;
-}
-
-static int hdmi_wait_softreset(void)
-{
- /* reset W1 */
- REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
-
- /* wait till SOFTRESET == 0 */
- if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
- DSSERR("sysconfig reset failed\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int hdmi_pll_program(struct hdmi_pll_info *fmt)
-{
- u16 r = 0;
- enum hdmi_clk_refsel refsel;
-
- /* wait for wrapper reset */
- r = hdmi_wait_softreset();
- if (r)
- return r;
-
- r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- if (r)
- return r;
-
- r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- r = hdmi_pll_reset();
- if (r)
- return r;
-
- refsel = HDMI_REFSEL_SYSCLK;
-
- r = hdmi_pll_init(refsel, fmt->dcofreq, fmt, fmt->regsd);
- if (r)
- return r;
-
- return 0;
-}
-
-static void hdmi_phy_off(void)
-{
- free_irq(gpio_to_irq(hdmi.hpd_gpio), NULL);
- hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
- hdmi.phy_tx_enabled = false;
-}
-
-static int hdmi_core_ddc_edid(u8 *pedid, int ext)
-{
- u32 i, j;
- char checksum = 0;
- u32 offset = 0;
-
- /* Turn on CLK for DDC */
- REG_FLD_MOD(HDMI_CORE_AV_DPD, 0x7, 2, 0);
-
- /*
- * SW HACK : Without the Delay DDC(i2c bus) reads 0 values /
- * right shifted values( The behavior is not consistent and seen only
- * with some TV's)
- */
- usleep_range(800, 1000);
-
- if (!ext) {
- /* Clk SCL Devices */
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0xA, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_IN_PROG */
- if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
- 4, 4, 0) != 0) {
- DSSERR("Failed to program DDC\n");
- return -ETIMEDOUT;
- }
-
- /* Clear FIFO */
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x9, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_IN_PROG */
- if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
- 4, 4, 0) != 0) {
- DSSERR("Failed to program DDC\n");
- return -ETIMEDOUT;
- }
-
- } else {
- if (ext % 2 != 0)
- offset = 0x80;
- }
-
- /* Load Segment Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_SEGM, ext/2, 7, 0);
-
- /* Load Slave Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
-
- /* Load Offset Address Register */
- REG_FLD_MOD(HDMI_CORE_DDC_OFFSET, offset, 7, 0);
-
- /* Load Byte Count */
- REG_FLD_MOD(HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
- REG_FLD_MOD(HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
-
- /* Set DDC_CMD */
- if (ext)
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x4, 3, 0);
- else
- REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x2, 3, 0);
-
- /* HDMI_CORE_DDC_STATUS_BUS_LOW */
- if (REG_GET(HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
- DSSWARN("I2C Bus Low?\n");
- return -EIO;
- }
- /* HDMI_CORE_DDC_STATUS_NO_ACK */
- if (REG_GET(HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
- DSSWARN("I2C No Ack\n");
- return -EIO;
- }
-
- i = ext * 128;
- j = 0;
- while (((REG_GET(HDMI_CORE_DDC_STATUS, 4, 4) == 1) ||
- (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0)) &&
- j < 128) {
-
- if (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0) {
- /* FIFO not empty */
- pedid[i++] = REG_GET(HDMI_CORE_DDC_DATA, 7, 0);
- j++;
- }
- }
-
- for (j = 0; j < 128; j++)
- checksum += pedid[j];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int read_edid(u8 *pedid, u16 max_length)
-{
- int r = 0, n = 0, i = 0;
- int max_ext_blocks = (max_length / 128) - 1;
-
- r = hdmi_core_ddc_edid(pedid, 0);
- if (r) {
- return r;
- } else {
- n = pedid[0x7e];
-
- /*
- * README: need to comply with max_length set by the caller.
- * Better implementation should be to allocate necessary
- * memory to store EDID according to nb_block field found
- * in first block
- */
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(pedid, i);
- if (r)
- return r;
- }
- }
- return 0;
-}
-
-static int get_timings_index(void)
-{
- int code;
-
- if (hdmi.mode == 0)
- code = code_vesa[hdmi.code];
- else
- code = code_cea[hdmi.code];
-
- if (code == -1) {
- /* HDMI code 4 corresponds to 640 * 480 VGA */
- hdmi.code = 4;
- /* DVI mode 1 corresponds to HDMI 0 to DVI */
- hdmi.mode = HDMI_DVI;
-
- code = code_vesa[hdmi.code];
- }
- return code;
-}
-
-static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
-{
- int i = 0, code = -1, temp_vsync = 0, temp_hsync = 0;
- int timing_vsync = 0, timing_hsync = 0;
- struct omap_video_timings temp;
- struct hdmi_cm cm = {-1};
+ int i = 0;
DSSDBG("hdmi_get_code\n");
- for (i = 0; i < OMAP_HDMI_TIMINGS_NB; i++) {
- temp = cea_vesa_timings[i].timings;
- if ((temp.pixel_clock == timing->pixel_clock) &&
- (temp.x_res == timing->x_res) &&
- (temp.y_res == timing->y_res)) {
+ if (!vm->xres || !vm->yres || !vm->pixclock)
+ goto fail;
- temp_hsync = temp.hfp + temp.hsw + temp.hbp;
- timing_hsync = timing->hfp + timing->hsw + timing->hbp;
- temp_vsync = temp.vfp + temp.vsw + temp.vbp;
- timing_vsync = timing->vfp + timing->vsw + timing->vbp;
-
- DSSDBG("temp_hsync = %d , temp_vsync = %d"
- "timing_hsync = %d, timing_vsync = %d\n",
- temp_hsync, temp_hsync,
- timing_hsync, timing_vsync);
-
- if ((temp_hsync == timing_hsync) &&
- (temp_vsync == timing_vsync)) {
- code = i;
- cm.code = code_index[i];
- if (code < 14)
- cm.mode = HDMI_HDMI;
- else
- cm.mode = HDMI_DVI;
- DSSDBG("Hdmi_code = %d mode = %d\n",
- cm.code, cm.mode);
- break;
- }
+ for (i = 0; i < CEA_MODEDB_SIZE; i++) {
+ if (relaxed_fb_mode_is_equal(cea_modes + i, vm)) {
+ *vm = cea_modes[i];
+ if (check_only)
+ return 1;
+ hdmi.cfg.cm.code = i;
+ hdmi.cfg.cm.mode = HDMI_HDMI;
+ hdmi.cfg.timings = cea_modes[hdmi.cfg.cm.code];
+ goto done;
}
}
- return cm;
+ for (i = 0; i < VESA_MODEDB_SIZE; i++) {
+ if (relaxed_fb_mode_is_equal(vesa_modes + i, vm)) {
+ *vm = vesa_modes[i];
+ if (check_only)
+ return 1;
+ hdmi.cfg.cm.code = i;
+ hdmi.cfg.cm.mode = HDMI_DVI;
+ hdmi.cfg.timings = vesa_modes[hdmi.cfg.cm.code];
+ goto done;
+ }
+ }
+
+fail:
+ if (check_only)
+ return 0;
+ hdmi.cfg.cm.code = 1;
+ hdmi.cfg.cm.mode = HDMI_HDMI;
+ hdmi.cfg.timings = cea_modes[hdmi.cfg.cm.code];
+
+ i = -1;
+done:
+
+ DSSDBG("%s-%d\n", hdmi.cfg.cm.mode ? "CEA" : "VESA", hdmi.cfg.cm.code);
+ return i >= 0;
}
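+
+/*
+ * Fill @specs from the cached EDID: parse block 0, add any CEA extension
+ * blocks, then drop modes we cannot drive (no matching CEA/VESA timing,
+ * pixel clock above the platform limit, or pixel-repeated modes).
+ */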
-static void get_horz_vert_timing_info(int current_descriptor_addrs, u8 *edid ,
- struct omap_video_timings *timings)
+void hdmi_get_monspecs(struct fb_monspecs *specs)
{
- /* X and Y resolution */
- timings->x_res = (((edid[current_descriptor_addrs + 4] & 0xF0) << 4) |
- edid[current_descriptor_addrs + 2]);
- timings->y_res = (((edid[current_descriptor_addrs + 7] & 0xF0) << 4) |
- edid[current_descriptor_addrs + 5]);
+ int i, j;
+ char *edid = (char *) hdmi.edid;
- timings->pixel_clock = ((edid[current_descriptor_addrs + 1] << 8) |
- edid[current_descriptor_addrs]);
+ memset(specs, 0x0, sizeof(*specs));
+ if (!hdmi.edid_set)
+ return;
- timings->pixel_clock = 10 * timings->pixel_clock;
+ fb_edid_to_monspecs(edid, specs);
+ if (specs->modedb == NULL)
+ return;
- /* HORIZONTAL FRONT PORCH */
- timings->hfp = edid[current_descriptor_addrs + 8] |
- ((edid[current_descriptor_addrs + 11] & 0xc0) << 2);
- /* HORIZONTAL SYNC WIDTH */
- timings->hsw = edid[current_descriptor_addrs + 9] |
- ((edid[current_descriptor_addrs + 11] & 0x30) << 4);
- /* HORIZONTAL BACK PORCH */
- timings->hbp = (((edid[current_descriptor_addrs + 4] & 0x0F) << 8) |
- edid[current_descriptor_addrs + 3]) -
- (timings->hfp + timings->hsw);
- /* VERTICAL FRONT PORCH */
- timings->vfp = ((edid[current_descriptor_addrs + 10] & 0xF0) >> 4) |
- ((edid[current_descriptor_addrs + 11] & 0x0f) << 2);
- /* VERTICAL SYNC WIDTH */
- timings->vsw = (edid[current_descriptor_addrs + 10] & 0x0F) |
- ((edid[current_descriptor_addrs + 11] & 0x03) << 4);
- /* VERTICAL BACK PORCH */
- timings->vbp = (((edid[current_descriptor_addrs + 7] & 0x0F) << 8) |
- edid[current_descriptor_addrs + 6]) -
- (timings->vfp + timings->vsw);
+ for (i = 1; i <= edid[0x7e] && i * 128 < HDMI_EDID_MAX_LENGTH; i++) {
+ if (edid[i * 128] == 0x2)
+ fb_edid_add_monspecs(edid + i * 128, specs);
+ }
-}
+ hdmi.can_do_hdmi = specs->misc & FB_MISC_HDMI;
-/* Description : This function gets the resolution information from EDID */
-static void get_edid_timing_data(u8 *edid)
-{
- u8 count;
- u16 current_descriptor_addrs;
- struct hdmi_cm cm;
- struct omap_video_timings edid_timings;
-
- /* search block 0, there are 4 DTDs arranged in priority order */
- for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) {
- current_descriptor_addrs =
- EDID_DESCRIPTOR_BLOCK0_ADDRESS +
- count * EDID_TIMING_DESCRIPTOR_SIZE;
- get_horz_vert_timing_info(current_descriptor_addrs,
- edid, &edid_timings);
- cm = hdmi_get_code(&edid_timings);
- DSSDBG("Block0[%d] value matches code = %d , mode = %d\n",
- count, cm.code, cm.mode);
- if (cm.code == -1) {
+ /* filter out resolutions we don't support */
+ for (i = j = 0; i < specs->modedb_len; i++) {
+ u32 max_pclk = hdmi.dssdev->clocks.hdmi.max_pixclk_khz;
+ if (!hdmi_set_timings(&specs->modedb[i], true))
continue;
- } else {
- hdmi.code = cm.code;
- hdmi.mode = cm.mode;
- DSSDBG("code = %d , mode = %d\n",
- hdmi.code, hdmi.mode);
- return;
- }
- }
- if (edid[0x7e] != 0x00) {
- for (count = 0; count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR;
- count++) {
- current_descriptor_addrs =
- EDID_DESCRIPTOR_BLOCK1_ADDRESS +
- count * EDID_TIMING_DESCRIPTOR_SIZE;
- get_horz_vert_timing_info(current_descriptor_addrs,
- edid, &edid_timings);
- cm = hdmi_get_code(&edid_timings);
- DSSDBG("Block1[%d] value matches code = %d, mode = %d",
- count, cm.code, cm.mode);
- if (cm.code == -1) {
- continue;
- } else {
- hdmi.code = cm.code;
- hdmi.mode = cm.mode;
- DSSDBG("code = %d , mode = %d\n",
- hdmi.code, hdmi.mode);
- return;
- }
- }
- }
- DSSINFO("no valid timing found , falling back to VGA\n");
- hdmi.code = 4; /* setting default value of 640 480 VGA */
- hdmi.mode = HDMI_DVI;
+ if (max_pclk &&
+ max_pclk < PICOS2KHZ(specs->modedb[i].pixclock))
+ continue;
+
+ if (specs->modedb[i].flag & FB_FLAG_PIXEL_REPEAT)
+ continue;
+
+ specs->modedb[j++] = specs->modedb[i];
+ }
+ specs->modedb_len = j;
}
-static void hdmi_read_edid(struct omap_video_timings *dp)
+u8 *hdmi_read_edid(struct omap_video_timings *dp)
{
- int ret = 0, code;
+ int ret = 0, i;
+
+ if (hdmi.edid_set)
+ return hdmi.edid;
memset(hdmi.edid, 0, HDMI_EDID_MAX_LENGTH);
- if (!hdmi.edid_set)
- ret = read_edid(hdmi.edid, HDMI_EDID_MAX_LENGTH);
+ ret = read_ti_4xxx_edid(&hdmi.hdmi_data, hdmi.edid,
+ HDMI_EDID_MAX_LENGTH);
- if (!ret) {
- if (!memcmp(hdmi.edid, edid_header, sizeof(edid_header))) {
- /* search for timings of default resolution */
- get_edid_timing_data(hdmi.edid);
- hdmi.edid_set = true;
- }
- } else {
+ for (i = 0; i < HDMI_EDID_MAX_LENGTH; i += 16)
+ pr_info("edid[%03x] = %02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ hdmi.edid[i], hdmi.edid[i + 1], hdmi.edid[i + 2],
+ hdmi.edid[i + 3], hdmi.edid[i + 4], hdmi.edid[i + 5],
+ hdmi.edid[i + 6], hdmi.edid[i + 7], hdmi.edid[i + 8],
+ hdmi.edid[i + 9], hdmi.edid[i + 10], hdmi.edid[i + 11],
+ hdmi.edid[i + 12], hdmi.edid[i + 13], hdmi.edid[i + 14],
+ hdmi.edid[i + 15]);
+
+ if (ret) {
DSSWARN("failed to read E-EDID\n");
+ return NULL;
}
- if (!hdmi.edid_set) {
- DSSINFO("fallback to VGA\n");
- hdmi.code = 4; /* setting default value of 640 480 VGA */
- hdmi.mode = HDMI_DVI;
- }
+ if (memcmp(hdmi.edid, edid_header, sizeof(edid_header)))
+ return NULL;
- code = get_timings_index();
-
- *dp = cea_vesa_timings[code].timings;
-}
-
-static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
- struct hdmi_core_infoframe_avi *avi_cfg,
- struct hdmi_core_packet_enable_repeat *repeat_cfg)
-{
- DSSDBG("Enter hdmi_core_init\n");
-
- /* video core */
- video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
- video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
- video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
- video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
- video_cfg->hdmi_dvi = HDMI_DVI;
- video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
-
- /* info frame */
- avi_cfg->db1_format = 0;
- avi_cfg->db1_active_info = 0;
- avi_cfg->db1_bar_info_dv = 0;
- avi_cfg->db1_scan_info = 0;
- avi_cfg->db2_colorimetry = 0;
- avi_cfg->db2_aspect_ratio = 0;
- avi_cfg->db2_active_fmt_ar = 0;
- avi_cfg->db3_itc = 0;
- avi_cfg->db3_ec = 0;
- avi_cfg->db3_q_range = 0;
- avi_cfg->db3_nup_scaling = 0;
- avi_cfg->db4_videocode = 0;
- avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0 ;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
-
- /* packet enable and repeat */
- repeat_cfg->audio_pkt = 0;
- repeat_cfg->audio_pkt_repeat = 0;
- repeat_cfg->avi_infoframe = 0;
- repeat_cfg->avi_infoframe_repeat = 0;
- repeat_cfg->gen_cntrl_pkt = 0;
- repeat_cfg->gen_cntrl_pkt_repeat = 0;
- repeat_cfg->generic_pkt = 0;
- repeat_cfg->generic_pkt_repeat = 0;
-}
-
-static void hdmi_core_powerdown_disable(void)
-{
- DSSDBG("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(HDMI_CORE_CTRL1, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_release(void)
-{
- DSSDBG("Enter hdmi_core_swreset_release\n");
- REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x0, 0, 0);
-}
-
-static void hdmi_core_swreset_assert(void)
-{
- DSSDBG("Enter hdmi_core_swreset_assert\n");
- REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x1, 0, 0);
-}
-
-/* DSS_HDMI_CORE_VIDEO_CONFIG */
-static void hdmi_core_video_config(struct hdmi_core_video_config *cfg)
-{
- u32 r = 0;
-
- /* sys_ctrl1 default configuration not tunable */
- r = hdmi_read_reg(HDMI_CORE_CTRL1);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
- r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
- hdmi_write_reg(HDMI_CORE_CTRL1, r);
-
- REG_FLD_MOD(HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
-
- /* Vid_Mode */
- r = hdmi_read_reg(HDMI_CORE_SYS_VID_MODE);
-
- /* dither truncation configuration */
- if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
- r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
- r = FLD_MOD(r, 1, 5, 5);
- } else {
- r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
- r = FLD_MOD(r, 0, 5, 5);
- }
- hdmi_write_reg(HDMI_CORE_SYS_VID_MODE, r);
-
- /* HDMI_Ctrl */
- r = hdmi_read_reg(HDMI_CORE_AV_HDMI_CTRL);
- r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
- r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
- r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_HDMI_CTRL, r);
-
- /* TMDS_CTRL */
- REG_FLD_MOD(HDMI_CORE_SYS_TMDS_CTRL,
- cfg->tclk_sel_clkmult, 6, 5);
-}
-
-static void hdmi_core_aux_infoframe_avi_config(
- struct hdmi_core_infoframe_avi info_avi)
-{
- u32 val;
- char sum = 0, checksum = 0;
-
- sum += 0x82 + 0x002 + 0x00D;
- hdmi_write_reg(HDMI_CORE_AV_AVI_TYPE, 0x082);
- hdmi_write_reg(HDMI_CORE_AV_AVI_VERS, 0x002);
- hdmi_write_reg(HDMI_CORE_AV_AVI_LEN, 0x00D);
-
- val = (info_avi.db1_format << 5) |
- (info_avi.db1_active_info << 4) |
- (info_avi.db1_bar_info_dv << 2) |
- (info_avi.db1_scan_info);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(0), val);
- sum += val;
-
- val = (info_avi.db2_colorimetry << 6) |
- (info_avi.db2_aspect_ratio << 4) |
- (info_avi.db2_active_fmt_ar);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(1), val);
- sum += val;
-
- val = (info_avi.db3_itc << 7) |
- (info_avi.db3_ec << 4) |
- (info_avi.db3_q_range << 2) |
- (info_avi.db3_nup_scaling);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(2), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(3), info_avi.db4_videocode);
- sum += info_avi.db4_videocode;
-
- val = info_avi.db5_pixel_repeat;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(4), val);
- sum += val;
-
- val = info_avi.db6_7_line_eoftop & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(5), val);
- sum += val;
-
- val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(6), val);
- sum += val;
-
- val = info_avi.db8_9_line_sofbottom & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(7), val);
- sum += val;
-
- val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(8), val);
- sum += val;
-
- val = info_avi.db10_11_pixel_eofleft & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(9), val);
- sum += val;
-
- val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(10), val);
- sum += val;
-
- val = info_avi.db12_13_pixel_sofright & 0x00FF;
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(11), val);
- sum += val;
-
- val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
- hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(12), val);
- sum += val;
-
- checksum = 0x100 - sum;
- hdmi_write_reg(HDMI_CORE_AV_AVI_CHSUM, checksum);
-}
-
-static void hdmi_core_av_packet_config(
- struct hdmi_core_packet_enable_repeat repeat_cfg)
-{
- /* enable/repeat the infoframe */
- hdmi_write_reg(HDMI_CORE_AV_PB_CTRL1,
- (repeat_cfg.audio_pkt << 5) |
- (repeat_cfg.audio_pkt_repeat << 4) |
- (repeat_cfg.avi_infoframe << 1) |
- (repeat_cfg.avi_infoframe_repeat));
-
- /* enable/repeat the packet */
- hdmi_write_reg(HDMI_CORE_AV_PB_CTRL2,
- (repeat_cfg.gen_cntrl_pkt << 3) |
- (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
- (repeat_cfg.generic_pkt << 1) |
- (repeat_cfg.generic_pkt_repeat));
-}
-
-static void hdmi_wp_init(struct omap_video_timings *timings,
- struct hdmi_video_format *video_fmt,
- struct hdmi_video_interface *video_int)
-{
- DSSDBG("Enter hdmi_wp_init\n");
-
- timings->hbp = 0;
- timings->hfp = 0;
- timings->hsw = 0;
- timings->vbp = 0;
- timings->vfp = 0;
- timings->vsw = 0;
-
- video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = 0;
- video_fmt->x_res = 0;
-
- video_int->vsp = 0;
- video_int->hsp = 0;
-
- video_int->interlacing = 0;
- video_int->tm = 0; /* HDMI_TIMING_SLAVE */
-
-}
-
-static void hdmi_wp_video_start(bool start)
-{
- REG_FLD_MOD(HDMI_WP_VIDEO_CFG, start, 31, 31);
-}
-
-static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
-{
- DSSDBG("Enter hdmi_wp_video_init_format\n");
-
- video_fmt->y_res = param->timings.timings.y_res;
- video_fmt->x_res = param->timings.timings.x_res;
-
- timings->hbp = param->timings.timings.hbp;
- timings->hfp = param->timings.timings.hfp;
- timings->hsw = param->timings.timings.hsw;
- timings->vbp = param->timings.timings.vbp;
- timings->vfp = param->timings.timings.vfp;
- timings->vsw = param->timings.timings.vsw;
-}
-
-static void hdmi_wp_video_config_format(
- struct hdmi_video_format *video_fmt)
-{
- u32 l = 0;
-
- REG_FLD_MOD(HDMI_WP_VIDEO_CFG, video_fmt->packing_mode, 10, 8);
-
- l |= FLD_VAL(video_fmt->y_res, 31, 16);
- l |= FLD_VAL(video_fmt->x_res, 15, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_SIZE, l);
-}
-
-static void hdmi_wp_video_config_interface(
- struct hdmi_video_interface *video_int)
-{
- u32 r;
- DSSDBG("Enter hdmi_wp_video_config_interface\n");
-
- r = hdmi_read_reg(HDMI_WP_VIDEO_CFG);
- r = FLD_MOD(r, video_int->vsp, 7, 7);
- r = FLD_MOD(r, video_int->hsp, 6, 6);
- r = FLD_MOD(r, video_int->interlacing, 3, 3);
- r = FLD_MOD(r, video_int->tm, 1, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_CFG, r);
-}
-
-static void hdmi_wp_video_config_timing(
- struct omap_video_timings *timings)
-{
- u32 timing_h = 0;
- u32 timing_v = 0;
-
- DSSDBG("Enter hdmi_wp_video_config_timing\n");
-
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw, 7, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_TIMING_H, timing_h);
-
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
- hdmi_write_reg(HDMI_WP_VIDEO_TIMING_V, timing_v);
-}
-
-static void hdmi_basic_configure(struct hdmi_config *cfg)
-{
- /* HDMI */
- struct omap_video_timings video_timing;
- struct hdmi_video_format video_format;
- struct hdmi_video_interface video_interface;
- /* HDMI core */
- struct hdmi_core_infoframe_avi avi_cfg;
- struct hdmi_core_video_config v_core_cfg;
- struct hdmi_core_packet_enable_repeat repeat_cfg;
-
- hdmi_wp_init(&video_timing, &video_format,
- &video_interface);
-
- hdmi_core_init(&v_core_cfg,
- &avi_cfg,
- &repeat_cfg);
-
- hdmi_wp_video_init_format(&video_format,
- &video_timing, cfg);
-
- hdmi_wp_video_config_timing(&video_timing);
-
- /* video config */
- video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
-
- hdmi_wp_video_config_format(&video_format);
-
- video_interface.vsp = cfg->timings.vsync_pol;
- video_interface.hsp = cfg->timings.hsync_pol;
- video_interface.interlacing = cfg->interlace;
- video_interface.tm = 1 ; /* HDMI_TIMING_MASTER_24BIT */
-
- hdmi_wp_video_config_interface(&video_interface);
-
- /*
- * configure core video part
- * set software reset in the core
- */
- hdmi_core_swreset_assert();
-
- /* power down off */
- hdmi_core_powerdown_disable();
-
- v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
- v_core_cfg.hdmi_dvi = cfg->cm.mode;
-
- hdmi_core_video_config(&v_core_cfg);
-
- /* release software reset in the core */
- hdmi_core_swreset_release();
-
- /*
- * configure packet
- * info frame video see doc CEA861-D page 65
- */
- avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
- avi_cfg.db1_active_info =
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
- avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
- avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
- avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
- avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
- avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
- avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
- avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
- avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
- avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
- avi_cfg.db4_videocode = cfg->cm.code;
- avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
- avi_cfg.db6_7_line_eoftop = 0;
- avi_cfg.db8_9_line_sofbottom = 0;
- avi_cfg.db10_11_pixel_eofleft = 0;
- avi_cfg.db12_13_pixel_sofright = 0;
-
- hdmi_core_aux_infoframe_avi_config(avi_cfg);
-
- /* enable/repeat the infoframe */
- repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
- repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
- /* wakeup */
- repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
- repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
- hdmi_core_av_packet_config(repeat_cfg);
-}
-
-static void update_hdmi_timings(struct hdmi_config *cfg,
- struct omap_video_timings *timings, int code)
-{
- cfg->timings.timings.x_res = timings->x_res;
- cfg->timings.timings.y_res = timings->y_res;
- cfg->timings.timings.hbp = timings->hbp;
- cfg->timings.timings.hfp = timings->hfp;
- cfg->timings.timings.hsw = timings->hsw;
- cfg->timings.timings.vbp = timings->vbp;
- cfg->timings.timings.vfp = timings->vfp;
- cfg->timings.timings.vsw = timings->vsw;
- cfg->timings.timings.pixel_clock = timings->pixel_clock;
- cfg->timings.vsync_pol = cea_vesa_timings[code].vsync_pol;
- cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
+ hdmi.edid_set = true;
+ return hdmi.edid;
}
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
@@ -1127,16 +296,12 @@
unsigned long clkin, refclk;
u32 mf;
- clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
+ clkin = clk_get_rate(hdmi.sys_clk) / 10000;
/*
* Input clock is predivided by N + 1
* out put of which is reference clk
*/
- if (dssdev->clocks.hdmi.regn == 0)
- pi->regn = HDMI_DEFAULT_REGN;
- else
- pi->regn = dssdev->clocks.hdmi.regn;
-
+ pi->regn = dssdev->clocks.hdmi.regn;
refclk = clkin / (pi->regn + 1);
/*
@@ -1144,11 +309,7 @@
* Multiplying by 100 to avoid fractional part removal
*/
pi->regm = (phy * 100 / (refclk)) / 100;
-
- if (dssdev->clocks.hdmi.regm2 == 0)
- pi->regm2 = HDMI_DEFAULT_REGM2;
- else
- pi->regm2 = dssdev->clocks.hdmi.regm2;
+ pi->regm2 = dssdev->clocks.hdmi.regm2;
/*
* fractional multiplier is remainder of the difference between
@@ -1169,26 +330,59 @@
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static void hdmi_enable_clocks(int enable)
+static void hdmi_load_hdcp_keys(struct omap_dss_device *dssdev)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
+ int aksv;
+ int retries = 5;
+ DSSDBG("hdmi_load_hdcp_keys\n");
+	/* load the keys and reset the wrapper to populate the AKSV registers */
+ if (hdmi.hdmi_power_on_cb) {
+ aksv = hdmi_ti_4xx_check_aksv_data(&hdmi.hdmi_data);
+ if ((aksv == HDMI_AKSV_ZERO) &&
+ hdmi.custom_set &&
+ hdmi.hdmi_power_on_cb()) {
+ hdmi_ti_4xxx_set_wait_soft_reset(&hdmi.hdmi_data);
+
+ while (retries) {
+ aksv = hdmi_ti_4xx_check_aksv_data(&hdmi.hdmi_data);
+ if (aksv == HDMI_AKSV_VALID)
+ break;
+ msleep(50);
+ retries--;
+ }
+
+ hdmi.wp_reset_done = (aksv == HDMI_AKSV_VALID) ?
+ true : false;
+ DSSINFO("HDMI_WRAPPER RESET DONE\n");
+ } else if (aksv == HDMI_AKSV_VALID)
+ hdmi.wp_reset_done = true;
+ else if (aksv == HDMI_AKSV_ERROR)
+ hdmi.wp_reset_done = false;
+
+ if (!hdmi.wp_reset_done)
+ DSSERR("*** INVALID AKSV: "
+ "Do not perform HDCP AUTHENTICATION\n");
+ }
+
}
static int hdmi_power_on(struct omap_dss_device *dssdev)
{
- int r, code = 0;
+ int r;
struct hdmi_pll_info pll_data;
struct omap_video_timings *p;
unsigned long phy;
- hdmi_enable_clocks(1);
+ r = hdmi_runtime_get();
+ if (r)
+ return r;
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+ /* Load the HDCP keys if not already loaded*/
+ hdmi_load_hdcp_keys(dssdev);
+
+ hdmi_ti_4xxx_wp_video_start(&hdmi.hdmi_data, 0);
+
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, dssdev->type, 0);
p = &dssdev->panel.timings;
@@ -1197,35 +391,53 @@
dssdev->panel.timings.y_res);
if (!hdmi.custom_set) {
- DSSDBG("Read EDID as no EDID is not set on poweron\n");
- hdmi_read_edid(p);
+ struct fb_videomode vesa_vga = vesa_modes[4];
+ hdmi_set_timings(&vesa_vga, false);
}
- code = get_timings_index();
- dssdev->panel.timings = cea_vesa_timings[code].timings;
- update_hdmi_timings(&hdmi.cfg, p, code);
+
+ omapfb_fb2dss_timings(&hdmi.cfg.timings, &dssdev->panel.timings);
phy = p->pixel_clock;
+ switch (hdmi.deep_color) {
+ case HDMI_DEEP_COLOR_30BIT:
+		phy = (p->pixel_clock * 125) / 100;
+ hdmi.cfg.deep_color = HDMI_DEEP_COLOR_30BIT;
+ break;
+ case HDMI_DEEP_COLOR_36BIT:
+ if (p->pixel_clock == 148500) {
+ printk(KERN_ERR "36 bit deep color not supported");
+ goto err;
+ }
+
+ phy = (p->pixel_clock * 150) / 100;
+ hdmi.cfg.deep_color = HDMI_DEEP_COLOR_36BIT;
+ break;
+ case HDMI_DEEP_COLOR_24BIT:
+ default:
+ phy = p->pixel_clock;
+ hdmi.cfg.deep_color = HDMI_DEEP_COLOR_24BIT;
+ break;
+ }
+
hdmi_compute_pll(dssdev, phy, &pll_data);
- hdmi_wp_video_start(0);
-
- /* config the PLL and PHY first */
- r = hdmi_pll_program(&pll_data);
+	/* config the PLL and PHY hdmi_set_pll_pwr first */
+ r = hdmi_ti_4xxx_pll_program(&hdmi.hdmi_data, &pll_data);
if (r) {
DSSDBG("Failed to lock PLL\n");
goto err;
}
- r = hdmi_phy_init();
+ r = hdmi_ti_4xxx_phy_init(&hdmi.hdmi_data);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err;
}
- hdmi.cfg.cm.mode = hdmi.mode;
+ hdmi.cfg.cm.mode = hdmi.can_do_hdmi ? hdmi.mode : HDMI_DVI;
hdmi.cfg.cm.code = hdmi.code;
- hdmi_basic_configure(&hdmi.cfg);
+ hdmi_ti_4xxx_basic_configure(&hdmi.hdmi_data, &hdmi.cfg);
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
@@ -1245,65 +457,154 @@
dispc_set_digit_size(dssdev->panel.timings.x_res,
dssdev->panel.timings.y_res);
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 1);
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, dssdev->type, 1);
- hdmi_wp_video_start(1);
+ hdmi_ti_4xxx_wp_video_start(&hdmi.hdmi_data, 1);
+
+ if (hdmi.hdmi_start_frame_cb &&
+ hdmi.custom_set &&
+ hdmi.wp_reset_done)
+ (*hdmi.hdmi_start_frame_cb)();
return 0;
err:
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
return -EIO;
}
static void hdmi_power_off(struct omap_dss_device *dssdev)
{
- dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+ if (hdmi.hdmi_irq_cb)
+ hdmi.hdmi_irq_cb(HDMI_HPD_LOW);
- hdmi_wp_video_start(0);
- hdmi_phy_off();
- hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- hdmi_enable_clocks(0);
+ hdmi_ti_4xxx_wp_video_start(&hdmi.hdmi_data, 0);
- hdmi.edid_set = 0;
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, dssdev->type, 0);
+ hdmi_ti_4xxx_phy_off(&hdmi.hdmi_data, hdmi.set_mode);
+ hdmi_ti_4xxx_set_pll_pwr(&hdmi.hdmi_data, HDMI_PLLPWRCMD_ALLOFF);
+ hdmi_runtime_put();
+ hdmi.deep_color = HDMI_DEEP_COLOR_24BIT;
+}
+
+int omapdss_hdmi_get_pixel_clock(void)
+{
+ return PICOS2KHZ(hdmi.cfg.timings.pixclock);
+}
+
+int omapdss_hdmi_get_mode(void)
+{
+ return hdmi.mode;
+}
+
+int omapdss_hdmi_register_hdcp_callbacks(void (*hdmi_start_frame_cb)(void),
+ void (*hdmi_irq_cb)(int status),
+ bool (*hdmi_power_on_cb)(void))
+{
+ hdmi.hdmi_start_frame_cb = hdmi_start_frame_cb;
+ hdmi.hdmi_irq_cb = hdmi_irq_cb;
+ hdmi.hdmi_power_on_cb = hdmi_power_on_cb;
+
+ return hdmi_ti_4xxx_wp_get_video_state(&hdmi.hdmi_data);
+}
+EXPORT_SYMBOL(omapdss_hdmi_register_hdcp_callbacks);
+
+void omapdss_hdmi_set_deepcolor(int val)
+{
+ hdmi.deep_color = val;
+}
+
+int omapdss_hdmi_get_deepcolor(void)
+{
+ return hdmi.deep_color;
+}
+
+int hdmi_get_current_hpd(void)
+{
+ return gpio_get_value(hdmi.dssdev->hpd_gpio);
+}
+
+static irqreturn_t hpd_irq_handler(int irq, void *ptr)
+{
+ int hpd = hdmi_get_current_hpd();
+ pr_info("hpd %d\n", hpd);
+
+ hdmi_panel_hpd_handler(hpd);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+ int r = 0;
+
+ r = hdmi_ti_4xxx_irq_handler(&hdmi.hdmi_data);
+
+ DSSDBG("Received HDMI IRQ = %08x\n", r);
+
+ if (hdmi.hdmi_irq_cb)
+ hdmi.hdmi_irq_cb(r);
+
+ return IRQ_HANDLED;
}
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct hdmi_cm cm;
+ struct fb_videomode t;
- cm = hdmi_get_code(timings);
- if (cm.code == -1) {
- DSSERR("Invalid timing entered\n");
- return -EINVAL;
+ omapfb_dss2fb_timings(timings, &t);
+
+ /* also check interlaced timings */
+ if (!hdmi_set_timings(&t, true)) {
+ t.yres *= 2;
+ t.vmode |= FB_VMODE_INTERLACED;
}
-
+ if (!hdmi_set_timings(&t, true))
+ return -EINVAL;
return 0;
+}
+int omapdss_hdmi_display_set_mode(struct omap_dss_device *dssdev,
+ struct fb_videomode *vm)
+{
+ int r1, r2;
+ DSSINFO("Enter omapdss_hdmi_display_set_mode\n");
+ /* turn the hdmi off and on to get new timings to use */
+ hdmi.set_mode = true;
+ dssdev->driver->disable(dssdev);
+ hdmi.set_mode = false;
+ r1 = hdmi_set_timings(vm, false) ? 0 : -EINVAL;
+ hdmi.custom_set = 1;
+ hdmi.code = hdmi.cfg.cm.code;
+ hdmi.mode = hdmi.cfg.cm.mode;
+ r2 = dssdev->driver->enable(dssdev);
+ return r1 ? : r2;
}
void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
{
- struct hdmi_cm cm;
+ struct fb_videomode t;
- hdmi.custom_set = 1;
- cm = hdmi_get_code(&dssdev->panel.timings);
- hdmi.code = cm.code;
- hdmi.mode = cm.mode;
- omapdss_hdmi_display_enable(dssdev);
- hdmi.custom_set = 0;
+ omapfb_dss2fb_timings(&dssdev->panel.timings, &t);
+ /* also check interlaced timings */
+ if (!hdmi_set_timings(&t, true)) {
+ t.yres *= 2;
+ t.vmode |= FB_VMODE_INTERLACED;
+ }
+
+ omapdss_hdmi_display_set_mode(dssdev, &t);
}
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_hdmi_data *priv = dssdev->data;
int r = 0;
- DSSDBG("ENTER hdmi_display_enable\n");
+ DSSINFO("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi.lock);
- hdmi.hpd_gpio = priv->hpd_gpio;
+ if (hdmi.enabled)
+ goto err0;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -1319,15 +620,34 @@
}
}
+ hdmi.hdmi_reg = regulator_get(NULL, "hdmi_vref");
+ if (IS_ERR_OR_NULL(hdmi.hdmi_reg)) {
+ DSSERR("Failed to get hdmi_vref regulator\n");
+ r = PTR_ERR(hdmi.hdmi_reg) ? : -ENODEV;
+ goto err2;
+ }
+
+ r = regulator_enable(hdmi.hdmi_reg);
+ if (r) {
+ DSSERR("failed to enable hdmi_vref regulator\n");
+ goto err3;
+ }
+
r = hdmi_power_on(dssdev);
if (r) {
DSSERR("failed to power on device\n");
- goto err2;
+ goto err4;
}
+ hdmi.enabled = true;
+
mutex_unlock(&hdmi.lock);
return 0;
+err4:
+ regulator_disable(hdmi.hdmi_reg);
+err3:
+ regulator_put(hdmi.hdmi_reg);
err2:
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -1340,440 +660,91 @@
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
{
- DSSDBG("Enter hdmi_display_disable\n");
+ DSSINFO("Enter hdmi_display_disable\n");
mutex_lock(&hdmi.lock);
+ if (!hdmi.enabled)
+ goto done;
+
+ hdmi.enabled = false;
+ hdmi.wp_reset_done = false;
+
hdmi_power_off(dssdev);
+ if (dssdev->sync_lost_error == 0)
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ /* clear EDID and mode on disable only */
+ hdmi.edid_set = false;
+ hdmi.custom_set = 0;
+ pr_info("hdmi: clearing EDID info\n");
+ }
+ regulator_disable(hdmi.hdmi_reg);
+
+ regulator_put(hdmi.hdmi_reg);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
omap_dss_stop_device(dssdev);
-
+done:
mutex_unlock(&hdmi.lock);
}
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-static void hdmi_wp_audio_config_format(
- struct hdmi_audio_format *aud_fmt)
+static int hdmi_get_clocks(struct platform_device *pdev)
{
- u32 r;
+ struct clk *clk;
- DSSDBG("Enter hdmi_wp_audio_config_format\n");
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CFG);
- r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
- r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
- r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
- r = FLD_MOD(r, aud_fmt->type, 4, 4);
- r = FLD_MOD(r, aud_fmt->justification, 3, 3);
- r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
- r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
- r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CFG, r);
-}
-
-static void hdmi_wp_audio_config_dma(struct hdmi_audio_dma *aud_dma)
-{
- u32 r;
-
- DSSDBG("Enter hdmi_wp_audio_config_dma\n");
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CFG2);
- r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
- r = FLD_MOD(r, aud_dma->block_size, 7, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CFG2, r);
-
- r = hdmi_read_reg(HDMI_WP_AUDIO_CTRL);
- r = FLD_MOD(r, aud_dma->mode, 9, 9);
- r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
- hdmi_write_reg(HDMI_WP_AUDIO_CTRL, r);
-}
-
-static void hdmi_core_audio_config(struct hdmi_core_audio_config *cfg)
-{
- u32 r;
-
- /* audio clock recovery parameters */
- r = hdmi_read_reg(HDMI_CORE_AV_ACR_CTRL);
- r = FLD_MOD(r, cfg->use_mclk, 2, 2);
- r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
- r = FLD_MOD(r, cfg->cts_mode, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_ACR_CTRL, r);
-
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
-
- if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
- } else {
- /*
- * HDMI IP uses this configuration to divide the MCLK to
- * update CTS value.
- */
- REG_FLD_MOD(HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
-
- /* Configure clock for audio packets */
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
- cfg->aud_par_busclk, 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
- (cfg->aud_par_busclk >> 8), 7, 0);
- REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
- (cfg->aud_par_busclk >> 16), 7, 0);
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
}
- /* Override of SPDIF sample frequency with value in I2S_CHST4 */
- REG_FLD_MOD(HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1);
+ hdmi.sys_clk = clk;
- /* I2S parameters */
- REG_FLD_MOD(HDMI_CORE_AV_I2S_CHST4, cfg->freq_sample, 3, 0);
-
- r = hdmi_read_reg(HDMI_CORE_AV_I2S_IN_CTRL);
- r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
- r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
- r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
- r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
- r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
- r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_I2S_IN_CTRL, r);
-
- r = hdmi_read_reg(HDMI_CORE_AV_I2S_CHST5);
- r = FLD_MOD(r, cfg->freq_sample, 7, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
- hdmi_write_reg(HDMI_CORE_AV_I2S_CHST5, r);
-
- REG_FLD_MOD(HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0);
-
- /* Audio channels and mode parameters */
- REG_FLD_MOD(HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
- r = hdmi_read_reg(HDMI_CORE_AV_AUD_MODE);
- r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
- r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
- r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
- r = FLD_MOD(r, cfg->en_spdif, 1, 1);
- hdmi_write_reg(HDMI_CORE_AV_AUD_MODE, r);
-}
-
-static void hdmi_core_audio_infoframe_config(
- struct hdmi_core_infoframe_audio *info_aud)
-{
- u8 val;
- u8 sum = 0, checksum = 0;
-
- /*
- * Set audio info frame type, version and length as
- * described in HDMI 1.4a Section 8.2.2 specification.
- * Checksum calculation is defined in Section 5.3.5.
- */
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_TYPE, 0x84);
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_VERS, 0x01);
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_LEN, 0x0a);
- sum += 0x84 + 0x001 + 0x00a;
-
- val = (info_aud->db1_coding_type << 4)
- | (info_aud->db1_channel_count - 1);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(0), val);
- sum += val;
-
- val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(1), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
-
- val = info_aud->db4_channel_alloc;
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(3), val);
- sum += val;
-
- val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(4), val);
- sum += val;
-
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
- hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
-
- checksum = 0x100 - sum;
- hdmi_write_reg(HDMI_CORE_AV_AUDIO_CHSUM, checksum);
-
- /*
- * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
- * is available.
- */
-}
-
-static int hdmi_config_audio_acr(u32 sample_freq, u32 *n, u32 *cts)
-{
- u32 r;
- u32 deep_color = 0;
- u32 pclk = hdmi.cfg.timings.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
- return -EINVAL;
- /*
- * Obtain current deep color configuration. This needed
- * to calculate the TMDS clock based on the pixel clock.
- */
- r = REG_GET(HDMI_WP_VIDEO_CFG, 1, 0);
- switch (r) {
- case 1: /* No deep color selected */
- deep_color = 100;
- break;
- case 2: /* 10-bit deep color selected */
- deep_color = 125;
- break;
- case 3: /* 12-bit deep color selected */
- deep_color = 150;
- break;
- default:
- return -EINVAL;
+ clk = clk_get(&pdev->dev, "hdmi_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get hdmi_clk\n");
+ clk_put(hdmi.sys_clk);
+ return PTR_ERR(clk);
}
- switch (sample_freq) {
- case 32000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 4096;
- break;
- case 44100:
- *n = 6272;
- break;
- case 48000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 6144;
- break;
- default:
- *n = 0;
- return -EINVAL;
- }
-
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+ hdmi.hdmi_clk = clk;
return 0;
}
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+static void hdmi_put_clocks(void)
{
- struct hdmi_audio_format audio_format;
- struct hdmi_audio_dma audio_dma;
- struct hdmi_core_audio_config core_cfg;
- struct hdmi_core_infoframe_audio aud_if_cfg;
- int err, n, cts;
- enum hdmi_core_audio_sample_freq sample_freq;
-
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_20BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_16;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_dma.transfer_size = 0x10;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_24BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_24;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- audio_dma.transfer_size = 0x20;
- break;
- default:
- return -EINVAL;
- }
-
- switch (params_rate(params)) {
- case 32000:
- sample_freq = HDMI_AUDIO_FS_32000;
- break;
- case 44100:
- sample_freq = HDMI_AUDIO_FS_44100;
- break;
- case 48000:
- sample_freq = HDMI_AUDIO_FS_48000;
- break;
- default:
- return -EINVAL;
- }
-
- err = hdmi_config_audio_acr(params_rate(params), &n, &cts);
- if (err < 0)
- return err;
-
- /* Audio wrapper config */
- audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
- audio_format.active_chnnls_msk = 0x03;
- audio_format.type = HDMI_AUDIO_TYPE_LPCM;
- audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
- /* Disable start/stop signals of IEC 60958 blocks */
- audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
-
- audio_dma.block_size = 0xC0;
- audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
- audio_dma.fifo_threshold = 0x20; /* in number of samples */
-
- hdmi_wp_audio_config_dma(&audio_dma);
- hdmi_wp_audio_config_format(&audio_format);
-
- /*
- * I2S config
- */
- core_cfg.i2s_cfg.en_high_bitrate_aud = false;
- /* Only used with high bitrate audio */
- core_cfg.i2s_cfg.cbit_order = false;
- /* Serial data and word select should change on sck rising edge */
- core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
- core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
- /* Set I2S word select polarity */
- core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
- core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
- /* Set serial data to word select shift. See Phillips spec. */
- core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
- /* Enable one of the four available serial data channels */
- core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
- /* Core audio config */
- core_cfg.freq_sample = sample_freq;
- core_cfg.n = n;
- core_cfg.cts = cts;
- if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
- core_cfg.aud_par_busclk = 0;
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- core_cfg.use_mclk = false;
- } else {
- core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
- core_cfg.use_mclk = true;
- core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
- }
- core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
- core_cfg.en_spdif = false;
- /* Use sample frequency from channel status word */
- core_cfg.fs_override = true;
- /* Enable ACR packets */
- core_cfg.en_acr_pkt = true;
- /* Disable direct streaming digital audio */
- core_cfg.en_dsd_audio = false;
- /* Use parallel audio interface */
- core_cfg.en_parallel_aud_input = true;
-
- hdmi_core_audio_config(&core_cfg);
-
- /*
- * Configure packet
- * info frame audio see doc CEA861-D page 74
- */
- aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
- aud_if_cfg.db1_channel_count = 2;
- aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
- aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
- aud_if_cfg.db4_channel_alloc = 0x00;
- aud_if_cfg.db5_downmix_inh = false;
- aud_if_cfg.db5_lsv = 0;
-
- hdmi_core_audio_infoframe_config(&aud_if_cfg);
- return 0;
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+ if (hdmi.hdmi_clk)
+ clk_put(hdmi.hdmi_clk);
}
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- int err = 0;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 31, 31);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 30, 30);
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 30, 30);
- REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 31, 31);
- break;
- default:
- err = -EINVAL;
- }
- return err;
-}
-
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- if (!hdmi.mode) {
- pr_err("Current video settings do not support audio.\n");
- return -EIO;
- }
- return 0;
-}
-
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
-};
-
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
- .hw_params = hdmi_audio_hw_params,
- .trigger = hdmi_audio_trigger,
- .startup = hdmi_audio_startup,
-};
-
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
- .name = "hdmi-audio-codec",
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &hdmi_audio_codec_ops,
-};
-#endif
-
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- int ret;
-#endif
+ struct omap_dss_board_info *board_data;
+ int r;
hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
mutex_init(&hdmi.lock);
+ /* save reference to HDMI device */
+ board_data = hdmi.pdata->board_data;
+ for (r = 0; r < board_data->num_devices; r++) {
+ if (board_data->devices[r]->type == OMAP_DISPLAY_TYPE_HDMI)
+ hdmi.dssdev = board_data->devices[r];
+ }
+ if (!hdmi.dssdev) {
+ DSSERR("can't get HDMI device\n");
+ return -EINVAL;
+ }
+
hdmi_mem = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
if (!hdmi_mem) {
DSSERR("can't get IORESOURCE_MEM HDMI\n");
@@ -1781,25 +752,47 @@
}
/* Base address taken from platform */
- hdmi.base_wp = ioremap(hdmi_mem->start, resource_size(hdmi_mem));
- if (!hdmi.base_wp) {
+ hdmi.hdmi_data.base_wp = ioremap(hdmi_mem->start,
+ resource_size(hdmi_mem));
+ if (!hdmi.hdmi_data.base_wp) {
DSSERR("can't ioremap WP\n");
return -ENOMEM;
}
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ iounmap(hdmi.hdmi_data.base_wp);
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ r = request_irq(gpio_to_irq(hdmi.dssdev->hpd_gpio), hpd_irq_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "hpd", NULL);
+ if (r < 0) {
+ pr_err("hdmi: request_irq %d failed\n",
+ gpio_to_irq(hdmi.dssdev->hpd_gpio));
+ return -EINVAL;
+ }
+
+ hdmi.hdmi_irq = platform_get_irq(pdev, 0);
+
+ r = request_irq(hdmi.hdmi_irq, hdmi_irq_handler, 0, "OMAP HDMI", NULL);
+ if (r < 0) {
+ pr_err("hdmi: request_irq %s failed\n",
+ pdev->name);
+ return -EINVAL;
+ }
+
+ hdmi.hdmi_data.hdmi_core_sys_offset = HDMI_CORE_SYS;
+ hdmi.hdmi_data.hdmi_core_av_offset = HDMI_CORE_AV;
+ hdmi.hdmi_data.hdmi_pll_offset = HDMI_PLLCTRL;
+ hdmi.hdmi_data.hdmi_phy_offset = HDMI_PHY;
+ hdmi.wp_reset_done = false;
+
hdmi_panel_init();
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
- /* Register ASoC codec DAI */
- ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
- &hdmi_codec_dai_drv, 1);
- if (ret) {
- DSSERR("can't register ASoC HDMI audio codec\n");
- return ret;
- }
-#endif
return 0;
}
@@ -1807,12 +800,15 @@
{
hdmi_panel_exit();
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- snd_soc_unregister_codec(&pdev->dev);
-#endif
+ if (hdmi.dssdev)
+		free_irq(gpio_to_irq(hdmi.dssdev->hpd_gpio), NULL);
+ hdmi.dssdev = NULL;
- iounmap(hdmi.base_wp);
+ pm_runtime_disable(&pdev->dev);
+
+ hdmi_put_clocks();
+
+ iounmap(hdmi.hdmi_data.base_wp);
return 0;
}
@@ -1835,3 +831,13 @@
{
return platform_driver_unregister(&omapdss_hdmihw_driver);
}
+
+void hdmi_dump_regs(struct seq_file *s)
+{
+ if (hdmi_runtime_get())
+ return;
+
+ hdmi_ti_4xxx_dump_regs(&hdmi.hdmi_data, s);
+
+ hdmi_runtime_put();
+}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
deleted file mode 100644
index c885f9c..0000000
--- a/drivers/video/omap2/dss/hdmi.h
+++ /dev/null
@@ -1,631 +0,0 @@
-/*
- * hdmi.h
- *
- * HDMI driver definition for TI OMAP4 processors.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _OMAP4_DSS_HDMI_H_
-#define _OMAP4_DSS_HDMI_H_
-
-#include <linux/string.h>
-#include <video/omapdss.h>
-
-#define HDMI_WP 0x0
-#define HDMI_CORE_SYS 0x400
-#define HDMI_CORE_AV 0x900
-#define HDMI_PLLCTRL 0x200
-#define HDMI_PHY 0x300
-
-struct hdmi_reg { u16 idx; };
-
-#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
-
-/* HDMI Wrapper */
-#define HDMI_WP_REG(idx) HDMI_REG(HDMI_WP + idx)
-
-#define HDMI_WP_REVISION HDMI_WP_REG(0x0)
-#define HDMI_WP_SYSCONFIG HDMI_WP_REG(0x10)
-#define HDMI_WP_IRQSTATUS_RAW HDMI_WP_REG(0x24)
-#define HDMI_WP_IRQSTATUS HDMI_WP_REG(0x28)
-#define HDMI_WP_PWR_CTRL HDMI_WP_REG(0x40)
-#define HDMI_WP_IRQENABLE_SET HDMI_WP_REG(0x2C)
-#define HDMI_WP_VIDEO_CFG HDMI_WP_REG(0x50)
-#define HDMI_WP_VIDEO_SIZE HDMI_WP_REG(0x60)
-#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
-#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
-#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
-#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80)
-#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84)
-#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88)
-#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C)
-
-/* HDMI IP Core System */
-#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
-
-#define HDMI_CORE_SYS_VND_IDL HDMI_CORE_SYS_REG(0x0)
-#define HDMI_CORE_SYS_DEV_IDL HDMI_CORE_SYS_REG(0x8)
-#define HDMI_CORE_SYS_DEV_IDH HDMI_CORE_SYS_REG(0xC)
-#define HDMI_CORE_SYS_DEV_REV HDMI_CORE_SYS_REG(0x10)
-#define HDMI_CORE_SYS_SRST HDMI_CORE_SYS_REG(0x14)
-#define HDMI_CORE_CTRL1 HDMI_CORE_SYS_REG(0x20)
-#define HDMI_CORE_SYS_SYS_STAT HDMI_CORE_SYS_REG(0x24)
-#define HDMI_CORE_SYS_VID_ACEN HDMI_CORE_SYS_REG(0x124)
-#define HDMI_CORE_SYS_VID_MODE HDMI_CORE_SYS_REG(0x128)
-#define HDMI_CORE_SYS_INTR_STATE HDMI_CORE_SYS_REG(0x1C0)
-#define HDMI_CORE_SYS_INTR1 HDMI_CORE_SYS_REG(0x1C4)
-#define HDMI_CORE_SYS_INTR2 HDMI_CORE_SYS_REG(0x1C8)
-#define HDMI_CORE_SYS_INTR3 HDMI_CORE_SYS_REG(0x1CC)
-#define HDMI_CORE_SYS_INTR4 HDMI_CORE_SYS_REG(0x1D0)
-#define HDMI_CORE_SYS_UMASK1 HDMI_CORE_SYS_REG(0x1D4)
-#define HDMI_CORE_SYS_TMDS_CTRL HDMI_CORE_SYS_REG(0x208)
-#define HDMI_CORE_SYS_DE_DLY HDMI_CORE_SYS_REG(0xC8)
-#define HDMI_CORE_SYS_DE_CTRL HDMI_CORE_SYS_REG(0xCC)
-#define HDMI_CORE_SYS_DE_TOP HDMI_CORE_SYS_REG(0xD0)
-#define HDMI_CORE_SYS_DE_CNTL HDMI_CORE_SYS_REG(0xD8)
-#define HDMI_CORE_SYS_DE_CNTH HDMI_CORE_SYS_REG(0xDC)
-#define HDMI_CORE_SYS_DE_LINL HDMI_CORE_SYS_REG(0xE0)
-#define HDMI_CORE_SYS_DE_LINH_1 HDMI_CORE_SYS_REG(0xE4)
-#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
-#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
-#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
-
-/* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD HDMI_CORE_SYS_REG(0x3CC)
-#define HDMI_CORE_DDC_STATUS HDMI_CORE_SYS_REG(0x3C8)
-#define HDMI_CORE_DDC_ADDR HDMI_CORE_SYS_REG(0x3B4)
-#define HDMI_CORE_DDC_OFFSET HDMI_CORE_SYS_REG(0x3BC)
-#define HDMI_CORE_DDC_COUNT1 HDMI_CORE_SYS_REG(0x3C0)
-#define HDMI_CORE_DDC_COUNT2 HDMI_CORE_SYS_REG(0x3C4)
-#define HDMI_CORE_DDC_DATA HDMI_CORE_SYS_REG(0x3D0)
-#define HDMI_CORE_DDC_SEGM HDMI_CORE_SYS_REG(0x3B8)
-
-/* HDMI IP Core Audio Video */
-#define HDMI_CORE_AV_REG(idx) HDMI_REG(HDMI_CORE_AV + idx)
-
-#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
-#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
-#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
-#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
-#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
-#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
-#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
-#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
-#define HDMI_CORE_AV_AVI_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
-#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
-#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10)
-#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
-#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
-#define HDMI_CORE_AV_GEN2_DBYTE HDMI_CORE_AV_REG(0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
-#define HDMI_CORE_AV_ACR_CTRL HDMI_CORE_AV_REG(0x4)
-#define HDMI_CORE_AV_FREQ_SVAL HDMI_CORE_AV_REG(0x8)
-#define HDMI_CORE_AV_N_SVAL1 HDMI_CORE_AV_REG(0xC)
-#define HDMI_CORE_AV_N_SVAL2 HDMI_CORE_AV_REG(0x10)
-#define HDMI_CORE_AV_N_SVAL3 HDMI_CORE_AV_REG(0x14)
-#define HDMI_CORE_AV_CTS_SVAL1 HDMI_CORE_AV_REG(0x18)
-#define HDMI_CORE_AV_CTS_SVAL2 HDMI_CORE_AV_REG(0x1C)
-#define HDMI_CORE_AV_CTS_SVAL3 HDMI_CORE_AV_REG(0x20)
-#define HDMI_CORE_AV_CTS_HVAL1 HDMI_CORE_AV_REG(0x24)
-#define HDMI_CORE_AV_CTS_HVAL2 HDMI_CORE_AV_REG(0x28)
-#define HDMI_CORE_AV_CTS_HVAL3 HDMI_CORE_AV_REG(0x2C)
-#define HDMI_CORE_AV_AUD_MODE HDMI_CORE_AV_REG(0x50)
-#define HDMI_CORE_AV_SPDIF_CTRL HDMI_CORE_AV_REG(0x54)
-#define HDMI_CORE_AV_HW_SPDIF_FS HDMI_CORE_AV_REG(0x60)
-#define HDMI_CORE_AV_SWAP_I2S HDMI_CORE_AV_REG(0x64)
-#define HDMI_CORE_AV_SPDIF_ERTH HDMI_CORE_AV_REG(0x6C)
-#define HDMI_CORE_AV_I2S_IN_MAP HDMI_CORE_AV_REG(0x70)
-#define HDMI_CORE_AV_I2S_IN_CTRL HDMI_CORE_AV_REG(0x74)
-#define HDMI_CORE_AV_I2S_CHST0 HDMI_CORE_AV_REG(0x78)
-#define HDMI_CORE_AV_I2S_CHST1 HDMI_CORE_AV_REG(0x7C)
-#define HDMI_CORE_AV_I2S_CHST2 HDMI_CORE_AV_REG(0x80)
-#define HDMI_CORE_AV_I2S_CHST4 HDMI_CORE_AV_REG(0x84)
-#define HDMI_CORE_AV_I2S_CHST5 HDMI_CORE_AV_REG(0x88)
-#define HDMI_CORE_AV_ASRC HDMI_CORE_AV_REG(0x8C)
-#define HDMI_CORE_AV_I2S_IN_LEN HDMI_CORE_AV_REG(0x90)
-#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
-#define HDMI_CORE_AV_AUDO_TXSTAT HDMI_CORE_AV_REG(0xC0)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 HDMI_CORE_AV_REG(0xCC)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 HDMI_CORE_AV_REG(0xD0)
-#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 HDMI_CORE_AV_REG(0xD4)
-#define HDMI_CORE_AV_TEST_TXCTRL HDMI_CORE_AV_REG(0xF0)
-#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
-#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
-#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
-#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
-#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
-#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
-#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
-#define HDMI_CORE_AV_SPD_TYPE HDMI_CORE_AV_REG(0x180)
-#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
-#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
-#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
-#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200)
-#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204)
-#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208)
-#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C)
-#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
-#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
-#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
-#define HDMI_CORE_AV_MPEG_CHSUM HDMI_CORE_AV_REG(0x28C)
-#define HDMI_CORE_AV_CP_BYTE1 HDMI_CORE_AV_REG(0x37C)
-#define HDMI_CORE_AV_CEC_ADDR_ID HDMI_CORE_AV_REG(0x3FC)
-#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
-#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
-#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
-#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
-
-/* PLL */
-#define HDMI_PLL_REG(idx) HDMI_REG(HDMI_PLLCTRL + idx)
-
-#define PLLCTRL_PLL_CONTROL HDMI_PLL_REG(0x0)
-#define PLLCTRL_PLL_STATUS HDMI_PLL_REG(0x4)
-#define PLLCTRL_PLL_GO HDMI_PLL_REG(0x8)
-#define PLLCTRL_CFG1 HDMI_PLL_REG(0xC)
-#define PLLCTRL_CFG2 HDMI_PLL_REG(0x10)
-#define PLLCTRL_CFG3 HDMI_PLL_REG(0x14)
-#define PLLCTRL_CFG4 HDMI_PLL_REG(0x20)
-
-/* HDMI PHY */
-#define HDMI_PHY_REG(idx) HDMI_REG(HDMI_PHY + idx)
-
-#define HDMI_TXPHY_TX_CTRL HDMI_PHY_REG(0x0)
-#define HDMI_TXPHY_DIGITAL_CTRL HDMI_PHY_REG(0x4)
-#define HDMI_TXPHY_POWER_CTRL HDMI_PHY_REG(0x8)
-#define HDMI_TXPHY_PAD_CFG_CTRL HDMI_PHY_REG(0xC)
-
-/* HDMI EDID Length */
-#define HDMI_EDID_MAX_LENGTH 256
-#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
-#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
-#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
-#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
-#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
-
-#define OMAP_HDMI_TIMINGS_NB 34
-
-#define REG_FLD_MOD(idx, val, start, end) \
- hdmi_write_reg(idx, FLD_MOD(hdmi_read_reg(idx), val, start, end))
-#define REG_GET(idx, start, end) \
- FLD_GET(hdmi_read_reg(idx), start, end)
-
-/* HDMI timing structure */
-struct hdmi_timings {
- struct omap_video_timings timings;
- int vsync_pol;
- int hsync_pol;
-};
-
-enum hdmi_phy_pwr {
- HDMI_PHYPWRCMD_OFF = 0,
- HDMI_PHYPWRCMD_LDOON = 1,
- HDMI_PHYPWRCMD_TXON = 2
-};
-
-enum hdmi_pll_pwr {
- HDMI_PLLPWRCMD_ALLOFF = 0,
- HDMI_PLLPWRCMD_PLLONLY = 1,
- HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
- HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
-};
-
-enum hdmi_clk_refsel {
- HDMI_REFSEL_PCLK = 0,
- HDMI_REFSEL_REF1 = 1,
- HDMI_REFSEL_REF2 = 2,
- HDMI_REFSEL_SYSCLK = 3
-};
-
-enum hdmi_core_inputbus_width {
- HDMI_INPUT_8BIT = 0,
- HDMI_INPUT_10BIT = 1,
- HDMI_INPUT_12BIT = 2
-};
-
-enum hdmi_core_dither_trunc {
- HDMI_OUTPUTTRUNCATION_8BIT = 0,
- HDMI_OUTPUTTRUNCATION_10BIT = 1,
- HDMI_OUTPUTTRUNCATION_12BIT = 2,
- HDMI_OUTPUTDITHER_8BIT = 3,
- HDMI_OUTPUTDITHER_10BIT = 4,
- HDMI_OUTPUTDITHER_12BIT = 5
-};
-
-enum hdmi_core_deepcolor_ed {
- HDMI_DEEPCOLORPACKECTDISABLE = 0,
- HDMI_DEEPCOLORPACKECTENABLE = 1
-};
-
-enum hdmi_core_packet_mode {
- HDMI_PACKETMODERESERVEDVALUE = 0,
- HDMI_PACKETMODE24BITPERPIXEL = 4,
- HDMI_PACKETMODE30BITPERPIXEL = 5,
- HDMI_PACKETMODE36BITPERPIXEL = 6,
- HDMI_PACKETMODE48BITPERPIXEL = 7
-};
-
-enum hdmi_core_hdmi_dvi {
- HDMI_DVI = 0,
- HDMI_HDMI = 1
-};
-
-enum hdmi_core_tclkselclkmult {
- HDMI_FPLL05IDCK = 0,
- HDMI_FPLL10IDCK = 1,
- HDMI_FPLL20IDCK = 2,
- HDMI_FPLL40IDCK = 3
-};
-
-enum hdmi_core_packet_ctrl {
- HDMI_PACKETENABLE = 1,
- HDMI_PACKETDISABLE = 0,
- HDMI_PACKETREPEATON = 1,
- HDMI_PACKETREPEATOFF = 0
-};
-
-/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
-enum hdmi_core_infoframe {
- HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
- HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
- HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
- HDMI_INFOFRAME_AVI_DB1B_NO = 0,
- HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
- HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
- HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
- HDMI_INFOFRAME_AVI_DB1S_0 = 0,
- HDMI_INFOFRAME_AVI_DB1S_1 = 1,
- HDMI_INFOFRAME_AVI_DB1S_2 = 2,
- HDMI_INFOFRAME_AVI_DB2C_NO = 0,
- HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
- HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
- HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
- HDMI_INFOFRAME_AVI_DB2M_NO = 0,
- HDMI_INFOFRAME_AVI_DB2M_43 = 1,
- HDMI_INFOFRAME_AVI_DB2M_169 = 2,
- HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
- HDMI_INFOFRAME_AVI_DB2R_43 = 9,
- HDMI_INFOFRAME_AVI_DB2R_169 = 10,
- HDMI_INFOFRAME_AVI_DB2R_149 = 11,
- HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
- HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
- HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
- HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
- HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
- HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
- HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
- HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
- HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
- HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
- HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
- HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
- HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
- HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
- HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
- HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
- HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
- HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
- HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
- HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
- HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
- HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
- HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
- HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
- HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
- HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
- HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
- HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
- HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
- HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
- HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
- HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
- HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
- HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
- HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
-};
-
-enum hdmi_packing_mode {
- HDMI_PACK_10b_RGB_YUV444 = 0,
- HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
- HDMI_PACK_20b_YUV422 = 2,
- HDMI_PACK_ALREADYPACKED = 7
-};
-
-enum hdmi_core_audio_sample_freq {
- HDMI_AUDIO_FS_32000 = 0x3,
- HDMI_AUDIO_FS_44100 = 0x0,
- HDMI_AUDIO_FS_48000 = 0x2,
- HDMI_AUDIO_FS_88200 = 0x8,
- HDMI_AUDIO_FS_96000 = 0xA,
- HDMI_AUDIO_FS_176400 = 0xC,
- HDMI_AUDIO_FS_192000 = 0xE,
- HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
-enum hdmi_core_audio_layout {
- HDMI_AUDIO_LAYOUT_2CH = 0,
- HDMI_AUDIO_LAYOUT_8CH = 1
-};
-
-enum hdmi_core_cts_mode {
- HDMI_AUDIO_CTS_MODE_HW = 0,
- HDMI_AUDIO_CTS_MODE_SW = 1
-};
-
-enum hdmi_stereo_channels {
- HDMI_AUDIO_STEREO_NOCHANNELS = 0,
- HDMI_AUDIO_STEREO_ONECHANNEL = 1,
- HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
- HDMI_AUDIO_STEREO_THREECHANNELS = 3,
- HDMI_AUDIO_STEREO_FOURCHANNELS = 4
-};
-
-enum hdmi_audio_type {
- HDMI_AUDIO_TYPE_LPCM = 0,
- HDMI_AUDIO_TYPE_IEC = 1
-};
-
-enum hdmi_audio_justify {
- HDMI_AUDIO_JUSTIFY_LEFT = 0,
- HDMI_AUDIO_JUSTIFY_RIGHT = 1
-};
-
-enum hdmi_audio_sample_order {
- HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
- HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
-};
-
-enum hdmi_audio_samples_perword {
- HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
- HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
-};
-
-enum hdmi_audio_sample_size {
- HDMI_AUDIO_SAMPLE_16BITS = 0,
- HDMI_AUDIO_SAMPLE_24BITS = 1
-};
-
-enum hdmi_audio_transf_mode {
- HDMI_AUDIO_TRANSF_DMA = 0,
- HDMI_AUDIO_TRANSF_IRQ = 1
-};
-
-enum hdmi_audio_blk_strt_end_sig {
- HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
- HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
-};
-
-enum hdmi_audio_i2s_config {
- HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
- HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
- HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
- HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
- HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
- HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
- HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
- HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
- HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
- HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
- HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
- HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
- HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
- HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
- HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
- HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
- HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
- HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
- HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
- HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
- HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
- HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
- HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
- HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
- HDMI_AUDIO_I2S_SD0_EN = 1,
- HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
- HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
- HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
-};
-
-enum hdmi_audio_mclk_mode {
- HDMI_AUDIO_MCLK_128FS = 0,
- HDMI_AUDIO_MCLK_256FS = 1,
- HDMI_AUDIO_MCLK_384FS = 2,
- HDMI_AUDIO_MCLK_512FS = 3,
- HDMI_AUDIO_MCLK_768FS = 4,
- HDMI_AUDIO_MCLK_1024FS = 5,
- HDMI_AUDIO_MCLK_1152FS = 6,
- HDMI_AUDIO_MCLK_192FS = 7
-};
-
-struct hdmi_core_video_config {
- enum hdmi_core_inputbus_width ip_bus_width;
- enum hdmi_core_dither_trunc op_dither_truc;
- enum hdmi_core_deepcolor_ed deep_color_pkt;
- enum hdmi_core_packet_mode pkt_mode;
- enum hdmi_core_hdmi_dvi hdmi_dvi;
- enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
-};
-
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_avi {
- u8 db1_format;
- /* Y0, Y1 rgb,yCbCr */
- u8 db1_active_info;
- /* A0 Active information Present */
- u8 db1_bar_info_dv;
- /* B0, B1 Bar info data valid */
- u8 db1_scan_info;
- /* S0, S1 scan information */
- u8 db2_colorimetry;
- /* C0, C1 colorimetry */
- u8 db2_aspect_ratio;
- /* M0, M1 Aspect ratio (4:3, 16:9) */
- u8 db2_active_fmt_ar;
- /* R0...R3 Active format aspect ratio */
- u8 db3_itc;
- /* ITC IT content. */
- u8 db3_ec;
- /* EC0, EC1, EC2 Extended colorimetry */
- u8 db3_q_range;
- /* Q1, Q0 Quantization range */
- u8 db3_nup_scaling;
- /* SC1, SC0 Non-uniform picture scaling */
- u8 db4_videocode;
- /* VIC0..6 Video format identification */
- u8 db5_pixel_repeat;
- /* PR0..PR3 Pixel repetition factor */
- u16 db6_7_line_eoftop;
- /* Line number end of top bar */
- u16 db8_9_line_sofbottom;
- /* Line number start of bottom bar */
- u16 db10_11_pixel_eofleft;
- /* Pixel number end of left bar */
- u16 db12_13_pixel_sofright;
- /* Pixel number start of right bar */
-};
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
- u8 db1_coding_type;
- u8 db1_channel_count;
- u8 db2_sample_freq;
- u8 db2_sample_size;
- u8 db4_channel_alloc;
- bool db5_downmix_inh;
- u8 db5_lsv; /* Level shift values for downmix */
-};
-
-struct hdmi_core_packet_enable_repeat {
- u32 audio_pkt;
- u32 audio_pkt_repeat;
- u32 avi_infoframe;
- u32 avi_infoframe_repeat;
- u32 gen_cntrl_pkt;
- u32 gen_cntrl_pkt_repeat;
- u32 generic_pkt;
- u32 generic_pkt_repeat;
-};
-
-struct hdmi_video_format {
- enum hdmi_packing_mode packing_mode;
- u32 y_res; /* Line per panel */
- u32 x_res; /* pixel per line */
-};
-
-struct hdmi_video_interface {
- int vsp; /* Vsync polarity */
- int hsp; /* Hsync polarity */
- int interlacing;
- int tm; /* Timing mode */
-};
-
-struct hdmi_cm {
- int code;
- int mode;
-};
-
-struct hdmi_config {
- struct hdmi_timings timings;
- u16 interlace;
- struct hdmi_cm cm;
-};
-
-struct hdmi_audio_format {
- enum hdmi_stereo_channels stereo_channels;
- u8 active_chnnls_msk;
- enum hdmi_audio_type type;
- enum hdmi_audio_justify justification;
- enum hdmi_audio_sample_order sample_order;
- enum hdmi_audio_samples_perword samples_per_word;
- enum hdmi_audio_sample_size sample_size;
- enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
-};
-
-struct hdmi_audio_dma {
- u8 transfer_size;
- u8 block_size;
- enum hdmi_audio_transf_mode mode;
- u16 fifo_threshold;
-};
-
-struct hdmi_core_audio_i2s_config {
- u8 word_max_length;
- u8 word_length;
- u8 in_length_bits;
- u8 justification;
- u8 en_high_bitrate_aud;
- u8 sck_edge_mode;
- u8 cbit_order;
- u8 vbit;
- u8 ws_polarity;
- u8 direction;
- u8 shift;
- u8 active_sds;
-};
-
-struct hdmi_core_audio_config {
- struct hdmi_core_audio_i2s_config i2s_cfg;
- enum hdmi_core_audio_sample_freq freq_sample;
- bool fs_override;
- u32 n;
- u32 cts;
- u32 aud_par_busclk;
- enum hdmi_core_audio_layout layout;
- enum hdmi_core_cts_mode cts_mode;
- bool use_mclk;
- enum hdmi_audio_mclk_mode mclk_mode;
- bool en_acr_pkt;
- bool en_dsd_audio;
- bool en_parallel_aud_input;
- bool en_spdif;
-};
-#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c
deleted file mode 100644
index 7d4f2bd..0000000
--- a/drivers/video/omap2/dss/hdmi_omap4_panel.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * hdmi_omap4_panel.c
- *
- * HDMI library support functions for TI OMAP4 processors.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Mythri P k <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <video/omapdss.h>
-
-#include "dss.h"
-
-static struct {
- struct mutex hdmi_lock;
-} hdmi;
-
-
-static int hdmi_panel_probe(struct omap_dss_device *dssdev)
-{
- DSSDBG("ENTER hdmi_panel_probe\n");
-
- dssdev->panel.config = OMAP_DSS_LCD_TFT |
- OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
-
- /*
- * Initialize the timings to 640 * 480
- * This is only for framebuffer update not for TV timing setting
- * Setting TV timing will be done only on enable
- */
- dssdev->panel.timings.x_res = 640;
- dssdev->panel.timings.y_res = 480;
-
- DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
- return 0;
-}
-
-static void hdmi_panel_remove(struct omap_dss_device *dssdev)
-{
-
-}
-
-static int hdmi_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
- DSSDBG("ENTER hdmi_panel_enable\n");
-
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- r = -EINVAL;
- goto err;
- }
-
- r = omapdss_hdmi_display_enable(dssdev);
- if (r) {
- DSSERR("failed to power on\n");
- goto err;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&hdmi.hdmi_lock);
-
- return r;
-}
-
-static void hdmi_panel_disable(struct omap_dss_device *dssdev)
-{
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- omapdss_hdmi_display_disable(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&hdmi.hdmi_lock);
-}
-
-static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- omapdss_hdmi_display_disable(dssdev);
-
-err:
- mutex_unlock(&hdmi.hdmi_lock);
-
- return r;
-}
-
-static int hdmi_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- r = omapdss_hdmi_display_enable(dssdev);
- if (r) {
- DSSERR("failed to power on\n");
- goto err;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&hdmi.hdmi_lock);
-
- return r;
-}
-
-static void hdmi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- mutex_lock(&hdmi.hdmi_lock);
-
- *timings = dssdev->panel.timings;
-
- mutex_unlock(&hdmi.hdmi_lock);
-}
-
-static void hdmi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- DSSDBG("hdmi_set_timings\n");
-
- mutex_lock(&hdmi.hdmi_lock);
-
- dssdev->panel.timings = *timings;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- /* turn the hdmi off and on to get new timings to use */
- omapdss_hdmi_display_disable(dssdev);
- omapdss_hdmi_display_set_timing(dssdev);
- }
-
- mutex_unlock(&hdmi.hdmi_lock);
-}
-
-static int hdmi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- int r = 0;
-
- DSSDBG("hdmi_check_timings\n");
-
- mutex_lock(&hdmi.hdmi_lock);
-
- r = omapdss_hdmi_display_check_timing(dssdev, timings);
- if (r) {
- DSSERR("Timing cannot be applied\n");
- goto err;
- }
-err:
- mutex_unlock(&hdmi.hdmi_lock);
- return r;
-}
-
-static struct omap_dss_driver hdmi_driver = {
- .probe = hdmi_panel_probe,
- .remove = hdmi_panel_remove,
- .enable = hdmi_panel_enable,
- .disable = hdmi_panel_disable,
- .suspend = hdmi_panel_suspend,
- .resume = hdmi_panel_resume,
- .get_timings = hdmi_get_timings,
- .set_timings = hdmi_set_timings,
- .check_timings = hdmi_check_timings,
- .driver = {
- .name = "hdmi_panel",
- .owner = THIS_MODULE,
- },
-};
-
-int hdmi_panel_init(void)
-{
- mutex_init(&hdmi.hdmi_lock);
-
- omap_dss_register_driver(&hdmi_driver);
-
- return 0;
-}
-
-void hdmi_panel_exit(void)
-{
- omap_dss_unregister_driver(&hdmi_driver);
-
-}
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
new file mode 100644
index 0000000..0fa5dea
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -0,0 +1,339 @@
+/*
+ * hdmi_panel.c
+ *
+ * HDMI library support functions for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Mythri P k <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <video/omapdss.h>
+#include <linux/switch.h>
+
+#include "dss.h"
+
+#include <video/hdmi_ti_4xxx_ip.h>
+
+static struct {
+ struct mutex hdmi_lock;
+ struct switch_dev hpd_switch;
+} hdmi;
+
+static ssize_t hdmi_deepcolor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int r;
+ r = omapdss_hdmi_get_deepcolor();
+ return snprintf(buf, PAGE_SIZE, "%d\n", r);
+}
+
+static ssize_t hdmi_deepcolor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long deep_color;
+ int r = kstrtoul(buf, 0, &deep_color);
+ if (r || deep_color > 2)
+ return -EINVAL;
+ omapdss_hdmi_set_deepcolor(deep_color);
+ return size;
+}
+
+static DEVICE_ATTR(deepcolor, S_IRUGO | S_IWUSR, hdmi_deepcolor_show,
+ hdmi_deepcolor_store);
+
+static int hdmi_panel_probe(struct omap_dss_device *dssdev)
+{
+ DSSDBG("ENTER hdmi_panel_probe\n");
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
+
+ /*
+ * Initialize the timings to 640 x 480.
+ * This is only for the framebuffer update, not for the TV timing
+ * setting; the TV timings are applied only on enable.
+ */
+ dssdev->panel.timings.x_res = 640;
+ dssdev->panel.timings.y_res = 480;
+
+ /* sysfs entry to provide user space control to set deepcolor mode */
+ if (device_create_file(&dssdev->dev, &dev_attr_deepcolor))
+ DSSERR("failed to create sysfs file\n");
+
+ DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+ return 0;
+}
+
+static void hdmi_panel_remove(struct omap_dss_device *dssdev)
+{
+ device_remove_file(&dssdev->dev, &dev_attr_deepcolor);
+}
+
+static int hdmi_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ DSSDBG("ENTER hdmi_panel_enable\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r) {
+ DSSERR("failed to power on\n");
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static void hdmi_panel_disable(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_hdmi_display_disable(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ hdmi_panel_hpd_handler(0);
+
+ omapdss_hdmi_display_disable(dssdev);
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static int hdmi_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ hdmi_panel_hpd_handler(hdmi_get_current_hpd());
+
+ return r;
+}
+
+enum {
+ HPD_STATE_OFF,
+ HPD_STATE_START,
+ HPD_STATE_EDID_TRYLAST = HPD_STATE_START + 5,
+};
+
+static struct hpd_worker_data {
+ struct delayed_work dwork;
+ atomic_t state;
+} hpd_work;
+static struct workqueue_struct *my_workq;
+
+static void hdmi_hotplug_detect_worker(struct work_struct *work)
+{
+ struct hpd_worker_data *d = container_of(work, typeof(*d), dwork.work);
+ struct omap_dss_device *dssdev = NULL;
+ int state = atomic_read(&d->state);
+
+ int match(struct omap_dss_device *dssdev, void *arg)
+ {
+ return sysfs_streq(dssdev->name, "hdmi");
+ }
+ dssdev = omap_dss_find_device(NULL, match);
+
+ pr_err("in hpd work %d, state=%d\n", state, dssdev->state);
+ if (dssdev == NULL)
+ return;
+
+ mutex_lock(&hdmi.hdmi_lock);
+ if (state == HPD_STATE_OFF) {
+ switch_set_state(&hdmi.hpd_switch, 0);
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ mutex_unlock(&hdmi.hdmi_lock);
+ dssdev->driver->disable(dssdev);
+ mutex_lock(&hdmi.hdmi_lock);
+ }
+ goto done;
+ } else {
+ if (state == HPD_STATE_START) {
+ mutex_unlock(&hdmi.hdmi_lock);
+ dssdev->driver->enable(dssdev);
+ mutex_lock(&hdmi.hdmi_lock);
+ } else if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+ hdmi.hpd_switch.state) {
+ /* powered down after enable - skip EDID read */
+ goto done;
+ } else if (hdmi_read_edid(&dssdev->panel.timings)) {
+ /* get monspecs from edid */
+ hdmi_get_monspecs(&dssdev->panel.monspecs);
+ pr_info("panel size %d by %d\n",
+ dssdev->panel.monspecs.max_x,
+ dssdev->panel.monspecs.max_y);
+ dssdev->panel.width_in_um =
+ dssdev->panel.monspecs.max_x * 10000;
+ dssdev->panel.height_in_um =
+ dssdev->panel.monspecs.max_y * 10000;
+ switch_set_state(&hdmi.hpd_switch, 1);
+ goto done;
+ } else if (state == HPD_STATE_EDID_TRYLAST) {
+ pr_info("Failed to read EDID after %d attempts. Giving up.\n",
+ state - HPD_STATE_START);
+ goto done;
+ }
+ if (atomic_add_unless(&d->state, 1, HPD_STATE_OFF))
+ queue_delayed_work(my_workq, &d->dwork, msecs_to_jiffies(60));
+ }
+done:
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+int hdmi_panel_hpd_handler(int hpd)
+{
+ __cancel_delayed_work(&hpd_work.dwork);
+ atomic_set(&hpd_work.state, hpd ? HPD_STATE_START : HPD_STATE_OFF);
+ queue_delayed_work(my_workq, &hpd_work.dwork, msecs_to_jiffies(hpd ? 40 : 30));
+ return 0;
+}
+
+static void hdmi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ *timings = dssdev->panel.timings;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static void hdmi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("hdmi_set_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_hdmi_display_set_timing(dssdev);
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ int r = 0;
+
+ DSSDBG("hdmi_check_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ r = omapdss_hdmi_display_check_timing(dssdev, timings);
+ if (r) {
+ DSSERR("Timing cannot be applied\n");
+ goto err;
+ }
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+ return r;
+}
+
+static int hdmi_get_modedb(struct omap_dss_device *dssdev,
+ struct fb_videomode *modedb, int modedb_len)
+{
+ struct fb_monspecs *specs = &dssdev->panel.monspecs;
+ if (specs->modedb_len < modedb_len)
+ modedb_len = specs->modedb_len;
+ memcpy(modedb, specs->modedb, sizeof(*modedb) * modedb_len);
+ return modedb_len;
+}
+
+static struct omap_dss_driver hdmi_driver = {
+ .probe = hdmi_panel_probe,
+ .remove = hdmi_panel_remove,
+ .enable = hdmi_panel_enable,
+ .disable = hdmi_panel_disable,
+ .suspend = hdmi_panel_suspend,
+ .resume = hdmi_panel_resume,
+ .get_timings = hdmi_get_timings,
+ .set_timings = hdmi_set_timings,
+ .check_timings = hdmi_check_timings,
+ .get_modedb = hdmi_get_modedb,
+ .set_mode = omapdss_hdmi_display_set_mode,
+ .driver = {
+ .name = "hdmi_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+int hdmi_panel_init(void)
+{
+ mutex_init(&hdmi.hdmi_lock);
+ hdmi.hpd_switch.name = "hdmi";
+ switch_dev_register(&hdmi.hpd_switch);
+
+ my_workq = create_singlethread_workqueue("hdmi_hotplug");
+ INIT_DELAYED_WORK(&hpd_work.dwork, hdmi_hotplug_detect_worker);
+ omap_dss_register_driver(&hdmi_driver);
+
+ return 0;
+}
+
+void hdmi_panel_exit(void)
+{
+ destroy_workqueue(my_workq);
+ omap_dss_unregister_driver(&hdmi_driver);
+
+ switch_dev_unregister(&hdmi.hpd_switch);
+}
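[Editor's note] The new hdmi_panel.c above ties together three pieces: a single-threaded workqueue, a delayed work item that is re-armed while hot-plug state settles (with bounded EDID retries), and an Android switch device that reports the final plug state to user space. Below is a minimal, self-contained sketch of just that debounce-and-report pattern, assuming the Android switch class (<linux/switch.h>) is available, as it is in this tree. All names (example_*) are placeholders, not part of the driver above.

        #include <linux/module.h>
        #include <linux/workqueue.h>
        #include <linux/jiffies.h>
        #include <linux/atomic.h>
        #include <linux/switch.h>

        static struct workqueue_struct *example_wq;
        static struct switch_dev example_switch = { .name = "example_hpd" };

        static struct {
                struct delayed_work dwork;
                atomic_t state;                 /* 0 = unplugged, 1 = plugged */
        } example_hpd;

        static void example_hpd_worker(struct work_struct *work)
        {
                /* Report the debounced state to user space via /sys/class/switch */
                switch_set_state(&example_switch, atomic_read(&example_hpd.state));
        }

        /* Called from the real hot-plug interrupt or GPIO handler */
        static void example_hpd_event(int plugged)
        {
                atomic_set(&example_hpd.state, plugged);
                /* Debounce: coalesce bursts of events into a single worker run */
                cancel_delayed_work(&example_hpd.dwork);
                queue_delayed_work(example_wq, &example_hpd.dwork,
                                   msecs_to_jiffies(40));
        }

        static int __init example_hpd_init(void)
        {
                int r = switch_dev_register(&example_switch);
                if (r)
                        return r;

                example_wq = create_singlethread_workqueue("example_hpd");
                if (!example_wq) {
                        switch_dev_unregister(&example_switch);
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&example_hpd.dwork, example_hpd_worker);
                example_hpd_event(0);           /* seed the initial state */
                return 0;
        }

        static void __exit example_hpd_exit(void)
        {
                cancel_delayed_work_sync(&example_hpd.dwork);
                destroy_workqueue(example_wq);
                switch_dev_unregister(&example_switch);
        }

        module_init(example_hpd_init);
        module_exit(example_hpd_exit);
        MODULE_LICENSE("GPL");

The real worker above additionally walks HPD_STATE_START through HPD_STATE_EDID_TRYLAST so that a slow monitor gets several EDID read attempts before the driver gives up.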
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9aeea50..5888688 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -28,6 +28,8 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
@@ -37,6 +39,7 @@
static int num_managers;
static struct list_head manager_list;
+static struct omap_overlay_manager *mgrs[MAX_DSS_MANAGERS];
static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
{
@@ -260,6 +263,10 @@
if (sscanf(buf, "%d", &enable) != 1)
return -EINVAL;
+ /* if we have OMAP3 alpha compatibility, alpha blending is always on */
+ if (dss_has_feature(FEAT_ALPHA_OMAP3_COMPAT) && !enable)
+ return -EINVAL;
+
mgr->get_manager_info(mgr, &info);
info.alpha_enabled = enable ? true : false;
@@ -275,6 +282,108 @@
return size;
}
+static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+}
+
+static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int v;
+ int r;
+ bool enable;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ r = kstrtoint(buf, 0, &v);
+ if (r)
+ return r;
+
+ enable = !!v;
+
+ mgr->get_manager_info(mgr, &info);
+
+ if (info.cpr_enable == enable)
+ return size;
+
+ info.cpr_enable = enable;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE,
+ "%d %d %d %d %d %d %d %d %d\n",
+ info.cpr_coefs.rr,
+ info.cpr_coefs.rg,
+ info.cpr_coefs.rb,
+ info.cpr_coefs.gr,
+ info.cpr_coefs.gg,
+ info.cpr_coefs.gb,
+ info.cpr_coefs.br,
+ info.cpr_coefs.bg,
+ info.cpr_coefs.bb);
+}
+
+static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ struct omap_dss_cpr_coefs coefs;
+ int r, i;
+ s16 *arr;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
+ &coefs.rr, &coefs.rg, &coefs.rb,
+ &coefs.gr, &coefs.gg, &coefs.gb,
+ &coefs.br, &coefs.bg, &coefs.bb) != 9)
+ return -EINVAL;
+
+ arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
+ coefs.gr, coefs.gg, coefs.gb,
+ coefs.br, coefs.bg, coefs.bb };
+
+ for (i = 0; i < 9; ++i) {
+ if (arr[i] < -512 || arr[i] > 511)
+ return -EINVAL;
+ }
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.cpr_coefs = coefs;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
struct manager_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay_manager *, char *);
@@ -300,6 +409,12 @@
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
manager_alpha_blending_enabled_show,
manager_alpha_blending_enabled_store);
+static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
+ manager_cpr_enable_show,
+ manager_cpr_enable_store);
+static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
+ manager_cpr_coef_show,
+ manager_cpr_coef_store);
static struct attribute *manager_sysfs_attrs[] = {
@@ -310,6 +425,8 @@
&manager_attr_trans_key_value.attr,
&manager_attr_trans_key_enabled.attr,
&manager_attr_alpha_blending_enabled.attr,
+ &manager_attr_cpr_enable.attr,
+ &manager_attr_cpr_coef.attr,
NULL
};
@@ -353,6 +470,20 @@
.default_attrs = manager_sysfs_attrs,
};
+struct callback_states {
+ /*
+ * Keep track of callbacks at the last 3 levels of pipeline:
+ * cache, shadow registers and in DISPC registers.
+ *
+ * Note: We zero the function pointer when moving from one level to
+ * another to avoid checking for dirty and shadow_dirty fields that
+ * are not common between overlay and manager cache structures.
+ */
+ struct omapdss_ovl_cb cache, shadow, dispc;
+ bool dispc_displayed;
+ bool shadow_enabled;
+};
+
/*
* We have 4 levels of cache for the dispc settings. First two are in SW and
* the latter two in HW.
@@ -409,15 +540,21 @@
u8 global_alpha;
u8 pre_mult_alpha;
+ struct callback_states cb; /* callback data for the last 3 states */
+ int dispc_channel; /* overlay's channel in DISPC */
+
enum omap_channel channel;
bool replication;
bool ilace;
+ u16 min_x_decim, max_x_decim, min_y_decim, max_y_decim;
enum omap_burst_size burst_size;
u32 fifo_low;
u32 fifo_high;
bool manual_update;
+ enum omap_overlay_zorder zorder;
+ struct omap_dss_cconv_coefs cconv;
};
struct manager_cache_data {
@@ -447,6 +584,12 @@
/* enlarge the update area if the update area contains scaled
* overlays */
bool enlarge_update_area;
+
+ struct callback_states cb; /* callback data for the last 3 states */
+
+ bool cpr_enable;
+ struct omap_dss_cpr_coefs cpr_coefs;
+ bool skip_init;
};
static struct {
@@ -455,9 +598,43 @@
struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
bool irq_enabled;
+ u32 comp_irq_enabled;
} dss_cache;
+/* propagating callback info between states */
+static inline void
+dss_ovl_configure_cb(struct callback_states *st, int i, bool enabled)
+{
+ /* complete info in shadow */
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_ECLIPSED_SHADOW);
+ /* propagate cache to shadow */
+ st->shadow = st->cache;
+ st->shadow_enabled = enabled;
+ st->cache.fn = NULL; /* info traveled to shadow */
+}
+
+static inline void
+dss_ovl_program_cb(struct callback_states *st, int i)
+{
+ /* mark previous programming as completed */
+ dss_ovl_cb(&st->dispc, i, st->dispc_displayed ?
+ DSS_COMPLETION_RELEASED : DSS_COMPLETION_TORN);
+
+ /* mark shadow info as programmed, not yet displayed */
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_PROGRAMMED);
+
+ /* if overlay/manager is not enabled, we are done now */
+ if (!st->shadow_enabled) {
+ dss_ovl_cb(&st->shadow, i, DSS_COMPLETION_RELEASED);
+ st->shadow.fn = NULL;
+ }
+
+ /* propagate shadow to dispc */
+ st->dispc = st->shadow;
+ st->shadow.fn = NULL;
+ st->dispc_displayed = false;
+}
static int omap_dss_set_device(struct omap_overlay_manager *mgr,
struct omap_dss_device *dssdev)
@@ -492,6 +669,12 @@
mgr->device = dssdev;
mgr->device_changed = true;
+ if (dssdev->type == OMAP_DISPLAY_TYPE_DSI &&
+ !(dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE))
+ omap_dispc_set_irq_type(mgr->id, OMAP_DISPC_IRQ_TYPE_VSYNC);
+ else
+ omap_dispc_set_irq_type(mgr->id, OMAP_DISPC_IRQ_TYPE_FRAMEDONE);
+
return 0;
}
@@ -513,6 +696,7 @@
{
unsigned long timeout = msecs_to_jiffies(500);
u32 irq;
+ int r;
if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD;
@@ -524,7 +708,11 @@
else
irq = DISPC_IRQ_VSYNC2;
}
- return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (!r)
+ mgr->device->first_vsync = true;
+
+ return r;
}
static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
@@ -541,7 +729,8 @@
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN
+ | DISPC_IRQ_FRAMEDONETV;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
@@ -588,6 +777,8 @@
}
r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (!r)
+ mgr->device->first_vsync = true;
if (r == -ERESTARTSYS)
break;
@@ -619,7 +810,8 @@
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN
+ | DISPC_IRQ_FRAMEDONETV;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
@@ -737,6 +929,8 @@
u16 x, y, w, h;
u32 paddr;
int r;
+ u16 x_decim, y_decim;
+ bool five_taps;
u16 orig_w, orig_h, orig_outw, orig_outh;
DSSDBGF("%d", plane);
@@ -779,11 +973,17 @@
case OMAP_DSS_COLOR_NV12:
bpp = 8;
break;
+
+ case OMAP_DSS_COLOR_CLUT1:
+ case OMAP_DSS_COLOR_CLUT2:
+ case OMAP_DSS_COLOR_CLUT4:
+ case OMAP_DSS_COLOR_CLUT8:
case OMAP_DSS_COLOR_RGB16:
case OMAP_DSS_COLOR_ARGB16:
case OMAP_DSS_COLOR_YUV2:
case OMAP_DSS_COLOR_UYVY:
case OMAP_DSS_COLOR_RGBA16:
+ case OMAP_DSS_COLOR_RGB12U:
case OMAP_DSS_COLOR_RGBX16:
case OMAP_DSS_COLOR_ARGB16_1555:
case OMAP_DSS_COLOR_XRGB16_1555:
@@ -849,14 +1049,20 @@
}
}
- r = dispc_setup_plane(plane,
+ r = dispc_scaling_decision(w, h, outw, outh,
+ plane, c->color_mode, c->channel,
+ c->rotation, c->rotation_type,
+ c->min_x_decim, c->max_x_decim,
+ c->min_y_decim, c->max_y_decim,
+ &x_decim, &y_decim, &five_taps);
+ r = r ? : dispc_setup_plane(plane,
paddr,
c->screen_width,
x, y,
w, h,
outw, outh,
c->color_mode,
- c->ilace,
+ c->ilace, x_decim, y_decim, five_taps,
c->rotation_type,
c->rotation,
c->mirror,
@@ -875,7 +1081,12 @@
dispc_enable_replication(plane, c->replication);
dispc_set_burst_size(plane, c->burst_size);
- dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+ dispc_set_zorder(plane, c->zorder);
+ dispc_enable_zorder(plane, 1);
+ if (!cpu_is_omap44xx())
+ dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+ if (plane != OMAP_DSS_GFX)
+ _dispc_setup_color_conv_coef(plane, &c->cconv);
dispc_enable_plane(plane, 1);
@@ -893,7 +1104,19 @@
dispc_set_default_color(channel, c->default_color);
dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
dispc_enable_trans_key(channel, c->trans_enabled);
- dispc_enable_alpha_blending(channel, c->alpha_enabled);
+
+ /* if we have OMAP3 alpha compatibility, alpha blending is always on */
+ if (dss_has_feature(FEAT_ALPHA_OMAP3_COMPAT)) {
+ /* and alpha_blending bit enables OMAP3 compatibility mode */
+ dispc_enable_alpha_blending(channel, false);
+ c->alpha_enabled = true;
+ } else {
+ dispc_enable_alpha_blending(channel, c->alpha_enabled);
+ }
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_enable_cpr(channel, c->cpr_enable);
+ dispc_set_cpr_coef(channel, &c->cpr_coefs);
+ }
}
/* configure_dispc() tries to write values from cache to shadow registers.
@@ -908,6 +1131,7 @@
const int num_mgrs = dss_feat_get_num_mgrs();
int i;
int r;
+ int used_ovls, j;
bool mgr_busy[MAX_DSS_MANAGERS];
bool mgr_go[MAX_DSS_MANAGERS];
bool busy;
@@ -940,6 +1164,8 @@
if (r)
DSSERR("configure_overlay %d failed\n", i);
+ dss_ovl_configure_cb(&oc->cb, i, oc->enabled);
+
oc->dirty = false;
oc->shadow_dirty = true;
mgr_go[oc->channel] = true;
@@ -960,7 +1186,16 @@
continue;
}
+ for (j = used_ovls = 0; j < num_ovls; j++) {
+ oc = &dss_cache.overlay_cache[j];
+ if (oc->channel == i && oc->enabled)
+ used_ovls++;
+ }
+
configure_manager(i);
+
+ dss_ovl_configure_cb(&mc->cb, i, used_ovls);
+
mc->dirty = false;
mc->shadow_dirty = true;
mgr_go[i] = true;
@@ -976,8 +1211,12 @@
/* We don't need GO with manual update display. LCD iface will
* always be turned off after frame, and new settings will be
* taken in to use at next update */
- if (!mc->manual_upd_display)
- dispc_go(i);
+ if (!mc->manual_upd_display) {
+ if (mc->skip_init)
+ mc->skip_init = false;
+ else
+ dispc_go(i);
+ }
}
if (busy)
@@ -1139,6 +1378,89 @@
*hi = h;
}
+static void schedule_completion_irq(void);
+
+static void dss_completion_irq_handler(void *data, u32 mask)
+{
+ struct manager_cache_data *mc;
+ struct overlay_cache_data *oc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_mgrs = MAX_DSS_MANAGERS;
+ const u32 masks[] = {
+ DISPC_IRQ_FRAMEDONE | DISPC_IRQ_VSYNC,
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2,
+ DISPC_IRQ_FRAMEDONETV | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD
+ };
+ int i;
+
+ spin_lock(&dss_cache.lock);
+
+ for (i = 0; i < num_mgrs; i++) {
+ mc = &dss_cache.manager_cache[i];
+ if (mask & masks[i]) {
+ if (mgrs[i] && mgrs[i]->device)
+ mgrs[i]->device->first_vsync = true;
+ dss_ovl_cb(&mc->cb.dispc, i, DSS_COMPLETION_DISPLAYED);
+ mc->cb.dispc_displayed = true;
+ }
+ }
+
+ /* notify all overlays on that manager */
+ for (i = 0; i < num_ovls; i++) {
+ oc = &dss_cache.overlay_cache[i];
+ if (mask & masks[oc->channel]) {
+ dss_ovl_cb(&oc->cb.dispc, i, DSS_COMPLETION_DISPLAYED);
+ oc->cb.dispc_displayed = true;
+ }
+ }
+
+ schedule_completion_irq();
+
+ spin_unlock(&dss_cache.lock);
+}
+
+static void schedule_completion_irq(void)
+{
+ struct manager_cache_data *mc;
+ struct overlay_cache_data *oc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_mgrs = MAX_DSS_MANAGERS;
+ const u32 masks[] = {
+ DISPC_IRQ_FRAMEDONE | DISPC_IRQ_VSYNC,
+ DISPC_IRQ_FRAMEDONE2 | DISPC_IRQ_VSYNC2,
+ DISPC_IRQ_FRAMEDONETV | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD
+ };
+ u32 mask = 0;
+ int i;
+
+ for (i = 0; i < num_mgrs; i++) {
+ mc = &dss_cache.manager_cache[i];
+ if (mc->cb.dispc.fn &&
+ (mc->cb.dispc.mask & DSS_COMPLETION_DISPLAYED))
+ mask |= masks[i];
+ }
+
+ /* notify all overlays on that manager */
+ for (i = 0; i < num_ovls; i++) {
+ oc = &dss_cache.overlay_cache[i];
+ if (oc->cb.dispc.fn && oc->enabled &&
+ (oc->cb.dispc.mask & DSS_COMPLETION_DISPLAYED))
+ mask |= masks[oc->channel];
+ }
+
+ if (mask != dss_cache.comp_irq_enabled) {
+ if (dss_cache.comp_irq_enabled)
+ omap_dispc_unregister_isr(dss_completion_irq_handler,
+ NULL, dss_cache.comp_irq_enabled);
+ if (mask)
+ omap_dispc_register_isr(dss_completion_irq_handler,
+ NULL, mask);
+ dss_cache.comp_irq_enabled = mask;
+ }
+}
+
void dss_start_update(struct omap_dss_device *dssdev)
{
struct manager_cache_data *mc;
@@ -1147,14 +1469,20 @@
const int num_mgrs = dss_feat_get_num_mgrs();
struct omap_overlay_manager *mgr;
int i;
+ unsigned long flags;
mgr = dssdev->manager;
+ spin_lock_irqsave(&dss_cache.lock, flags);
for (i = 0; i < num_ovls; ++i) {
oc = &dss_cache.overlay_cache[i];
if (oc->channel != mgr->id)
continue;
+ if (oc->shadow_dirty) {
+ dss_ovl_program_cb(&oc->cb, i);
+ oc->dispc_channel = oc->channel;
+ }
oc->shadow_dirty = false;
}
@@ -1163,9 +1491,14 @@
if (mgr->id != i)
continue;
+ if (mc->shadow_dirty)
+ dss_ovl_program_cb(&mc->cb, i);
mc->shadow_dirty = false;
}
+ schedule_completion_irq();
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
dssdev->manager->enable(dssdev->manager);
}
@@ -1179,30 +1512,52 @@
bool mgr_busy[MAX_DSS_MANAGERS];
u32 irq_mask;
+ spin_lock(&dss_cache.lock);
+
for (i = 0; i < num_mgrs; i++)
mgr_busy[i] = dispc_go_busy(i);
- spin_lock(&dss_cache.lock);
-
for (i = 0; i < num_ovls; ++i) {
oc = &dss_cache.overlay_cache[i];
- if (!mgr_busy[oc->channel])
+ if (!mgr_busy[oc->channel] && oc->shadow_dirty) {
+ dss_ovl_program_cb(&oc->cb, i);
+ oc->dispc_channel = oc->channel;
oc->shadow_dirty = false;
+ }
}
for (i = 0; i < num_mgrs; ++i) {
mc = &dss_cache.manager_cache[i];
- if (!mgr_busy[i])
+ if (!mgr_busy[i] && mc->shadow_dirty) {
+ if (mgrs[i] && mgrs[i]->device)
+ mgrs[i]->device->first_vsync = true;
+ dss_ovl_program_cb(&mc->cb, i);
mc->shadow_dirty = false;
+ }
}
+ schedule_completion_irq();
+
r = configure_dispc();
if (r == 1)
goto end;
+ /*
+ * FIXME Sometimes when handling an interrupt for a manager, the
+ * manager is still busy at the beginning of the interrupt handler.
+ * Later it becomes idle, so we unregister the interrupt. This
+ * leaves the shadow_dirty flag in an incorrect true state, and also
+ * misses the 'programmed' callback.
+ *
+ * For now, we do not unregister the interrupt if any manager
+ * was busy at the first read of the GO bits. A better fix would be
+ * to keep the first read busy state in the cache, so we do not operate
+ * on instantaneous reads of the GO bit.
+ */
+
/* re-read busy flags */
for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_go_busy(i);
+ mgr_busy[i] |= dispc_go_busy(i);
/* keep running as long as there are busy managers, so that
* we can collect overlay-applied information */
@@ -1223,10 +1578,133 @@
spin_unlock(&dss_cache.lock);
}
+static int omap_dss_mgr_blank(struct omap_overlay_manager *mgr,
+ bool wait_for_go)
+{
+ struct overlay_cache_data *oc;
+ struct manager_cache_data *mc;
+ unsigned long flags;
+ int r, r_get, i;
+
+ DSSDBG("omap_dss_mgr_blank(%s,wait=%d)\n", mgr->name, wait_for_go);
+
+ r_get = r = dispc_runtime_get();
+ /* still clear cache even if failed to get clocks, just don't config */
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+
+ /* disable overlays in overlay info structs and in cache */
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay_info oi = { .enabled = false };
+ struct omap_overlay *ovl;
+
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC) ||
+ ovl->manager != mgr)
+ continue;
+
+ oc = &dss_cache.overlay_cache[ovl->id];
+
+ /* complete unconfigured info in cache */
+ dss_ovl_cb(&oc->cb.cache, i, DSS_COMPLETION_ECLIPSED_CACHE);
+ oc->cb.cache.fn = NULL;
+
+ ovl->info = oi;
+ ovl->info_dirty = false;
+ oc->dirty = true;
+ oc->enabled = false;
+ }
+
+ /* dirty manager */
+ mc = &dss_cache.manager_cache[mgr->id];
+ dss_ovl_cb(&mc->cb.cache, i, DSS_COMPLETION_ECLIPSED_CACHE);
+ mc->cb.cache.fn = NULL;
+ mgr->info.cb.fn = NULL;
+ mc->dirty = true;
+ mgr->info_dirty = false;
+
+ /*
+ * TRICKY: Enable apply irq even if not waiting for vsync, so that
+ * DISPC programming takes place in case GO bit was on.
+ */
+ if (!dss_cache.irq_enabled) {
+ u32 mask;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+ DISPC_IRQ_EVSYNC_EVEN;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ mask |= DISPC_IRQ_VSYNC2;
+
+ r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
+ dss_cache.irq_enabled = true;
+ }
+
+ if (!r_get) {
+ r = configure_dispc();
+ if (r)
+ pr_info("mgr_blank while GO is set");
+ }
+
+ if (r_get || !wait_for_go) {
+ /* pretend that programming has happened */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ oc = &dss_cache.overlay_cache[i];
+ if (oc->channel != mgr->id)
+ continue;
+ if (r && oc->dirty)
+ dss_ovl_configure_cb(&oc->cb, i, false);
+ if (oc->shadow_dirty) {
+ dss_ovl_program_cb(&oc->cb, i);
+ oc->dispc_channel = oc->channel;
+ oc->shadow_dirty = false;
+ } else {
+ pr_warn("ovl%d-shadow is not dirty\n", i);
+ }
+ }
+
+ if (r && mc->dirty)
+ dss_ovl_configure_cb(&mc->cb, i, false);
+ if (mc->shadow_dirty) {
+ dss_ovl_program_cb(&mc->cb, i);
+ mc->shadow_dirty = false;
+ } else {
+ pr_warn("mgr%d-shadow is not dirty\n", mgr->id);
+ }
+ }
+
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ if (wait_for_go)
+ mgr->wait_for_go(mgr);
+
+ if (!r_get)
+ dispc_runtime_put();
+
+ return r;
+}
+
+int omap_dss_manager_unregister_callback(struct omap_overlay_manager *mgr,
+ struct omapdss_ovl_cb *cb)
+{
+ unsigned long flags;
+ int r = 0;
+ spin_lock_irqsave(&dss_cache.lock, flags);
+ if (mgr->info_dirty &&
+ mgr->info.cb.fn == cb->fn &&
+ mgr->info.cb.data == cb->data)
+ mgr->info.cb.fn = NULL;
+ else
+ r = -EPERM;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+ return r;
+}
+
static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
+ struct omap_dss_device *dssdev;
int i;
struct omap_overlay *ovl;
int num_planes_enabled = 0;
@@ -1236,8 +1714,19 @@
DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
spin_lock_irqsave(&dss_cache.lock, flags);
+ if (!mgr->device || mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ pr_info_ratelimited("cannot apply mgr(%s) on inactive device\n",
+ mgr->name);
+ r = -ENODEV;
+ goto done;
+ }
+
/* Configure overlays */
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_dss_device *dssdev;
@@ -1247,34 +1736,39 @@
if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
continue;
- oc = &dss_cache.overlay_cache[ovl->id];
-
- if (!overlay_enabled(ovl)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
+ if (ovl->manager != mgr)
continue;
- }
- if (!ovl->info_dirty) {
+ oc = &dss_cache.overlay_cache[ovl->id];
+ dssdev = mgr->device;
+
+ if (!overlay_enabled(ovl) || !dssdev) {
+ ovl->info.enabled = false;
+ } else if (!ovl->info_dirty) {
if (oc->enabled)
++num_planes_enabled;
continue;
+ } else if (dss_check_overlay(ovl, dssdev)) {
+ ovl->info.enabled = false;
}
- dssdev = ovl->manager->device;
-
- if (dss_check_overlay(ovl, dssdev)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
- continue;
- }
+ /* complete unconfigured info in cache */
+ dss_ovl_cb(&oc->cb.cache, i,
+#if 0
+ (oc->cb.cache.fn == ovl->info.cb.fn &&
+ oc->cb.cache.data == ovl->info.cb.data) ?
+ DSS_COMPLETION_CHANGED_CACHE :
+#endif
+ DSS_COMPLETION_ECLIPSED_CACHE);
+ oc->cb.cache = ovl->info.cb;
+ ovl->info.cb.fn = NULL;
ovl->info_dirty = false;
- oc->dirty = true;
+ if (ovl->info.enabled || oc->enabled)
+ oc->dirty = true;
+ oc->enabled = ovl->info.enabled;
+ if (!oc->enabled)
+ continue;
oc->paddr = ovl->info.paddr;
oc->vaddr = ovl->info.vaddr;
@@ -1292,15 +1786,19 @@
oc->out_height = ovl->info.out_height;
oc->global_alpha = ovl->info.global_alpha;
oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
+ oc->zorder = ovl->info.zorder;
+ oc->min_x_decim = ovl->info.min_x_decim;
+ oc->max_x_decim = ovl->info.max_x_decim;
+ oc->min_y_decim = ovl->info.min_y_decim;
+ oc->max_y_decim = ovl->info.max_y_decim;
+ oc->cconv = ovl->info.cconv;
oc->replication =
dss_use_replication(dssdev, ovl->info.color_mode);
oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
- oc->channel = ovl->manager->id;
-
- oc->enabled = true;
+ oc->channel = mgr->id;
oc->manual_update =
dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
@@ -1310,46 +1808,59 @@
++num_planes_enabled;
}
- /* Configure managers */
- list_for_each_entry(mgr, &manager_list, list) {
- struct omap_dss_device *dssdev;
+ /* configure manager */
+ if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
+ goto skip_mgr;
- if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
- continue;
+ mc = &dss_cache.manager_cache[mgr->id];
- mc = &dss_cache.manager_cache[mgr->id];
-
- if (mgr->device_changed) {
- mgr->device_changed = false;
- mgr->info_dirty = true;
- }
-
- if (!mgr->info_dirty)
- continue;
-
- if (!mgr->device)
- continue;
-
- dssdev = mgr->device;
-
- mgr->info_dirty = false;
- mc->dirty = true;
-
- mc->default_color = mgr->info.default_color;
- mc->trans_key_type = mgr->info.trans_key_type;
- mc->trans_key = mgr->info.trans_key;
- mc->trans_enabled = mgr->info.trans_enabled;
- mc->alpha_enabled = mgr->info.alpha_enabled;
-
- mc->manual_upd_display =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
-
- mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
+ if (mgr->device_changed) {
+ mgr->device_changed = false;
+ mgr->info_dirty = true;
}
+ if (!mgr->info_dirty)
+ goto skip_mgr;
+
+ if (!mgr->device)
+ goto skip_mgr;
+
+ dssdev = mgr->device;
+
+ /* complete unconfigured info in cache */
+ dss_ovl_cb(&mc->cb.cache, mgr->id,
+#if 0
+ (mc->cb.cache.fn == mgr->info.cb.fn &&
+ mc->cb.cache.data == mgr->info.cb.data) ?
+ DSS_COMPLETION_CHANGED_CACHE :
+#endif
+ DSS_COMPLETION_ECLIPSED_CACHE);
+ mc->cb.cache = mgr->info.cb;
+ mgr->info.cb.fn = NULL;
+
+ mgr->info_dirty = false;
+ mc->dirty = true;
+
+ mc->default_color = mgr->info.default_color;
+ mc->trans_key_type = mgr->info.trans_key_type;
+ mc->trans_key = mgr->info.trans_key;
+ mc->trans_enabled = mgr->info.trans_enabled;
+ mc->alpha_enabled = mgr->info.alpha_enabled;
+ mc->cpr_coefs = mgr->info.cpr_coefs;
+ mc->cpr_enable = mgr->info.cpr_enable;
+
+ mc->manual_upd_display =
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+
+ mc->manual_update =
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+ dssdev->driver->get_update_mode(dssdev) !=
+ OMAP_DSS_UPDATE_AUTO;
+
+ mc->skip_init = dssdev->skip_init;
+
+skip_mgr:
+
/* XXX TODO: Try to get fifomerge working. The problem is that it
* affects both managers, not individually but at the same time. This
* means the change has to be well synchronized. I guess the proper way
@@ -1381,6 +1892,8 @@
continue;
dssdev = ovl->manager->device;
+ if (!dssdev)
+ continue;
size = dispc_get_plane_fifo_size(ovl->id);
if (use_fifomerge)
@@ -1409,7 +1922,6 @@
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1422,41 +1934,139 @@
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+done:
spin_unlock_irqrestore(&dss_cache.lock, flags);
+ dispc_runtime_put();
+
return r;
}
+#ifdef CONFIG_DEBUG_FS
+static void seq_print_cb(struct seq_file *s, struct omapdss_ovl_cb *cb)
+{
+ if (!cb->fn) {
+ seq_printf(s, "(none)\n");
+ return;
+ }
+
+ seq_printf(s, "mask=%c%c%c%c [%p] %pf\n",
+ (cb->mask & DSS_COMPLETION_CHANGED) ? 'C' : '-',
+ (cb->mask & DSS_COMPLETION_PROGRAMMED) ? 'P' : '-',
+ (cb->mask & DSS_COMPLETION_DISPLAYED) ? 'D' : '-',
+ (cb->mask & DSS_COMPLETION_RELEASED) ? 'R' : '-',
+ cb->data,
+ cb->fn);
+}
+#endif
+
+static void seq_print_cbs(struct omap_overlay_manager *mgr, struct seq_file *s)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct manager_cache_data *mc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+
+ mc = &dss_cache.manager_cache[mgr->id];
+
+ seq_printf(s, " DISPC pipeline:\n\n"
+ " info:%13s ", mgr->info_dirty ? "DIRTY" : "clean");
+ seq_print_cb(s, &mgr->info.cb);
+ seq_printf(s, " cache:%12s ", mc->dirty ? "DIRTY" : "clean");
+ seq_print_cb(s, &mc->cb.cache);
+ seq_printf(s, " shadow: %s %s ",
+ mc->cb.shadow_enabled ? "ACT" : "off",
+ mc->shadow_dirty ? "DIRTY" : "clean");
+ seq_print_cb(s, &mc->cb.shadow);
+ seq_printf(s, " dispc:%12s ",
+ mc->cb.dispc_displayed ? "DISPLAYED" : "");
+ seq_print_cb(s, &mc->cb.dispc);
+ seq_printf(s, "\n");
+
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+#endif
+}
+
static int dss_check_manager(struct omap_overlay_manager *mgr)
{
- /* OMAP supports only graphics source transparency color key and alpha
- * blending simultaneously. See TRM 15.4.2.4.2.2 Alpha Mode */
-
- if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
+ /* if we have OMAP3 alpha compatibility, alpha blending is always on */
+ if (dss_has_feature(FEAT_ALPHA_OMAP3_COMPAT)) {
+ if (!mgr->info.alpha_enabled)
+ return -EINVAL;
+ } else {
+ /*
+ * OMAP3- supports only graphics destination transparency
+ * color key and alpha blending simultaneously.
+ * See TRM 15.4.2.4.2.2 Alpha Mode.
+ */
+ if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
mgr->info.trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST)
- return -EINVAL;
+ return -EINVAL;
+ }
return 0;
}
+int omap_dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ int r;
+ struct omap_overlay_info old_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+ old_info = ovl->info;
+ ovl->info = *info;
+
+ if (ovl->manager) {
+ r = dss_check_overlay(ovl, ovl->manager->device);
+ if (r) {
+ ovl->info = old_info;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+ return r;
+ }
+ }
+
+ /* complete previous settings */
+ if (ovl->info_dirty)
+ dss_ovl_cb(&old_info.cb, ovl->id,
+ (info->cb.fn == old_info.cb.fn &&
+ info->cb.data == old_info.cb.data) ?
+ DSS_COMPLETION_CHANGED_SET :
+ DSS_COMPLETION_ECLIPSED_SET);
+
+ ovl->info_dirty = true;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ return 0;
+}
+
+
static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info)
{
int r;
struct omap_overlay_manager_info old_info;
+ unsigned long flags;
+ spin_lock_irqsave(&dss_cache.lock, flags);
old_info = mgr->info;
mgr->info = *info;
r = dss_check_manager(mgr);
if (r) {
mgr->info = old_info;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
return r;
}
+ if (mgr->info_dirty)
+ dss_ovl_cb(&old_info.cb, mgr->id, DSS_COMPLETION_ECLIPSED_SET);
+
mgr->info_dirty = true;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
return 0;
}
@@ -1469,13 +2079,13 @@
static int dss_mgr_enable(struct omap_overlay_manager *mgr)
{
- dispc_enable_channel(mgr->id, 1);
+ dispc_enable_channel(mgr->id, mgr->device->type, 1);
return 0;
}
static int dss_mgr_disable(struct omap_overlay_manager *mgr)
{
- dispc_enable_channel(mgr->id, 0);
+ dispc_enable_channel(mgr->id, mgr->device->type, 0);
return 0;
}
@@ -1483,6 +2093,8 @@
{
++num_managers;
list_add_tail(&manager->list, &manager_list);
+ if (manager->id < ARRAY_SIZE(mgrs))
+ mgrs[manager->id] = manager;
}
int dss_init_overlay_managers(struct platform_device *pdev)
@@ -1501,6 +2113,10 @@
BUG_ON(mgr == NULL);
+ /* alpha blending always on with OMAP3 alpha compatibility */
+ if (dss_has_feature(FEAT_ALPHA_OMAP3_COMPAT))
+ mgr->info.alpha_enabled = true;
+
switch (i) {
case 0:
mgr->name = "lcd";
@@ -1523,6 +2139,8 @@
mgr->get_manager_info = &omap_dss_mgr_get_info;
mgr->wait_for_go = &dss_mgr_wait_for_go;
mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
+ mgr->blank = &omap_dss_mgr_blank;
+ mgr->dump_cb = &seq_print_cbs;
mgr->enable = &dss_mgr_enable;
mgr->disable = &dss_mgr_disable;
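[Editor's note] The core idea the manager.c changes above add is a per-overlay/per-manager callback that travels down the configuration pipeline: cache (software), shadow registers, then DISPC registers, with completion events fired exactly once per level. The following standalone model (plain C, compilable in user space, not the kernel code above) shows only that hand-off; the event set and names are simplified and illustrative.

        #include <stdio.h>
        #include <stddef.h>

        enum cb_event { CB_ECLIPSED, CB_PROGRAMMED, CB_DISPLAYED, CB_RELEASED };

        struct ovl_cb {
                void (*fn)(void *data, enum cb_event ev);
                void *data;
        };

        struct cb_states {
                struct ovl_cb cache, shadow, dispc;
        };

        static void cb_notify(struct ovl_cb *cb, enum cb_event ev)
        {
                if (cb->fn)
                        cb->fn(cb->data, ev);
        }

        /* cache -> shadow: the configuration previously in shadow is eclipsed */
        static void cb_configure(struct cb_states *st)
        {
                cb_notify(&st->shadow, CB_ECLIPSED);
                st->shadow = st->cache;
                st->cache.fn = NULL;            /* callback moved down a level */
        }

        /* shadow -> dispc: old programming released, new one marked programmed */
        static void cb_program(struct cb_states *st)
        {
                cb_notify(&st->dispc, CB_RELEASED);
                cb_notify(&st->shadow, CB_PROGRAMMED);
                st->dispc = st->shadow;
                st->shadow.fn = NULL;
        }

        static void print_event(void *data, enum cb_event ev)
        {
                printf("%s: event %d\n", (const char *)data, ev);
        }

        int main(void)
        {
                struct cb_states st = { .cache = { print_event, (void *)"frame A" } };

                cb_configure(&st);      /* frame A now in shadow registers */
                st.cache = (struct ovl_cb){ print_event, (void *)"frame B" };
                cb_configure(&st);      /* frame A eclipsed, frame B in shadow */
                cb_program(&st);        /* frame B programmed into DISPC */
                return 0;
        }

In the real driver the same movement happens in dss_ovl_configure_cb() and dss_ovl_program_cb(), and schedule_completion_irq() only keeps a DISPC interrupt registered while some callback still wants a DISPLAYED notification.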
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 0f08025..e9d31c2 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -84,32 +84,42 @@
old_mgr = ovl->manager;
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
/* detach old manager */
if (old_mgr) {
r = ovl->unset_manager(ovl);
if (r) {
DSSERR("detach failed\n");
- return r;
+ goto err;
}
r = old_mgr->apply(old_mgr);
if (r)
- return r;
+ goto err;
}
if (mgr) {
r = ovl->set_manager(ovl, mgr);
if (r) {
DSSERR("Failed to attach overlay\n");
- return r;
+ goto err;
}
r = mgr->apply(mgr);
if (r)
- return r;
+ goto err;
}
+ dispc_runtime_put();
+
return size;
+
+err:
+ dispc_runtime_put();
+ return r;
}
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
@@ -238,6 +248,9 @@
u8 alpha;
struct omap_overlay_info info;
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ return -ENODEV;
+
r = kstrtou8(buf, 0, &alpha);
if (r)
return r;
@@ -308,6 +321,118 @@
return size;
}
+static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ovl->info.zorder);
+}
+
+static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ if (!dss_has_feature(FEAT_OVL_ZORDER))
+ return size;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.zorder = simple_strtoul(buf, NULL, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_decim_show(u16 min, u16 max, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d..%d\n", min, max);
+}
+
+static ssize_t overlay_x_decim_show(struct omap_overlay *ovl, char *buf)
+{
+ return overlay_decim_show(ovl->info.min_x_decim, ovl->info.max_x_decim,
+ buf);
+}
+
+static ssize_t overlay_y_decim_show(struct omap_overlay *ovl, char *buf)
+{
+ return overlay_decim_show(ovl->info.min_y_decim, ovl->info.max_y_decim,
+ buf);
+}
+
+static ssize_t overlay_decim_store(u16 *min, u16 *max,
+ const char *buf, size_t size)
+{
+ char *last;
+
+ *min = *max = simple_strtoul(buf, &last, 10);
+ if (last < buf + size && *last == '.') {
+ /* check for .. separator */
+ if (last + 2 >= buf + size || last[1] != '.')
+ return -EINVAL;
+
+ *max = simple_strtoul(last + 2, &last, 10);
+
+ /* fix order */
+ if (*max < *min)
+ swap(*min, *max);
+ }
+
+ /* decimation must be positive */
+ if (*min == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t overlay_x_decim_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ r = overlay_decim_store(&info.min_x_decim, &info.max_x_decim,
+ buf, size);
+
+ r = r ? : ovl->set_overlay_info(ovl, &info);
+
+ if (!r && ovl->manager)
+ r = ovl->manager->apply(ovl->manager);
+
+ return r ? : size;
+}
+
+static ssize_t overlay_y_decim_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ r = overlay_decim_store(&info.min_y_decim, &info.max_y_decim,
+ buf, size);
+
+ r = r ? : ovl->set_overlay_info(ovl, &info);
+
+ if (!r && ovl->manager)
+ r = ovl->manager->apply(ovl->manager);
+
+ return r ? : size;
+}
+
struct overlay_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay *, char *);
@@ -334,6 +459,12 @@
static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
overlay_pre_mult_alpha_show,
overlay_pre_mult_alpha_store);
+static OVERLAY_ATTR(x_decim, S_IRUGO|S_IWUSR,
+ overlay_x_decim_show, overlay_x_decim_store);
+static OVERLAY_ATTR(y_decim, S_IRUGO|S_IWUSR,
+ overlay_y_decim_show, overlay_y_decim_store);
+static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
+ overlay_zorder_show, overlay_zorder_store);
static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_name.attr,
@@ -345,6 +476,9 @@
&overlay_attr_enabled.attr,
&overlay_attr_global_alpha.attr,
&overlay_attr_pre_mult_alpha.attr,
+ &overlay_attr_zorder.attr,
+ &overlay_attr_x_decim.attr,
+ &overlay_attr_y_decim.attr,
NULL
};
@@ -449,28 +583,12 @@
return -EINVAL;
}
- return 0;
-}
-
-static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- int r;
- struct omap_overlay_info old_info;
-
- old_info = ovl->info;
- ovl->info = *info;
-
- if (ovl->manager) {
- r = dss_check_overlay(ovl, ovl->manager->device);
- if (r) {
- ovl->info = old_info;
- return r;
- }
+ if ((info->zorder < OMAP_DSS_OVL_ZORDER_0) ||
+ (info->zorder > OMAP_DSS_OVL_ZORDER_3)) {
+ DSSERR("overlay doesn't support zorder %d\n", info->zorder);
+ return -EINVAL;
}
- ovl->info_dirty = true;
-
return 0;
}
@@ -504,7 +622,6 @@
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: When there is an overlay on a DSI manual update display, and
* the overlay is first disabled, then moved to tv, and enabled, we
* seem to get SYNC_LOST_DIGIT error.
@@ -518,7 +635,6 @@
* the overlay, but before moving the overlay to TV.
*/
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -592,6 +708,9 @@
void dss_init_overlays(struct platform_device *pdev)
{
int i, r;
+ const struct omap_dss_cconv_coefs ctbl_bt601_5 = {
+ 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ };
INIT_LIST_HEAD(&overlay_list);
@@ -609,6 +728,7 @@
ovl->id = OMAP_DSS_GFX;
ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder = OMAP_DSS_OVL_ZORDER_0;
break;
case 1:
ovl->name = "vid1";
@@ -616,6 +736,9 @@
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder = dss_has_feature(FEAT_OVL_ZORDER) ?
+ OMAP_DSS_OVL_ZORDER_3 :
+ OMAP_DSS_OVL_ZORDER_0;
break;
case 2:
ovl->name = "vid2";
@@ -623,12 +746,31 @@
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
+ ovl->info.zorder = dss_has_feature(FEAT_OVL_ZORDER) ?
+ OMAP_DSS_OVL_ZORDER_2 :
+ OMAP_DSS_OVL_ZORDER_0;
break;
+ case 3:
+ ovl->name = "vid3";
+ ovl->id = OMAP_DSS_VIDEO3;
+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+ OMAP_DSS_OVL_CAP_DISPC;
+ ovl->info.global_alpha = 255;
+ ovl->info.zorder = dss_has_feature(FEAT_OVL_ZORDER) ?
+ OMAP_DSS_OVL_ZORDER_1 :
+ OMAP_DSS_OVL_ZORDER_0;
+ break;
+
}
+ ovl->info.min_x_decim = ovl->info.min_y_decim = 1;
+ ovl->info.max_x_decim = ovl->info.max_y_decim =
+ cpu_is_omap44xx() ? 16 : 1;
+ ovl->info.cconv = ctbl_bt601_5;
+
ovl->set_manager = &omap_dss_set_manager;
ovl->unset_manager = &omap_dss_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_overlay_info;
+ ovl->set_overlay_info = &omap_dss_ovl_set_info;
ovl->get_overlay_info = &dss_ovl_get_overlay_info;
ovl->wait_for_go = &dss_ovl_wait_for_go;
@@ -718,16 +860,18 @@
}
}
- if (mgr) {
+ if (mgr && force) {
+ dispc_runtime_get();
+
for (i = 0; i < dss_feat_get_num_ovls(); i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
- if (!ovl->manager || force) {
- if (ovl->manager)
- omap_dss_unset_manager(ovl);
- omap_dss_set_manager(ovl, mgr);
- }
+ if (ovl->manager)
+ omap_dss_unset_manager(ovl);
+ omap_dss_set_manager(ovl, mgr);
}
+
+ dispc_runtime_put();
}
}
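[Editor's note] The new x_decim/y_decim overlay attributes accept either a single value or a "min..max" range, e.g. writing "1..4" to x_decim under the overlay's sysfs directory (typically /sys/devices/platform/omapdss/overlayN/ on this platform). A standalone sketch of the same parsing rule, in plain user-space C for illustration only (the kernel code above uses simple_strtoul and size-bounded checks):

        #include <stdio.h>
        #include <stdlib.h>

        static int parse_decim(const char *buf, unsigned *min, unsigned *max)
        {
                char *last;

                *min = *max = strtoul(buf, &last, 10);
                if (last[0] == '.' && last[1] == '.')   /* ".." separator */
                        *max = strtoul(last + 2, NULL, 10);

                if (*max < *min) {                      /* fix order */
                        unsigned tmp = *min;
                        *min = *max;
                        *max = tmp;
                }

                return (*min == 0) ? -1 : 0;            /* decimation must be positive */
        }

        int main(void)
        {
                unsigned lo, hi;

                if (parse_decim("1..16", &lo, &hi) == 0)
                        printf("decimation range %u..%u\n", lo, hi);
                return 0;
        }

Giving the store routine a range rather than a fixed value lets dispc_scaling_decision() pick the smallest decimation that still fits the scaler limits.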
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index c06fbe0..3e2f5cd 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -33,6 +33,8 @@
#include <linux/hrtimer.h>
#include <linux/seq_file.h>
#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -120,12 +122,46 @@
return __raw_readl(rfbi.base + idx.idx);
}
-static void rfbi_enable_clocks(bool enable)
+static int rfbi_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("rfbi_runtime_get\n");
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ r = pm_runtime_get_sync(&rfbi.pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err_runtime_get;
+
+ return 0;
+
+err_runtime_get:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static void rfbi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("rfbi_runtime_put\n");
+
+ r = pm_runtime_put_sync(&rfbi.pdev->dev);
+ WARN_ON(r);
+
+ dispc_runtime_put();
+ dss_runtime_put();
}
void rfbi_bus_lock(void)
@@ -805,7 +841,8 @@
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (rfbi_runtime_get())
+ return;
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -836,7 +873,7 @@
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ rfbi_runtime_put();
#undef DUMPREG
}
@@ -844,7 +881,9 @@
{
int r;
- rfbi_enable_clocks(1);
+ r = rfbi_runtime_get();
+ if (r)
+ return r;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -879,6 +918,7 @@
err1:
omap_dss_stop_device(dssdev);
err0:
+ rfbi_runtime_put();
return r;
}
EXPORT_SYMBOL(omapdss_rfbi_display_enable);
@@ -889,7 +929,7 @@
DISPC_IRQ_FRAMEDONE);
omap_dss_stop_device(dssdev);
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
@@ -904,8 +944,9 @@
static int omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
- u32 l;
struct resource *rfbi_mem;
+ struct clk *clk;
+ int r;
rfbi.pdev = pdev;
@@ -914,36 +955,55 @@
rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
if (!rfbi_mem) {
DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
if (!rfbi.base) {
DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- rfbi_enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = rfbi_runtime_get();
+ if (r)
+ goto err_get_rfbi;
msleep(10);
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+ clk = clk_get(&pdev->dev, "rfbi_iclk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get rfbi_iclk\n");
+ r = PTR_ERR(clk);
+ goto err_get_ick;
+ }
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
+ rfbi.l4_khz = clk_get_rate(clk) / 1000;
+
+ clk_put(clk);
rev = rfbi_read_reg(RFBI_REVISION);
dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
return 0;
+
+err_get_ick:
+ rfbi_runtime_put();
+err_get_rfbi:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(rfbi.base);
+err_ioremap:
+ return r;
}
static int omap_rfbihw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
iounmap(rfbi.base);
return 0;
}
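[Editor's note] The rfbi.c hunks above (and the sdi.c and venc.c hunks below) replace the old dss_clk_enable()/dss_clk_disable() pairs with runtime PM: acquire the parent resources first, then the device itself, and unwind in reverse order if any step fails. A minimal sketch of that layered get/put shape, with placeholder names and a generic clock standing in for the omapdss-internal helpers:

        #include <linux/clk.h>
        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        struct example_hw {
                struct platform_device *pdev;
                struct clk *iclk;
        };

        /* Acquire outermost-first; unwind in reverse on any failure. */
        static int example_runtime_get(struct example_hw *hw)
        {
                int r;

                r = clk_enable(hw->iclk);
                if (r)
                        goto err_clk;

                r = pm_runtime_get_sync(&hw->pdev->dev);
                if (r < 0)
                        goto err_runtime;

                return 0;

        err_runtime:
                /* get_sync raises the usage count even on failure */
                pm_runtime_put_noidle(&hw->pdev->dev);
                clk_disable(hw->iclk);
        err_clk:
                return r;
        }

        static void example_runtime_put(struct example_hw *hw)
        {
                pm_runtime_put_sync(&hw->pdev->dev);
                clk_disable(hw->iclk);
        }

The probe path above follows the same discipline: pm_runtime_enable() before the first get, and pm_runtime_disable() plus iounmap() on every error exit.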
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 0bd4b03..3a688c8 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -20,13 +20,11 @@
#define DSS_SUBSYS_NAME "SDI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
static struct {
@@ -60,14 +58,20 @@
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
r = regulator_enable(sdi.vdds_sdi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
sdi_basic_init(dssdev);
@@ -80,7 +84,7 @@
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_calc_clock_div;
fck = dss_cinfo.fck;
lck_div = dispc_cinfo.lck_div;
@@ -101,27 +105,34 @@
r = dss_set_clock_div(&dss_cinfo);
if (r)
- goto err2;
+ goto err_set_dss_clock_div;
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_set_dispc_clock_div;
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
if (r)
- goto err1;
+ goto err_sdi_enable;
mdelay(2);
dssdev->manager->enable(dssdev->manager);
return 0;
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+
+err_sdi_enable:
+err_set_dispc_clock_div:
+err_set_dss_clock_div:
+err_calc_clock_div:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);
@@ -132,7 +143,8 @@
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
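[Editor's note] Besides switching sdi.c to runtime PM, the hunk above renames the numbered error labels (err0/err1/err2) to labels named after the failing step (err_start_dev, err_reg_enable, ...). A self-contained illustration of that unwind style in plain C; the resources here are stand-ins, not omapdss functions:

        #include <stdio.h>

        static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
        static void release(const char *what) { printf("release %s\n", what); }

        static int example_enable(void)
        {
                int r;

                r = acquire("device");
                if (r)
                        goto err_start_dev;

                r = acquire("regulator");
                if (r)
                        goto err_reg_enable;

                r = acquire("functional clock");
                if (r)
                        goto err_get_clk;

                return 0;

        err_get_clk:
                release("regulator");
        err_reg_enable:
                release("device");
        err_start_dev:
                return r;
        }

        int main(void)
        {
                return example_enable();
        }

Naming each label after the step that failed keeps the release order obvious when new acquisition steps (like the dss/dispc runtime gets) are inserted later.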
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 980f919..44919830 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -33,11 +33,13 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -292,6 +294,12 @@
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+
+ struct mutex runtime_lock;
+ int runtime_count;
+
+ struct clk *tv_clk;
+ struct clk *tv_dac_clk;
} venc;
static inline void venc_write_reg(int idx, u32 val)
@@ -380,14 +388,71 @@
#endif
}
-static void venc_enable_clocks(int enable)
+static int venc_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
+ int r;
+
+ mutex_lock(&venc.runtime_lock);
+
+ if (venc.runtime_count++ == 0) {
+ DSSDBG("venc_runtime_get\n");
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ clk_enable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_enable(venc.tv_dac_clk);
+
+ r = pm_runtime_get_sync(&venc.pdev->dev);
+ WARN_ON(r);
+ if (r < 0)
+ goto err_runtime_get;
+ }
+
+ mutex_unlock(&venc.runtime_lock);
+
+ return 0;
+
+err_runtime_get:
+ clk_disable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_disable(venc.tv_dac_clk);
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ mutex_unlock(&venc.runtime_lock);
+
+ return r;
+}
+
+static void venc_runtime_put(void)
+{
+ mutex_lock(&venc.runtime_lock);
+
+ if (--venc.runtime_count == 0) {
+ int r;
+
+ DSSDBG("venc_runtime_put\n");
+
+ r = pm_runtime_put_sync(&venc.pdev->dev);
+ WARN_ON(r);
+
+ clk_disable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_disable(venc.tv_dac_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+ }
+
+ mutex_unlock(&venc.runtime_lock);
}
static const struct venc_config *venc_timings_to_config(
@@ -406,8 +471,6 @@
{
u32 l;
- venc_enable_clocks(1);
-
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -448,8 +511,6 @@
dssdev->platform_disable(dssdev);
regulator_disable(venc.vdda_dac_reg);
-
- venc_enable_clocks(0);
}
@@ -487,6 +548,10 @@
goto err1;
}
+ r = venc_runtime_get();
+ if (r)
+ goto err1;
+
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -520,6 +585,8 @@
venc_power_off(dssdev);
+ venc_runtime_put();
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
omap_dss_stop_device(dssdev);
@@ -598,6 +665,7 @@
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
const struct venc_config *config;
+ int r;
DSSDBG("venc_set_wss\n");
@@ -608,16 +676,19 @@
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
- venc_enable_clocks(1);
+ r = venc_runtime_get();
+ if (r)
+ goto err;
venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
venc.wss_data);
- venc_enable_clocks(0);
+ venc_runtime_put();
+err:
mutex_unlock(&venc.venc_lock);
- return 0;
+ return r;
}
static struct omap_dss_driver venc_driver = {
@@ -673,7 +744,8 @@
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
- venc_enable_clocks(1);
+ if (venc_runtime_get())
+ return;
DUMPREG(VENC_F_CONTROL);
DUMPREG(VENC_VIDOUT_CTRL);
@@ -717,16 +789,53 @@
DUMPREG(VENC_OUTPUT_CONTROL);
DUMPREG(VENC_OUTPUT_TEST);
- venc_enable_clocks(0);
+ venc_runtime_put();
#undef DUMPREG
}
+static int venc_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "tv_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get tv_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ venc.tv_clk = clk;
+
+ if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ clk = clk_get(&pdev->dev, "tv_dac_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get tv_dac_clk\n");
+ clk_put(venc.tv_clk);
+ return PTR_ERR(clk);
+ }
+ } else {
+ clk = NULL;
+ }
+
+ venc.tv_dac_clk = clk;
+
+ return 0;
+}
+
+static void venc_put_clocks(void)
+{
+ if (venc.tv_clk)
+ clk_put(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_put(venc.tv_dac_clk);
+}
+
/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
+ int r;
venc.pdev = pdev;
@@ -737,22 +846,41 @@
venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
if (!venc_mem) {
DSSERR("can't get IORESOURCE_MEM VENC\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
if (!venc.base) {
DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- venc_enable_clocks(1);
+ r = venc_get_clocks(pdev);
+ if (r)
+ goto err_get_clk;
+
+ mutex_init(&venc.runtime_lock);
+ pm_runtime_enable(&pdev->dev);
+
+ r = venc_runtime_get();
+ if (r)
+ goto err_get_venc;
rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_enable_clocks(0);
+ venc_runtime_put();
return omap_dss_register_driver(&venc_driver);
+
+err_get_venc:
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+err_get_clk:
+ iounmap(venc.base);
+err_ioremap:
+ return r;
}
static int omap_venchw_remove(struct platform_device *pdev)
@@ -763,6 +891,9 @@
}
omap_dss_unregister_driver(&venc_driver);
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+
iounmap(venc.base);
return 0;
}
diff --git a/drivers/video/omap2/dsscomp/Kconfig b/drivers/video/omap2/dsscomp/Kconfig
new file mode 100644
index 0000000..0c2a749
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/Kconfig
@@ -0,0 +1,20 @@
+menuconfig DSSCOMP
+ tristate "OMAP DSS Composition support (EXPERIMENTAL)"
+ depends on OMAP2_DSS && PVR_SGX
+ default y
+
+ help
+ Frame composition driver using OMAP DSS2. Allows using all
+ DSS2 resources in a unified configuration. Should not be used
+ together with other DSS2 devices, such as V4L2 or framebuffer.
+
+config DSSCOMP_DEBUG_LOG
+ bool "Log event timestamps in debugfs"
+ default y
+ depends on DEBUG_FS
+
+ help
+ Takes a timestamp for each callback and state transition, and
+ logs the last 128 entries (the last few frames' worth) in a
+ log buffer. This is a separate config option in case the
+ logging is deemed an overhead.
diff --git a/drivers/video/omap2/dsscomp/Makefile b/drivers/video/omap2/dsscomp/Makefile
new file mode 100644
index 0000000..8a67933
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DSSCOMP) += dsscomp.o
+dsscomp-y := device.o base.o queue.o
+dsscomp-y += gralloc.o
diff --git a/drivers/video/omap2/dsscomp/base.c b/drivers/video/omap2/dsscomp/base.c
new file mode 100644
index 0000000..ad7ade6
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/base.c
@@ -0,0 +1,504 @@
+/*
+ * linux/drivers/video/omap2/dsscomp/base.c
+ *
+ * DSS Composition basic operation support
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#include <linux/notifier.h>
+#include <mach/tiler.h>
+
+#include <video/omapdss.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+
+#include "dsscomp.h"
+
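+/*
+ * Debug trace control: "debug" is a bitmask of the DEBUG_* flags defined
+ * in dsscomp.h (overlays, compositions, phases, waits, gralloc phases).
+ * As a 0644 module parameter it can also be changed at runtime, e.g. via
+ * /sys/module/dsscomp/parameters/debug (path assumed from the standard
+ * module_param sysfs layout).
+ */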
+int debug;
+module_param(debug, int, 0644);
+
+/* color formats supported - bitfield info is used for truncation logic */
+static const struct color_info {
+ int a_ix, a_bt; /* bitfields */
+ int r_ix, r_bt;
+ int g_ix, g_bt;
+ int b_ix, b_bt;
+ int x_bt;
+ enum omap_color_mode mode;
+ const char *name;
+} fmts[2][16] = { {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, OMAP_DSS_COLOR_CLUT1, "BITMAP1" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 2, OMAP_DSS_COLOR_CLUT2, "BITMAP2" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 4, OMAP_DSS_COLOR_CLUT4, "BITMAP4" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 8, OMAP_DSS_COLOR_CLUT8, "BITMAP8" },
+ { 0, 0, 8, 4, 4, 4, 0, 4, 4, OMAP_DSS_COLOR_RGB12U, "xRGB12-4444" },
+ { 12, 4, 8, 4, 4, 4, 0, 4, 0, OMAP_DSS_COLOR_ARGB16, "ARGB16-4444" },
+ { 0, 0, 11, 5, 5, 6, 0, 5, 0, OMAP_DSS_COLOR_RGB16, "RGB16-565" },
+ { 15, 1, 10, 5, 5, 5, 0, 5, 0, OMAP_DSS_COLOR_ARGB16_1555,
+ "ARGB16-1555" },
+ { 0, 0, 16, 8, 8, 8, 0, 8, 8, OMAP_DSS_COLOR_RGB24U, "xRGB24-8888" },
+ { 0, 0, 16, 8, 8, 8, 0, 8, 0, OMAP_DSS_COLOR_RGB24P, "RGB24-888" },
+ { 0, 0, 12, 4, 8, 4, 4, 4, 4, OMAP_DSS_COLOR_RGBX16, "RGBx12-4444" },
+ { 0, 4, 12, 4, 8, 4, 4, 4, 0, OMAP_DSS_COLOR_RGBA16, "RGBA16-4444" },
+ { 24, 8, 16, 8, 8, 8, 0, 8, 0, OMAP_DSS_COLOR_ARGB32, "ARGB32-8888" },
+ { 0, 8, 24, 8, 16, 8, 8, 8, 0, OMAP_DSS_COLOR_RGBA32, "RGBA32-8888" },
+ { 0, 0, 24, 8, 16, 8, 8, 8, 8, OMAP_DSS_COLOR_RGBX32, "RGBx24-8888" },
+ { 0, 0, 10, 5, 5, 5, 0, 5, 1, OMAP_DSS_COLOR_XRGB16_1555,
+ "xRGB15-1555" },
+}, {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 12, OMAP_DSS_COLOR_NV12, "NV12" },
+ { 0, 0, 12, 4, 8, 4, 4, 4, 4, OMAP_DSS_COLOR_RGBX16, "RGBx12-4444" },
+ { 0, 4, 12, 4, 8, 4, 4, 4, 0, OMAP_DSS_COLOR_RGBA16, "RGBA16-4444" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "invalid" },
+ { 0, 0, 8, 4, 4, 4, 0, 4, 4, OMAP_DSS_COLOR_RGB12U, "xRGB12-4444" },
+ { 12, 4, 8, 4, 4, 4, 0, 4, 0, OMAP_DSS_COLOR_ARGB16, "ARGB16-4444" },
+ { 0, 0, 11, 5, 5, 6, 0, 5, 0, OMAP_DSS_COLOR_RGB16, "RGB16-565" },
+ { 15, 1, 10, 5, 5, 5, 0, 5, 0, OMAP_DSS_COLOR_ARGB16_1555,
+ "ARGB16-1555" },
+ { 0, 0, 16, 8, 8, 8, 0, 8, 8, OMAP_DSS_COLOR_RGB24U, "xRGB24-8888" },
+ { 0, 0, 16, 8, 8, 8, 0, 8, 0, OMAP_DSS_COLOR_RGB24P, "RGB24-888" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 16, OMAP_DSS_COLOR_YUV2, "YUYV" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 16, OMAP_DSS_COLOR_UYVY, "UYVY" },
+ { 24, 8, 16, 8, 8, 8, 0, 8, 0, OMAP_DSS_COLOR_ARGB32, "ARGB32-8888" },
+ { 0, 8, 24, 8, 16, 8, 8, 8, 0, OMAP_DSS_COLOR_RGBA32, "RGBA32-8888" },
+ { 0, 0, 24, 8, 16, 8, 8, 8, 8, OMAP_DSS_COLOR_RGBX32, "RGBx24-8888" },
+ { 0, 0, 10, 5, 5, 5, 0, 5, 1, OMAP_DSS_COLOR_XRGB16_1555,
+ "xRGB15-1555" },
+} };
+
+static const struct color_info *get_color_info(enum omap_color_mode mode)
+{
+ int i;
+ for (i = 0; i < sizeof(fmts) / sizeof(fmts[0][0]); i++)
+ if (fmts[0][i].mode == mode)
+ return fmts[0] + i;
+ return NULL;
+}
+
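+/*
+ * Total bits per pixel is the sum of the component widths, e.g.
+ * OMAP_DSS_COLOR_RGB16 (RGB16-565) gives 5 + 6 + 5 = 16 bpp, while
+ * OMAP_DSS_COLOR_NV12 carries its 12 bpp average entirely in x_bt.
+ * Note that get_color_info() above iterates over all
+ * sizeof(fmts) / sizeof(fmts[0][0]) entries, i.e. both halves of
+ * fmts[][], so the YUV-only modes in the second table are matched too.
+ */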
+static int color_mode_to_bpp(enum omap_color_mode color_mode)
+{
+ const struct color_info *ci = get_color_info(color_mode);
+ BUG_ON(!ci);
+
+ return ci->a_bt + ci->r_bt + ci->g_bt + ci->b_bt + ci->x_bt;
+}
+
+#ifdef CONFIG_DEBUG_FS
+const char *dsscomp_get_color_name(enum omap_color_mode m)
+{
+ const struct color_info *ci = get_color_info(m);
+ return ci ? ci->name : NULL;
+}
+#endif
+
+union rect {
+ struct {
+ s32 x;
+ s32 y;
+ s32 w;
+ s32 h;
+ };
+ struct {
+ s32 xy[2];
+ s32 wh[2];
+ };
+ struct dss2_rect_t r;
+};
+
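+/*
+ * Clip the output window (win) to the visible region (vis) and shrink the
+ * source crop rectangle proportionally, taking rotation and mirroring into
+ * account.  Returns -ENOENT if nothing of the layer remains visible.
+ */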
+int crop_to_rect(union rect *crop, union rect *win, union rect *vis,
+ int rotation, int mirror)
+{
+ int c, swap = rotation & 1;
+
+ /* align crop window with display coordinates */
+ if (swap)
+ crop->y -= (crop->h = -crop->h);
+ if (rotation & 2)
+ crop->xy[!swap] -= (crop->wh[!swap] = -crop->wh[!swap]);
+ if ((!mirror) ^ !(rotation & 2))
+ crop->xy[swap] -= (crop->wh[swap] = -crop->wh[swap]);
+
+ for (c = 0; c < 2; c++) {
+ /* see if complete buffer is outside the vis or it is
+ fully cropped or scaled to 0 */
+ if (win->wh[c] <= 0 || vis->wh[c] <= 0 ||
+ win->xy[c] + win->wh[c] <= vis->xy[c] ||
+ win->xy[c] >= vis->xy[c] + vis->wh[c] ||
+ !crop->wh[c ^ swap])
+ return -ENOENT;
+
+ /* crop left/top */
+ if (win->xy[c] < vis->xy[c]) {
+ /* correction term */
+ int a = (vis->xy[c] - win->xy[c]) *
+ crop->wh[c ^ swap] / win->wh[c];
+ crop->xy[c ^ swap] += a;
+ crop->wh[c ^ swap] -= a;
+ win->wh[c] -= vis->xy[c] - win->xy[c];
+ win->xy[c] = vis->xy[c];
+ }
+ /* crop right/bottom */
+ if (win->xy[c] + win->wh[c] > vis->xy[c] + vis->wh[c]) {
+ crop->wh[c ^ swap] = crop->wh[c ^ swap] *
+ (vis->xy[c] + vis->wh[c] - win->xy[c]) /
+ win->wh[c];
+ win->wh[c] = vis->xy[c] + vis->wh[c] - win->xy[c];
+ }
+
+ if (!crop->wh[c ^ swap] || !win->wh[c])
+ return -ENOENT;
+ }
+
+ /* realign crop window to buffer coordinates */
+ if (rotation & 2)
+ crop->xy[!swap] -= (crop->wh[!swap] = -crop->wh[!swap]);
+ if ((!mirror) ^ !(rotation & 2))
+ crop->xy[swap] -= (crop->wh[swap] = -crop->wh[swap]);
+ if (swap)
+ crop->y -= (crop->h = -crop->h);
+ return 0;
+}
+
+int set_dss_ovl_info(struct dss2_ovl_info *oi)
+{
+ struct omap_overlay_info info;
+ struct omap_overlay *ovl;
+ struct dss2_ovl_cfg *cfg;
+ union rect crop, win, vis;
+ int c;
+
+ /* check overlay number */
+ if (!oi || oi->cfg.ix >= omap_dss_get_num_overlays())
+ return -EINVAL;
+ cfg = &oi->cfg;
+ ovl = omap_dss_get_overlay(cfg->ix);
+
+ /* just in case there are new fields, we get the current info */
+ ovl->get_overlay_info(ovl, &info);
+
+ info.enabled = cfg->enabled;
+ if (!cfg->enabled)
+ goto done;
+
+ /* copied params */
+ info.zorder = cfg->zorder;
+
+ if (cfg->zonly)
+ goto done;
+
+ info.global_alpha = cfg->global_alpha;
+ info.pre_mult_alpha = cfg->pre_mult_alpha;
+ info.rotation = cfg->rotation;
+ info.mirror = cfg->mirror;
+ info.color_mode = cfg->color_mode;
+
+ /* crop to screen */
+ crop.r = cfg->crop;
+ win.r = cfg->win;
+ vis.x = vis.y = 0;
+ vis.w = ovl->manager->device->panel.timings.x_res;
+ vis.h = ovl->manager->device->panel.timings.y_res;
+
+ if (crop_to_rect(&crop, &win, &vis, cfg->rotation, cfg->mirror) ||
+ vis.w < 2) {
+ info.enabled = false;
+ goto done;
+ }
+
+ /* adjust crop to UV pixel boundaries */
+ for (c = 0; c < (cfg->color_mode == OMAP_DSS_COLOR_NV12 ? 2 :
+ (cfg->color_mode &
+ (OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY)) ? 1 : 0); c++) {
+ /* keep the output window to avoid trembling edges */
+ crop.wh[c] += crop.xy[c] & 1; /* round down start */
+ crop.xy[c] &= ~1;
+ crop.wh[c] += crop.wh[c] & 1; /* round up end */
+
+ /*
+ * Buffer is aligned on UV pixel boundaries, so no
+ * worries about extending crop region.
+ */
+ }
+
+ info.width = crop.w;
+ info.height = crop.h;
+ if (cfg->rotation & 1)
+ /* DISPC uses swapped height/width for 90/270 degrees */
+ swap(info.width, info.height);
+ info.pos_x = win.x;
+ info.pos_y = win.y;
+ info.out_width = win.w;
+ info.out_height = win.h;
+
+ /* calculate addresses and cropping */
+ info.paddr = oi->ba;
+ info.p_uv_addr = (info.color_mode == OMAP_DSS_COLOR_NV12) ? oi->uv : 0;
+ info.vaddr = NULL;
+
+ /* check for TILER 2D buffer */
+ if (info.paddr >= 0x60000000 && info.paddr < 0x78000000) {
+ int bpp = 1 << ((info.paddr >> 27) & 3);
+ struct tiler_view_t t;
+
+ /* crop to top-left */
+
+ /*
+ * DSS supports YUV422 in 32-bit mode, but it is technically
+ * 2 bytes-per-pixel.
+ * Also RGB24-888 is 3 bytes-per-pixel even though no
+ * tiler pixel format matches this.
+ */
+ if (cfg->color_mode &
+ (OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY))
+ bpp = 2;
+ else if (cfg->color_mode == OMAP_DSS_COLOR_RGB24P)
+ bpp = 3;
+
+ tilview_create(&t, info.paddr, cfg->width, cfg->height);
+ info.paddr -= t.tsptr;
+ tilview_crop(&t, 0, crop.y, cfg->width, crop.h);
+ info.paddr += t.tsptr + bpp * crop.x;
+
+ info.rotation_type = OMAP_DSS_ROT_TILER;
+ info.screen_width = 0;
+
+ /* for NV12 format also crop NV12 */
+ if (info.color_mode == OMAP_DSS_COLOR_NV12) {
+ tilview_create(&t, info.p_uv_addr,
+ cfg->width >> 1, cfg->height >> 1);
+ info.p_uv_addr -= t.tsptr;
+ tilview_crop(&t, 0, crop.y >> 1, cfg->width >> 1,
+ crop.h >> 1);
+ info.p_uv_addr += t.tsptr + bpp * crop.x;
+ }
+ } else {
+ /* program tiler 1D as SDMA */
+
+ int bpp = color_mode_to_bpp(cfg->color_mode);
+ info.screen_width = cfg->stride * 8 / (bpp == 12 ? 8 : bpp);
+ info.paddr += crop.x * (bpp / 8) + crop.y * cfg->stride;
+
+ /* for NV12 format also crop NV12 */
+ if (info.color_mode == OMAP_DSS_COLOR_NV12)
+ info.p_uv_addr += crop.x * (bpp / 8) +
+ (crop.y >> 1) * cfg->stride;
+
+ /* no rotation on DMA buffer */
+ if (cfg->rotation & 3 || cfg->mirror)
+ return -EINVAL;
+
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ }
+
+ info.max_x_decim = cfg->decim.max_x ? : 255;
+ info.max_y_decim = cfg->decim.max_y ? : 255;
+ info.min_x_decim = cfg->decim.min_x ? : 1;
+ info.min_y_decim = cfg->decim.min_y ? : 1;
+#if 0
+ info.pic_height = cfg->height;
+
+ info.field = 0;
+ if (cfg->ilace & OMAP_DSS_ILACE_SEQ)
+ info.field |= OMAP_FLAG_IBUF;
+ if (cfg->ilace & OMAP_DSS_ILACE_SWAP)
+ info.field |= OMAP_FLAG_ISWAP;
+ /*
+ * Ignore OMAP_DSS_ILACE as there is no real support yet for
+ * interlaced interleaved vs progressive buffers
+ */
+ if (ovl->manager &&
+ ovl->manager->device &&
+ !strcmp(ovl->manager->device->name, "hdmi") &&
+ is_hdmi_interlaced())
+ info.field |= OMAP_FLAG_IDEV;
+
+ info.out_wb = 0;
+#endif
+
+ info.cconv = cfg->cconv;
+
+done:
+#if 0
+ pr_debug("ovl%d: en=%d %x/%x (%dx%d|%d) => (%dx%d) @ (%d,%d) rot=%d "
+ "mir=%d col=%x z=%d al=%02x prem=%d pich=%d ilace=%d\n",
+ ovl->id, info.enabled, info.paddr, info.p_uv_addr, info.width,
+ info.height, info.screen_width, info.out_width, info.out_height,
+ info.pos_x, info.pos_y, info.rotation, info.mirror,
+ info.color_mode, info.zorder, info.global_alpha,
+ info.pre_mult_alpha, info.pic_height, info.field);
+#else
+ pr_debug("ovl%d: en=%d %x/%x (%dx%d|%d) => (%dx%d) @ (%d,%d) rot=%d "
+ "mir=%d col=%x z=%d al=%02x prem=%d\n",
+ ovl->id, info.enabled, info.paddr, info.p_uv_addr, info.width,
+ info.height, info.screen_width, info.out_width, info.out_height,
+ info.pos_x, info.pos_y, info.rotation, info.mirror,
+ info.color_mode, info.zorder, info.global_alpha,
+ info.pre_mult_alpha);
+#endif
+ /* set overlay info */
+ return ovl->set_overlay_info(ovl, &info);
+}
+
+void swap_rb_in_ovl_info(struct dss2_ovl_info *oi)
+{
+ /* we need to swap YUV color matrix if we are swapping R and B */
+ if (oi->cfg.color_mode &
+ (OMAP_DSS_COLOR_NV12 | OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY)) {
+ swap(oi->cfg.cconv.ry, oi->cfg.cconv.by);
+ swap(oi->cfg.cconv.rcr, oi->cfg.cconv.bcr);
+ swap(oi->cfg.cconv.rcb, oi->cfg.cconv.bcb);
+ }
+}
+
+struct omap_overlay_manager *find_dss_mgr(int display_ix)
+{
+ struct omap_overlay_manager *mgr;
+ char name[32];
+ int i;
+
+ sprintf(name, "display%d", display_ix);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ mgr = omap_dss_get_overlay_manager(i);
+ if (mgr->device && !strcmp(name, dev_name(&mgr->device->dev)))
+ return mgr;
+ }
+ return NULL;
+}
+
+int set_dss_mgr_info(struct dss2_mgr_info *mi, struct omapdss_ovl_cb *cb)
+{
+ struct omap_overlay_manager_info info;
+ struct omap_overlay_manager *mgr;
+
+ if (!mi)
+ return -EINVAL;
+ mgr = find_dss_mgr(mi->ix);
+ if (!mgr)
+ return -EINVAL;
+
+ /* just in case there are new fields, we get the current info */
+ mgr->get_manager_info(mgr, &info);
+
+ info.alpha_enabled = mi->alpha_blending;
+ info.default_color = mi->default_color;
+ info.trans_enabled = mi->trans_enabled && !mi->alpha_blending;
+ info.trans_key = mi->trans_key;
+ info.trans_key_type = mi->trans_key_type;
+
+ info.cpr_coefs = mi->cpr_coefs;
+ info.cpr_enable = mi->cpr_enabled;
+ info.cb = *cb;
+
+ return mgr->set_manager_info(mgr, &info);
+}
+
+void swap_rb_in_mgr_info(struct dss2_mgr_info *mi)
+{
+ const struct omap_dss_cpr_coefs c = { 256, 0, 0, 0, 256, 0, 0, 0, 256 };
+
+ /* set default CPR */
+ if (!mi->cpr_enabled)
+ mi->cpr_coefs = c;
+ mi->cpr_enabled = true;
+
+ /* swap red and blue */
+ swap(mi->cpr_coefs.rr, mi->cpr_coefs.br);
+ swap(mi->cpr_coefs.rg, mi->cpr_coefs.bg);
+ swap(mi->cpr_coefs.rb, mi->cpr_coefs.bb);
+}
+
+/*
+ * ===========================================================================
+ * DEBUG METHODS
+ * ===========================================================================
+ */
+void dump_ovl_info(struct dsscomp_dev *cdev, struct dss2_ovl_info *oi)
+{
+ struct dss2_ovl_cfg *c = &oi->cfg;
+ const struct color_info *ci;
+
+ if (!(debug & DEBUG_OVERLAYS) ||
+ !(debug & DEBUG_COMPOSITIONS))
+ return;
+
+ ci = get_color_info(c->color_mode);
+ if (c->zonly) {
+ dev_info(DEV(cdev), "ovl%d(%s z%d)\n",
+ c->ix, c->enabled ? "ON" : "off", c->zorder);
+ return;
+ }
+ dev_info(DEV(cdev), "ovl%d(%s z%d %s%s *%d%% %d*%d:%d,%d+%d,%d rot%d%s"
+ " => %d,%d+%d,%d %p/%p|%d)\n",
+ c->ix, c->enabled ? "ON" : "off", c->zorder,
+ ci->name ? : "(none)",
+ c->pre_mult_alpha ? " premult" : "",
+ (c->global_alpha * 100 + 128) / 255,
+ c->width, c->height, c->crop.x, c->crop.y,
+ c->crop.w, c->crop.h,
+ c->rotation, c->mirror ? "+mir" : "",
+ c->win.x, c->win.y, c->win.w, c->win.h,
+ (void *) oi->ba, (void *) oi->uv, c->stride);
+}
+
+static void print_mgr_info(struct dsscomp_dev *cdev,
+ struct dss2_mgr_info *mi)
+{
+ printk("(dis%d(%s) alpha=%d col=%08x ilace=%d) ",
+ mi->ix,
+ (mi->ix < cdev->num_displays && cdev->displays[mi->ix]) ?
+ cdev->displays[mi->ix]->name : "NONE",
+ mi->alpha_blending, mi->default_color,
+ mi->interlaced);
+}
+
+void dump_comp_info(struct dsscomp_dev *cdev, struct dsscomp_setup_mgr_data *d,
+ const char *phase)
+{
+ if (!(debug & DEBUG_COMPOSITIONS))
+ return;
+
+ dev_info(DEV(cdev), "[%p] %s: %c%c%c ",
+ *phase == 'q' ? (void *) d->sync_id : d, phase,
+ (d->mode & DSSCOMP_SETUP_MODE_APPLY) ? 'A' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_DISPLAY) ? 'D' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_CAPTURE) ? 'C' : '-');
+ print_mgr_info(cdev, &d->mgr);
+ printk("n=%d\n", d->num_ovls);
+}
+
+void dump_total_comp_info(struct dsscomp_dev *cdev,
+ struct dsscomp_setup_dispc_data *d,
+ const char *phase)
+{
+ int i;
+
+ if (!(debug & DEBUG_COMPOSITIONS))
+ return;
+
+ dev_info(DEV(cdev), "[%p] %s: %c%c%c ",
+ *phase == 'q' ? (void *) d->sync_id : d, phase,
+ (d->mode & DSSCOMP_SETUP_MODE_APPLY) ? 'A' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_DISPLAY) ? 'D' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_CAPTURE) ? 'C' : '-');
+
+ for (i = 0; i < d->num_mgrs && i < ARRAY_SIZE(d->mgrs); i++)
+ print_mgr_info(cdev, d->mgrs + i);
+ printk("n=%d\n", d->num_ovls);
+}
diff --git a/drivers/video/omap2/dsscomp/device.c b/drivers/video/omap2/dsscomp/device.c
new file mode 100644
index 0000000..80cc21b
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/device.c
@@ -0,0 +1,634 @@
+/*
+ * linux/drivers/video/omap2/dsscomp/device.c
+ *
+ * DSS Composition file device and ioctl support
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DEBUG
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+
+#define MODULE_NAME "dsscomp"
+
+#include <video/omapdss.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+#include "dsscomp.h"
+
+#include <linux/debugfs.h>
+
+static DECLARE_WAIT_QUEUE_HEAD(waitq);
+static DEFINE_MUTEX(wait_mtx);
+
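+/*
+ * Translate a user virtual address of the current process into a physical
+ * address by walking its page tables.  Returns 0 if the address is not
+ * mapped.  No reference is taken on the underlying page.
+ */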
+static u32 hwc_virt_to_phys(u32 arg)
+{
+ pmd_t *pmd;
+ pte_t *ptep;
+
+ pgd_t *pgd = pgd_offset(current->mm, arg);
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ return 0;
+
+ pmd = pmd_offset(pgd, arg);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return 0;
+
+ ptep = pte_offset_map(pmd, arg);
+ if (ptep && pte_present(*ptep))
+ return (PAGE_MASK & *ptep) | (~PAGE_MASK & arg);
+
+ return 0;
+}
+
+/*
+ * ===========================================================================
+ * WAIT OPERATIONS
+ * ===========================================================================
+ */
+
+static void sync_drop(struct dsscomp_sync_obj *sync)
+{
+ if (sync && atomic_dec_and_test(&sync->refs)) {
+ if (debug & DEBUG_WAITS)
+ pr_info("free sync [%p]\n", sync);
+
+ kfree(sync);
+ }
+}
+
+static int sync_setup(const char *name, const struct file_operations *fops,
+ struct dsscomp_sync_obj *sync, int flags)
+{
+ if (!sync)
+ return -ENOMEM;
+
+ sync->refs.counter = 1;
+ sync->fd = anon_inode_getfd(name, fops, sync, flags);
+ return sync->fd < 0 ? sync->fd : 0;
+}
+
+static int sync_finalize(struct dsscomp_sync_obj *sync, int r)
+{
+ if (sync) {
+ if (r < 0)
+ /* delete sync object on failure */
+ sys_close(sync->fd);
+ else
+ /* return file descriptor on success */
+ r = sync->fd;
+ }
+ return r;
+}
+
+/* wait for programming or release of a composition */
+int dsscomp_wait(struct dsscomp_sync_obj *sync, enum dsscomp_wait_phase phase,
+ int timeout)
+{
+ mutex_lock(&wait_mtx);
+ if (debug & DEBUG_WAITS)
+ pr_info("wait %s on [%p]\n",
+ phase == DSSCOMP_WAIT_DISPLAYED ? "display" :
+ phase == DSSCOMP_WAIT_PROGRAMMED ? "program" :
+ "release", sync);
+
+ if (sync->state < phase) {
+ mutex_unlock(&wait_mtx);
+
+ timeout = wait_event_interruptible_timeout(waitq,
+ sync->state >= phase, timeout);
+ if (debug & DEBUG_WAITS)
+ pr_info("wait over [%p]: %s %d\n", sync,
+ timeout < 0 ? "signal" :
+ timeout > 0 ? "ok" : "timeout",
+ timeout);
+ if (timeout <= 0)
+ return timeout ? : -ETIME;
+
+ mutex_lock(&wait_mtx);
+ }
+ mutex_unlock(&wait_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(dsscomp_wait);
+
+static void dsscomp_queue_cb(void *data, int status)
+{
+ struct dsscomp_sync_obj *sync = data;
+ enum dsscomp_wait_phase phase =
+ status == DSS_COMPLETION_PROGRAMMED ? DSSCOMP_WAIT_PROGRAMMED :
+ status == DSS_COMPLETION_DISPLAYED ? DSSCOMP_WAIT_DISPLAYED :
+ DSSCOMP_WAIT_RELEASED, old_phase;
+
+ mutex_lock(&wait_mtx);
+ old_phase = sync->state;
+ if (old_phase < phase)
+ sync->state = phase;
+ mutex_unlock(&wait_mtx);
+
+ if (status & DSS_COMPLETION_RELEASED)
+ sync_drop(sync);
+ if (old_phase < phase)
+ wake_up_interruptible_sync(&waitq);
+}
+
+static int sync_release(struct inode *inode, struct file *filp)
+{
+ struct dsscomp_sync_obj *sync = filp->private_data;
+ sync_drop(sync);
+ return 0;
+}
+
+static long sync_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int r = 0;
+ struct dsscomp_sync_obj *sync = filp->private_data;
+ void __user *ptr = (void __user *)arg;
+
+ switch (cmd) {
+ case DSSCIOC_WAIT:
+ {
+ struct dsscomp_wait_data wd;
+ r = copy_from_user(&wd, ptr, sizeof(wd)) ? :
+ dsscomp_wait(sync, wd.phase,
+ usecs_to_jiffies(wd.timeout_us));
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+ return r;
+}
+
+static const struct file_operations sync_fops = {
+ .owner = THIS_MODULE,
+ .release = sync_release,
+ .unlocked_ioctl = sync_ioctl,
+};
+
+static long setup_mgr(struct dsscomp_dev *cdev,
+ struct dsscomp_setup_mgr_data *d)
+{
+ int i, r;
+ struct omap_dss_device *dev;
+ struct omap_overlay_manager *mgr;
+ dsscomp_t comp;
+ struct dsscomp_sync_obj *sync = NULL;
+
+ dump_comp_info(cdev, d, "queue");
+ for (i = 0; i < d->num_ovls; i++)
+ dump_ovl_info(cdev, d->ovls + i);
+
+ /* verify display is valid and connected */
+ if (d->mgr.ix >= cdev->num_displays)
+ return -EINVAL;
+ dev = cdev->displays[d->mgr.ix];
+ if (!dev)
+ return -EINVAL;
+ mgr = dev->manager;
+ if (!mgr)
+ return -ENODEV;
+
+ comp = dsscomp_new(mgr);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+
+ /* swap red & blue if requested */
+ if (d->mgr.swap_rb) {
+ swap_rb_in_mgr_info(&d->mgr);
+ for (i = 0; i < d->num_ovls; i++)
+ swap_rb_in_ovl_info(d->ovls + i);
+ }
+
+ r = dsscomp_set_mgr(comp, &d->mgr);
+
+ for (i = 0; i < d->num_ovls; i++) {
+ struct dss2_ovl_info *oi = d->ovls + i;
+ u32 addr = (u32) oi->address;
+
+ /* convert addresses to user space */
+ if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
+ oi->uv = hwc_virt_to_phys(addr +
+ oi->cfg.height * oi->cfg.stride);
+ oi->ba = hwc_virt_to_phys(addr);
+
+ r = r ? : dsscomp_set_ovl(comp, oi);
+ }
+
+ r = r ? : dsscomp_setup(comp, d->mode, d->win);
+
+ /* create sync object */
+ if (d->get_sync_obj) {
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ r = sync_setup("dsscomp_sync", &sync_fops, sync, O_RDONLY);
+ if (sync && (debug & DEBUG_WAITS))
+ dev_info(DEV(cdev), "new sync [%p] on #%d\n", sync,
+ sync->fd);
+ if (r)
+ sync_drop(sync);
+ }
+
+ /* drop composition if failed to create */
+ if (r) {
+ dsscomp_drop(comp);
+ return r;
+ }
+
+ if (sync) {
+ sync->refs.counter++;
+ comp->extra_cb = dsscomp_queue_cb;
+ comp->extra_cb_data = sync;
+ }
+ if (d->mode & DSSCOMP_SETUP_APPLY)
+ r = dsscomp_delayed_apply(comp);
+
+ /* delete sync object if failed to apply or create file */
+ if (sync) {
+ r = sync_finalize(sync, r);
+ if (r < 0)
+ sync_drop(sync);
+ }
+ return r;
+}
+
+static long query_display(struct dsscomp_dev *cdev,
+ struct dsscomp_display_info *dis)
+{
+ struct omap_dss_device *dev;
+ struct omap_overlay_manager *mgr;
+ int i;
+
+ /* get display */
+ if (dis->ix >= cdev->num_displays)
+ return -EINVAL;
+ dev = cdev->displays[dis->ix];
+ if (!dev)
+ return -EINVAL;
+ mgr = dev->manager;
+
+ /* fill out display information */
+ dis->channel = dev->channel;
+ dis->enabled = (dev->state == OMAP_DSS_DISPLAY_SUSPENDED) ?
+ dev->activate_after_resume :
+ (dev->state == OMAP_DSS_DISPLAY_ACTIVE);
+ dis->overlays_available = 0;
+ dis->overlays_owned = 0;
+#if 0
+ dis->s3d_info = dev->panel.s3d_info;
+#endif
+ dis->state = dev->state;
+ dis->timings = dev->panel.timings;
+
+ dis->width_in_mm = DIV_ROUND_CLOSEST(dev->panel.width_in_um, 1000);
+ dis->height_in_mm = DIV_ROUND_CLOSEST(dev->panel.height_in_um, 1000);
+
+ /* find all overlays available for/owned by this display */
+ for (i = 0; i < cdev->num_ovls && dis->enabled; i++) {
+ if (cdev->ovls[i]->manager == mgr)
+ dis->overlays_owned |= 1 << i;
+ else if (!cdev->ovls[i]->info.enabled)
+ dis->overlays_available |= 1 << i;
+ }
+ dis->overlays_available |= dis->overlays_owned;
+
+ /* fill out manager information */
+ if (mgr) {
+ dis->mgr.alpha_blending = mgr->info.alpha_enabled;
+ dis->mgr.default_color = mgr->info.default_color;
+#if 0
+ dis->mgr.interlaced = !strcmp(dev->name, "hdmi") &&
+ is_hdmi_interlaced()
+#else
+ dis->mgr.interlaced = 0;
+#endif
+ dis->mgr.trans_enabled = mgr->info.trans_enabled;
+ dis->mgr.trans_key = mgr->info.trans_key;
+ dis->mgr.trans_key_type = mgr->info.trans_key_type;
+ } else {
+ /* display is disabled if it has no manager */
+ memset(&dis->mgr, 0, sizeof(dis->mgr));
+ }
+ dis->mgr.ix = dis->ix;
+
+ if (dis->modedb_len && dev->driver->get_modedb)
+ dis->modedb_len = dev->driver->get_modedb(dev,
+ (struct fb_videomode *) dis->modedb, dis->modedb_len);
+ return 0;
+}
+
+static long check_ovl(struct dsscomp_dev *cdev,
+ struct dsscomp_check_ovl_data *chk)
+{
+ /* for now return all overlays as possible */
+ return (1 << cdev->num_ovls) - 1;
+}
+
+static long setup_display(struct dsscomp_dev *cdev,
+ struct dsscomp_setup_display_data *dis)
+{
+ struct omap_dss_device *dev;
+
+ /* get display */
+ if (dis->ix >= cdev->num_displays)
+ return -EINVAL;
+ dev = cdev->displays[dis->ix];
+ if (!dev)
+ return -EINVAL;
+
+ if (dev->driver->set_mode)
+ return dev->driver->set_mode(dev,
+ (struct fb_videomode *) &dis->mode);
+ else
+ return 0;
+}
+
+static void fill_cache(struct dsscomp_dev *cdev)
+{
+ unsigned long i;
+ struct omap_dss_device *dssdev = NULL;
+
+ cdev->num_ovls = min(omap_dss_get_num_overlays(), MAX_OVERLAYS);
+ for (i = 0; i < cdev->num_ovls; i++)
+ cdev->ovls[i] = omap_dss_get_overlay(i);
+
+ cdev->num_mgrs = min(omap_dss_get_num_overlay_managers(), MAX_MANAGERS);
+ for (i = 0; i < cdev->num_mgrs; i++)
+ cdev->mgrs[i] = omap_dss_get_overlay_manager(i);
+
+ for_each_dss_dev(dssdev) {
+ const char *name = dev_name(&dssdev->dev);
+ if (strncmp(name, "display", 7) ||
+ strict_strtoul(name + 7, 10, &i) ||
+ i >= MAX_DISPLAYS)
+ continue;
+
+ if (cdev->num_displays <= i)
+ cdev->num_displays = i + 1;
+
+ cdev->displays[i] = dssdev;
+ dev_dbg(DEV(cdev), "display%lu=%s\n", i, dssdev->driver_name);
+
+ cdev->state_notifiers[i].notifier_call = dsscomp_state_notifier;
+ blocking_notifier_chain_register(&dssdev->state_notifiers,
+ cdev->state_notifiers + i);
+ }
+ dev_info(DEV(cdev), "found %d displays and %d overlays\n",
+ cdev->num_displays, cdev->num_ovls);
+}
+
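+/*
+ * Userspace reaches this driver through the /dev/dsscomp misc device
+ * registered in dsscomp_probe() below.  A minimal query sketch (not part
+ * of this patch; assumes <video/dsscomp.h> is exported to userspace and
+ * omits error handling):
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <video/dsscomp.h>
+ *
+ *	struct dsscomp_display_info info = { .ix = 0, .modedb_len = 0 };
+ *	int fd = open("/dev/dsscomp", O_RDWR);
+ *	if (fd >= 0 && ioctl(fd, DSSCIOC_QUERY_DISPLAY, &info) == 0)
+ *		printf("display0 %s\n", info.enabled ? "enabled" : "off");
+ */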
+static long comp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int r = 0;
+ struct miscdevice *dev = filp->private_data;
+ struct dsscomp_dev *cdev = container_of(dev, struct dsscomp_dev, dev);
+ void __user *ptr = (void __user *)arg;
+
+ union {
+ struct {
+ struct dsscomp_setup_mgr_data set;
+ struct dss2_ovl_info ovl[MAX_OVERLAYS];
+ } m;
+ struct dsscomp_setup_dispc_data dispc;
+ struct dsscomp_display_info dis;
+ struct dsscomp_check_ovl_data chk;
+ struct dsscomp_setup_display_data sdis;
+ } u;
+
+ dsscomp_gralloc_init(cdev);
+
+ switch (cmd) {
+ case DSSCIOC_SETUP_MGR:
+ {
+ r = copy_from_user(&u.m.set, ptr, sizeof(u.m.set)) ? :
+ u.m.set.num_ovls >= ARRAY_SIZE(u.m.ovl) ? -EINVAL :
+ copy_from_user(&u.m.ovl,
+ (void __user *)arg + sizeof(u.m.set),
+ sizeof(*u.m.ovl) * u.m.set.num_ovls) ? :
+ setup_mgr(cdev, &u.m.set);
+ break;
+ }
+ case DSSCIOC_SETUP_DISPC:
+ {
+ r = copy_from_user(&u.dispc, ptr, sizeof(u.dispc)) ? :
+ dsscomp_gralloc_queue_ioctl(&u.dispc);
+ break;
+ }
+ case DSSCIOC_QUERY_DISPLAY:
+ {
+ struct dsscomp_display_info *dis = NULL;
+ r = copy_from_user(&u.dis, ptr, sizeof(u.dis));
+ if (!r) {
+ /* impose a safe limit on modedb_len to prevent an
+ * overflow/wraparound in the allocation-size calculation
+ * that would make it smaller than
+ * struct dsscomp_display_info and cause heap
+ * corruption.
+ */
+ u.dis.modedb_len = clamp_val(u.dis.modedb_len, 0, 256);
+
+ dis = kzalloc(sizeof(*dis->modedb) * u.dis.modedb_len +
+ sizeof(*dis), GFP_KERNEL);
+ }
+ if (dis) {
+ *dis = u.dis;
+ r = query_display(cdev, dis) ? :
+ copy_to_user(ptr, dis, sizeof(*dis) +
+ sizeof(*dis->modedb) * dis->modedb_len);
+ kfree(dis);
+ } else {
+ r = r ? : -ENOMEM;
+ }
+ break;
+ }
+ case DSSCIOC_CHECK_OVL:
+ {
+ r = copy_from_user(&u.chk, ptr, sizeof(u.chk)) ? :
+ check_ovl(cdev, &u.chk);
+ break;
+ }
+ case DSSCIOC_SETUP_DISPLAY:
+ {
+ r = copy_from_user(&u.sdis, ptr, sizeof(u.sdis)) ? :
+ setup_display(cdev, &u.sdis);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/* must implement open for filp->private_data to be filled */
+static int comp_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations comp_fops = {
+ .owner = THIS_MODULE,
+ .open = comp_open,
+ .unlocked_ioctl = comp_ioctl,
+};
+
+static int dsscomp_debug_show(struct seq_file *s, void *unused)
+{
+ void (*fn)(struct seq_file *s) = s->private;
+ fn(s);
+ return 0;
+}
+
+static int dsscomp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dsscomp_debug_show, inode->i_private);
+}
+
+static const struct file_operations dsscomp_debug_fops = {
+ .open = dsscomp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dsscomp_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct dsscomp_dev *cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ pr_err("dsscomp: failed to allocate device.\n");
+ return -ENOMEM;
+ }
+ cdev->dev.minor = MISC_DYNAMIC_MINOR;
+ cdev->dev.name = "dsscomp";
+ cdev->dev.mode = 0666;
+ cdev->dev.fops = &comp_fops;
+
+ ret = misc_register(&cdev->dev);
+ if (ret) {
+ pr_err("dsscomp: failed to register misc device.\n");
+ return ret;
+ }
+ cdev->dbgfs = debugfs_create_dir("dsscomp", NULL);
+ if (IS_ERR_OR_NULL(cdev->dbgfs))
+ dev_warn(DEV(cdev), "failed to create debug files.\n");
+ else {
+ debugfs_create_file("comps", S_IRUGO,
+ cdev->dbgfs, dsscomp_dbg_comps, &dsscomp_debug_fops);
+ debugfs_create_file("gralloc", S_IRUGO,
+ cdev->dbgfs, dsscomp_dbg_gralloc, &dsscomp_debug_fops);
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+ debugfs_create_file("log", S_IRUGO,
+ cdev->dbgfs, dsscomp_dbg_events, &dsscomp_debug_fops);
+#endif
+ }
+
+ platform_set_drvdata(pdev, cdev);
+
+ pr_info("dsscomp: initializing.\n");
+
+ fill_cache(cdev);
+
+ /* initialize queues */
+ dsscomp_queue_init(cdev);
+ dsscomp_gralloc_init(cdev);
+
+ return 0;
+}
+
+static int dsscomp_remove(struct platform_device *pdev)
+{
+ struct dsscomp_dev *cdev = platform_get_drvdata(pdev);
+ misc_deregister(&cdev->dev);
+ debugfs_remove_recursive(cdev->dbgfs);
+ dsscomp_queue_exit();
+ dsscomp_gralloc_exit();
+ kfree(cdev);
+
+ return 0;
+}
+
+static struct platform_driver dsscomp_pdriver = {
+ .probe = dsscomp_probe,
+ .remove = dsscomp_remove,
+ .driver = { .name = MODULE_NAME, .owner = THIS_MODULE }
+};
+
+static struct platform_device dsscomp_pdev = {
+ .name = MODULE_NAME,
+ .id = -1
+};
+
+static int __init dsscomp_init(void)
+{
+ int err = platform_driver_register(&dsscomp_pdriver);
+ if (err)
+ return err;
+
+ err = platform_device_register(&dsscomp_pdev);
+ if (err)
+ platform_driver_unregister(&dsscomp_pdriver);
+ return err;
+}
+
+static void __exit dsscomp_exit(void)
+{
+ platform_device_unregister(&dsscomp_pdev);
+ platform_driver_unregister(&dsscomp_pdriver);
+}
+
+#define DUMP_CHUNK 256
+static char dump_buf[64 * 1024];
+void dsscomp_kdump(void)
+{
+ struct seq_file s = {
+ .buf = dump_buf,
+ .size = sizeof(dump_buf) - 1,
+ };
+ int i;
+
+ dsscomp_dbg_events(&s);
+ dsscomp_dbg_comps(&s);
+ dsscomp_dbg_gralloc(&s);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
+EXPORT_SYMBOL(dsscomp_kdump);
+
+MODULE_LICENSE("GPL v2");
+module_init(dsscomp_init);
+module_exit(dsscomp_exit);
diff --git a/drivers/video/omap2/dsscomp/dsscomp.h b/drivers/video/omap2/dsscomp/dsscomp.h
new file mode 100644
index 0000000..8edfaa7
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/dsscomp.h
@@ -0,0 +1,208 @@
+/*
+ * linux/drivers/video/omap2/dsscomp/dsscomp.h
+ *
+ * DSS Composition basic operation support
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DSSCOMP_H
+#define _DSSCOMP_H
+
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+#include <linux/hrtimer.h>
+#endif
+
+#define MAX_OVERLAYS 5
+#define MAX_MANAGERS 3
+#define MAX_DISPLAYS 4
+
+#define DEBUG_OVERLAYS (1 << 0)
+#define DEBUG_COMPOSITIONS (1 << 1)
+#define DEBUG_PHASES (1 << 2)
+#define DEBUG_WAITS (1 << 3)
+#define DEBUG_GRALLOC_PHASES (1 << 4)
+
+/*
+ * Utility macros
+ */
+#define ZERO(c) memset(&c, 0, sizeof(c))
+#define ZEROn(c, n) memset(c, 0, sizeof(*c) * n)
+#define DEV(c) (c->dev.this_device)
+
+/**
+ * DSS Composition Device Driver
+ *
+ * @dev: misc device base
+ * @dbgfs: debugfs hook
+ */
+struct dsscomp_dev {
+ struct miscdevice dev;
+ struct dentry *dbgfs;
+
+ /* cached DSS objects */
+ u32 num_ovls;
+ struct omap_overlay *ovls[MAX_OVERLAYS];
+ u32 num_mgrs;
+ struct omap_overlay_manager *mgrs[MAX_MANAGERS];
+ u32 num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ struct notifier_block state_notifiers[MAX_DISPLAYS];
+};
+
+extern int debug;
+
+#ifdef CONFIG_DEBUG_FS
+extern struct mutex dbg_mtx;
+extern struct list_head dbg_comps;
+#define DO_IF_DEBUG_FS(cmd) { \
+ mutex_lock(&dbg_mtx); \
+ cmd; \
+ mutex_unlock(&dbg_mtx); \
+}
+#else
+#define DO_IF_DEBUG_FS(cmd)
+#endif
+
+enum dsscomp_state {
+ DSSCOMP_STATE_ACTIVE = 0xAC54156E,
+ DSSCOMP_STATE_APPLYING = 0xB554C591,
+ DSSCOMP_STATE_APPLIED = 0xB60504C1,
+ DSSCOMP_STATE_PROGRAMMED = 0xC0520652,
+ DSSCOMP_STATE_DISPLAYED = 0xD15504CA,
+};
+
+struct dsscomp_data {
+ enum dsscomp_state state;
+ /*
+ * :TRICKY: before applying, overlays used in a composition are stored
+ * in ovl_mask and the other masks are empty. Once composition is
+ * applied, blank is set to see if all overlays are to be disabled on
+ * this composition, any disabled overlays in the composition are set in
+ * ovl_dmask, and ovl_mask is updated to include ALL overlays that are
+ * actually on the display - even if they are not part of the
+ * composition. The reason: we use ovl_mask to see if an overlay is used
+ * or planned to be used on a manager. We update ovl_mask when
+ * composition is programmed (removing the disabled overlays).
+ */
+ bool blank; /* true if all overlays are to be disabled */
+ u32 ovl_mask; /* overlays used on this frame */
+ u32 ovl_dmask; /* overlays disabled on this frame */
+ u32 ix; /* manager index that this frame is on */
+ struct dsscomp_setup_mgr_data frm;
+ struct dss2_ovl_info ovls[5];
+ void (*extra_cb)(void *data, int status);
+ void *extra_cb_data;
+ bool must_apply; /* whether composition must be applied */
+
+#ifdef CONFIG_DEBUG_FS
+ struct list_head dbg_q;
+ u32 dbg_used;
+ struct {
+ u32 t, state;
+ } dbg_log[8];
+#endif
+};
+
+struct dsscomp_sync_obj {
+ int state;
+ int fd;
+ atomic_t refs;
+};
+
+/*
+ * Kernel interface
+ */
+int dsscomp_queue_init(struct dsscomp_dev *cdev);
+void dsscomp_queue_exit(void);
+void dsscomp_gralloc_init(struct dsscomp_dev *cdev);
+void dsscomp_gralloc_exit(void);
+int dsscomp_gralloc_queue_ioctl(struct dsscomp_setup_dispc_data *d);
+int dsscomp_wait(struct dsscomp_sync_obj *sync, enum dsscomp_wait_phase phase,
+ int timeout);
+int dsscomp_state_notifier(struct notifier_block *nb,
+ unsigned long arg, void *ptr);
+
+/* basic operation - if not using queues */
+int set_dss_ovl_info(struct dss2_ovl_info *oi);
+int set_dss_mgr_info(struct dss2_mgr_info *mi, struct omapdss_ovl_cb *cb);
+struct omap_overlay_manager *find_dss_mgr(int display_ix);
+void swap_rb_in_ovl_info(struct dss2_ovl_info *oi);
+void swap_rb_in_mgr_info(struct dss2_mgr_info *mi);
+
+/*
+ * Debug functions
+ */
+void dump_ovl_info(struct dsscomp_dev *cdev, struct dss2_ovl_info *oi);
+void dump_comp_info(struct dsscomp_dev *cdev, struct dsscomp_setup_mgr_data *d,
+ const char *phase);
+void dump_total_comp_info(struct dsscomp_dev *cdev,
+ struct dsscomp_setup_dispc_data *d,
+ const char *phase);
+const char *dsscomp_get_color_name(enum omap_color_mode m);
+
+void dsscomp_dbg_comps(struct seq_file *s);
+void dsscomp_dbg_gralloc(struct seq_file *s);
+
+#define log_state_str(s) (\
+ (s) == DSSCOMP_STATE_ACTIVE ? "ACTIVE" : \
+ (s) == DSSCOMP_STATE_APPLYING ? "APPLY'N" : \
+ (s) == DSSCOMP_STATE_APPLIED ? "APPLIED" : \
+ (s) == DSSCOMP_STATE_PROGRAMMED ? "PROGR'D" : \
+ (s) == DSSCOMP_STATE_DISPLAYED ? "DISPL'D" : "INVALID")
+
+#define log_status_str(ev) ( \
+ ((ev) & DSS_COMPLETION_CHANGED) ? "CHANGED" : \
+ (ev) == DSS_COMPLETION_DISPLAYED ? "DISPLAYED" : \
+ (ev) == DSS_COMPLETION_PROGRAMMED ? "PROGRAMMED" : \
+ (ev) == DSS_COMPLETION_TORN ? "TORN" : \
+ (ev) == DSS_COMPLETION_RELEASED ? "RELEASED" : \
+ ((ev) & DSS_COMPLETION_RELEASED) ? "ECLIPSED" : "???")
+
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+extern struct dbg_event_t {
+ u32 ms, a1, a2, ix;
+ void *data;
+ const char *fmt;
+} dbg_events[128];
+extern u32 dbg_event_ix;
+
+void dsscomp_dbg_events(struct seq_file *s);
+#endif
+
+static inline
+void __log_event(u32 ix, u32 ms, void *data, const char *fmt, u32 a1, u32 a2)
+{
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+ if (!ms)
+ ms = ktime_to_ms(ktime_get());
+ dbg_events[dbg_event_ix].ms = ms;
+ dbg_events[dbg_event_ix].data = data;
+ dbg_events[dbg_event_ix].fmt = fmt;
+ dbg_events[dbg_event_ix].a1 = a1;
+ dbg_events[dbg_event_ix].a2 = a2;
+ dbg_events[dbg_event_ix].ix = ix;
+ dbg_event_ix = (dbg_event_ix + 1) % ARRAY_SIZE(dbg_events);
+#endif
+}
+
+#define log_event(ix, ms, data, fmt, a1, a2) \
+ DO_IF_DEBUG_FS(__log_event(ix, ms, data, fmt, a1, a2))
+
+#endif
diff --git a/drivers/video/omap2/dsscomp/gralloc.c b/drivers/video/omap2/dsscomp/gralloc.c
new file mode 100644
index 0000000..b75dfd9
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/gralloc.c
@@ -0,0 +1,607 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <mach/tiler.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+#include "dsscomp.h"
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+static bool blanked;
+
+#define NUM_TILER1D_SLOTS 2
+#define TILER1D_SLOT_SIZE (16 << 20)
+
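+/*
+ * Pre-reserved TILER 1D container areas: NUM_TILER1D_SLOTS slots of
+ * TILER1D_SLOT_SIZE each are allocated in dsscomp_gralloc_init().
+ * Non-TILER buffers are pinned into a free slot for the lifetime of a
+ * composition and returned to free_slots when the composition is released.
+ */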
+static struct tiler1d_slot {
+ struct list_head q;
+ tiler_blk_handle slot;
+ u32 phys;
+ u32 size;
+ u32 *page_map;
+} slots[NUM_TILER1D_SLOTS];
+static struct list_head free_slots;
+static struct dsscomp_dev *cdev;
+static DEFINE_MUTEX(mtx);
+static struct semaphore free_slots_sem =
+ __SEMAPHORE_INITIALIZER(free_slots_sem, 0);
+
+/* gralloc composition sync object */
+struct dsscomp_gralloc_t {
+ void (*cb_fn)(void *, int);
+ void *cb_arg;
+ struct list_head q;
+ struct list_head slots;
+ atomic_t refs;
+ bool early_callback;
+ bool programmed;
+};
+
+/* queued gralloc compositions */
+static LIST_HEAD(flip_queue);
+
+static u32 ovl_use_mask[MAX_MANAGERS];
+
+static void unpin_tiler_blocks(struct list_head *slots)
+{
+ struct tiler1d_slot *slot;
+
+ /* unpin any tiler memory */
+ list_for_each_entry(slot, slots, q) {
+ tiler_unpin_block(slot->slot);
+ up(&free_slots_sem);
+ }
+
+ /* free tiler slots */
+ list_splice_init(slots, &free_slots);
+}
+
+static void dsscomp_gralloc_cb(void *data, int status)
+{
+ struct dsscomp_gralloc_t *gsync = data, *gsync_;
+ bool early_cbs = true;
+ LIST_HEAD(done);
+
+ mutex_lock(&mtx);
+ if (gsync->early_callback && status == DSS_COMPLETION_PROGRAMMED)
+ gsync->programmed = true;
+
+ if (status & DSS_COMPLETION_RELEASED) {
+ if (atomic_dec_and_test(&gsync->refs))
+ unpin_tiler_blocks(&gsync->slots);
+
+ log_event(0, 0, gsync, "--refs=%d on %s",
+ atomic_read(&gsync->refs),
+ (u32) log_status_str(status));
+ }
+
+ /* get completed list items in order, if any */
+ list_for_each_entry_safe(gsync, gsync_, &flip_queue, q) {
+ if (gsync->cb_fn) {
+ early_cbs &= gsync->early_callback && gsync->programmed;
+ if (early_cbs) {
+ gsync->cb_fn(gsync->cb_arg, 1);
+ gsync->cb_fn = NULL;
+ }
+ }
+ if (gsync->refs.counter && gsync->cb_fn)
+ break;
+ if (gsync->refs.counter == 0)
+ list_move_tail(&gsync->q, &done);
+ }
+ mutex_unlock(&mtx);
+
+ /* call back for completed composition with mutex unlocked */
+ list_for_each_entry_safe(gsync, gsync_, &done, q) {
+ if (debug & DEBUG_GRALLOC_PHASES)
+ dev_info(DEV(cdev), "[%p] completed flip\n", gsync);
+
+ log_event(0, 0, gsync, "calling %pf [%p]",
+ (u32) gsync->cb_fn, (u32) gsync->cb_arg);
+
+ if (gsync->cb_fn)
+ gsync->cb_fn(gsync->cb_arg, 1);
+ kfree(gsync);
+ }
+}
+
+/* This is just test code for now that does the setup + apply.
+ It still uses userspace virtual addresses, but maps non
+ TILER buffers into 1D */
+int dsscomp_gralloc_queue_ioctl(struct dsscomp_setup_dispc_data *d)
+{
+ struct tiler_pa_info *pas[MAX_OVERLAYS];
+ s32 ret;
+ u32 i;
+
+ if (d->num_ovls > MAX_OVERLAYS)
+ return -EINVAL;
+
+ /* convert virtual addresses to physical and get tiler pa infos */
+ for (i = 0; i < d->num_ovls; i++) {
+ struct dss2_ovl_info *oi = d->ovls + i;
+ u32 addr = (u32) oi->address;
+
+ pas[i] = NULL;
+
+ /* assume virtual NV12 for now */
+ if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
+ oi->uv = tiler_virt2phys(addr +
+ oi->cfg.height * oi->cfg.stride);
+ else
+ oi->uv = 0;
+ oi->ba = tiler_virt2phys(addr);
+
+ /* map non-TILER buffers to 1D */
+ if ((oi->ba < 0x60000000 || oi->ba >= 0x80000000) && oi->ba)
+ pas[i] = user_block_to_pa(addr & PAGE_MASK,
+ PAGE_ALIGN(oi->cfg.height * oi->cfg.stride +
+ (addr & ~PAGE_MASK)) >> PAGE_SHIFT);
+ }
+ ret = dsscomp_gralloc_queue(d, pas, false, NULL, NULL);
+ for (i = 0; i < d->num_ovls; i++)
+ tiler_pa_free(pas[i]);
+ return ret;
+}
+
+int dsscomp_gralloc_queue(struct dsscomp_setup_dispc_data *d,
+ struct tiler_pa_info **pas,
+ bool early_callback,
+ void (*cb_fn)(void *, int), void *cb_arg)
+{
+ u32 i;
+ int r = 0;
+ struct omap_dss_device *dev;
+ struct omap_overlay_manager *mgr;
+ static DEFINE_MUTEX(local_mtx);
+ dsscomp_t comp[MAX_MANAGERS];
+ u32 ovl_new_use_mask[MAX_MANAGERS];
+ u32 mgr_set_mask = 0;
+ u32 ovl_set_mask = 0;
+ struct tiler1d_slot *slot = NULL;
+ u32 slot_used = 0;
+#ifdef CONFIG_DEBUG_FS
+ u32 ms = ktime_to_ms(ktime_get());
+#endif
+ u32 channels[ARRAY_SIZE(d->mgrs)], ch;
+ int skip;
+ struct dsscomp_gralloc_t *gsync;
+ struct dss2_rect_t win = { .w = 0 };
+
+ /* reserve tiler areas if not already done so */
+ dsscomp_gralloc_init(cdev);
+
+ dump_total_comp_info(cdev, d, "queue");
+ for (i = 0; i < d->num_ovls; i++)
+ dump_ovl_info(cdev, d->ovls + i);
+
+ mutex_lock(&local_mtx);
+
+ mutex_lock(&mtx);
+
+ /* create sync object with 1 temporary ref */
+ gsync = kzalloc(sizeof(*gsync), GFP_KERNEL);
+ gsync->cb_arg = cb_arg;
+ gsync->cb_fn = cb_fn;
+ gsync->refs.counter = 1;
+ gsync->early_callback = early_callback;
+ INIT_LIST_HEAD(&gsync->slots);
+ list_add_tail(&gsync->q, &flip_queue);
+ if (debug & DEBUG_GRALLOC_PHASES)
+ dev_info(DEV(cdev), "[%p] queuing flip\n", gsync);
+
+ log_event(0, ms, gsync, "new in %pf (refs=1)",
+ (u32) dsscomp_gralloc_queue, 0);
+
+ /* ignore frames while we are blanked */
+ skip = blanked;
+ if (skip && (debug & DEBUG_PHASES))
+ dev_info(DEV(cdev), "[%p,%08x] ignored\n", gsync, d->sync_id);
+
+ /* mark blank frame by NULL tiler pa pointer */
+ if (!skip && pas == NULL)
+ blanked = true;
+
+ mutex_unlock(&mtx);
+
+ d->num_mgrs = min(d->num_mgrs, (u16) ARRAY_SIZE(d->mgrs));
+ d->num_ovls = min(d->num_ovls, (u16) ARRAY_SIZE(d->ovls));
+
+ memset(comp, 0, sizeof(comp));
+ memset(ovl_new_use_mask, 0, sizeof(ovl_new_use_mask));
+
+ if (skip)
+ goto skip_comp;
+
+ d->mode = DSSCOMP_SETUP_DISPLAY;
+
+ /* mark managers we are using */
+ for (i = 0; i < d->num_mgrs; i++) {
+ /* verify display is valid & connected, ignore if not */
+ if (d->mgrs[i].ix >= cdev->num_displays)
+ continue;
+ dev = cdev->displays[d->mgrs[i].ix];
+ if (!dev) {
+ dev_warn(DEV(cdev), "failed to get display%d\n",
+ d->mgrs[i].ix);
+ continue;
+ }
+ mgr = dev->manager;
+ if (!mgr) {
+ dev_warn(DEV(cdev), "no manager for display%d\n",
+ d->mgrs[i].ix);
+ continue;
+ }
+ channels[i] = ch = mgr->id;
+ mgr_set_mask |= 1 << ch;
+
+ /* swap red & blue if requested */
+ if (d->mgrs[i].swap_rb)
+ swap_rb_in_mgr_info(d->mgrs + i);
+ }
+
+ /* create dsscomp objects for set managers (including active ones) */
+ for (ch = 0; ch < MAX_MANAGERS; ch++) {
+ if (!(mgr_set_mask & (1 << ch)) && !ovl_use_mask[ch])
+ continue;
+
+ mgr = cdev->mgrs[ch];
+
+ comp[ch] = dsscomp_new(mgr);
+ if (IS_ERR(comp[ch])) {
+ comp[ch] = NULL;
+ dev_warn(DEV(cdev), "failed to get composition on %s\n",
+ mgr->name);
+ continue;
+ }
+
+ /* set basic manager information for blanked managers */
+ if (!(mgr_set_mask & (1 << ch))) {
+ struct dss2_mgr_info mi = {
+ .alpha_blending = true,
+ .ix = comp[ch]->frm.mgr.ix,
+ };
+ dsscomp_set_mgr(comp[ch], &mi);
+ }
+
+ comp[ch]->must_apply = true;
+ r = dsscomp_setup(comp[ch], d->mode, win);
+ if (r)
+ dev_err(DEV(cdev), "failed to setup comp (%d)\n", r);
+ }
+
+ /* configure manager data from gralloc composition */
+ for (i = 0; i < d->num_mgrs; i++) {
+ ch = channels[i];
+ r = dsscomp_set_mgr(comp[ch], d->mgrs + i);
+ if (r)
+ dev_err(DEV(cdev), "failed to set mgr%d (%d)\n", ch, r);
+ }
+
+ /* NOTE: none of the dsscomp sets should fail as composition is new */
+ for (i = 0; i < d->num_ovls; i++) {
+ struct dss2_ovl_info *oi = d->ovls + i;
+ u32 mgr_ix = oi->cfg.mgr_ix;
+ u32 size;
+
+ /* verify manager index */
+ if (mgr_ix >= d->num_mgrs) {
+ dev_err(DEV(cdev), "invalid manager for ovl%d\n",
+ oi->cfg.ix);
+ continue;
+ }
+ ch = channels[mgr_ix];
+
+ /* skip overlays on compositions we could not create */
+ if (!comp[ch])
+ continue;
+
+ /* swap red & blue if requested */
+ if (d->mgrs[mgr_ix].swap_rb)
+ swap_rb_in_ovl_info(d->ovls + i);
+
+ /* copy prior overlay to avoid mapping layers twice to 1D */
+ if (oi->addressing == OMAP_DSS_BUFADDR_OVL_IX) {
+ unsigned int j = oi->ba;
+ if (j >= i) {
+ WARN(1, "Invalid clone layer (%u)", j);
+ goto skip_buffer;
+ }
+
+ oi->ba = d->ovls[j].ba;
+ oi->uv = d->ovls[j].uv;
+ goto skip_map1d;
+ } else if (oi->addressing == OMAP_DSS_BUFADDR_FB) {
+ /* get fb */
+ int fb_ix = (oi->ba >> 28);
+ int fb_uv_ix = (oi->uv >> 28);
+ struct fb_info *fbi = NULL, *fbi_uv = NULL;
+ size_t size = oi->cfg.height * oi->cfg.stride;
+ if (fb_ix >= num_registered_fb ||
+ (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
+ fb_uv_ix >= num_registered_fb)) {
+ WARN(1, "display has no framebuffer");
+ goto skip_buffer;
+ }
+
+ fbi = fbi_uv = registered_fb[fb_ix];
+ if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
+ fbi_uv = registered_fb[fb_uv_ix];
+
+ if (size + oi->ba > fbi->fix.smem_len ||
+ (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12 &&
+ (size >> 1) + oi->uv > fbi_uv->fix.smem_len)) {
+ WARN(1, "image outside of framebuffer memory");
+ goto skip_buffer;
+ }
+
+ oi->ba += fbi->fix.smem_start;
+ oi->uv += fbi_uv->fix.smem_start;
+ goto skip_map1d;
+ }
+
+ /* map non-TILER buffers to 1D */
+
+ /* skip 2D and disabled layers */
+ if (!pas[i] || !oi->cfg.enabled)
+ goto skip_map1d;
+
+ if (!slot) {
+ if (down_timeout(&free_slots_sem,
+ msecs_to_jiffies(100))) {
+ dev_warn(DEV(cdev), "could not obtain tiler slot");
+ goto skip_buffer;
+ }
+ mutex_lock(&mtx);
+ slot = list_first_entry(&free_slots, typeof(*slot), q);
+ list_move(&slot->q, &gsync->slots);
+ mutex_unlock(&mtx);
+ }
+
+ size = oi->cfg.stride * oi->cfg.height;
+ if (oi->cfg.color_mode == OMAP_DSS_COLOR_NV12)
+ size += size >> 2;
+ size = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ if (slot_used + size > slot->size) {
+ dev_err(DEV(cdev), "tiler slot not big enough for frame %d + %d > %d",
+ slot_used, size, slot->size);
+ goto skip_buffer;
+ }
+
+ /* "map" into TILER 1D - will happen after loop */
+ oi->ba = slot->phys + (slot_used << PAGE_SHIFT) +
+ (oi->ba & ~PAGE_MASK);
+ memcpy(slot->page_map + slot_used, pas[i]->mem,
+ sizeof(*slot->page_map) * size);
+ slot_used += size;
+ goto skip_map1d;
+
+skip_buffer:
+ oi->cfg.enabled = false;
+skip_map1d:
+
+ if (oi->cfg.enabled)
+ ovl_new_use_mask[ch] |= 1 << oi->cfg.ix;
+
+ r = dsscomp_set_ovl(comp[ch], oi);
+ if (r)
+ dev_err(DEV(cdev), "failed to set ovl%d (%d)\n",
+ oi->cfg.ix, r);
+ else
+ ovl_set_mask |= 1 << oi->cfg.ix;
+ }
+
+ if (slot && slot_used) {
+ r = tiler_pin_block(slot->slot, slot->page_map,
+ slot_used);
+ if (r)
+ dev_err(DEV(cdev), "failed to pin %d pages into"
+ " %d-pg slots (%d)\n", slot_used,
+ TILER1D_SLOT_SIZE >> PAGE_SHIFT, r);
+ }
+
+ for (ch = 0; ch < MAX_MANAGERS; ch++) {
+ /* disable all overlays not specifically set from prior frame */
+ u32 mask = ovl_use_mask[ch] & ~ovl_set_mask;
+
+ if (!comp[ch])
+ continue;
+
+ while (mask) {
+ struct dss2_ovl_info oi = {
+ .cfg.zonly = true,
+ .cfg.enabled = false,
+ .cfg.ix = fls(mask) - 1,
+ };
+ dsscomp_set_ovl(comp[ch], &oi);
+ mask &= ~(1 << oi.cfg.ix);
+ }
+
+ /* associate dsscomp objects with this gralloc composition */
+ comp[ch]->extra_cb = dsscomp_gralloc_cb;
+ comp[ch]->extra_cb_data = gsync;
+ atomic_inc(&gsync->refs);
+ log_event(0, ms, gsync, "++refs=%d for [%p]",
+ atomic_read(&gsync->refs), (u32) comp[ch]);
+
+ r = dsscomp_delayed_apply(comp[ch]);
+ if (r)
+ dev_err(DEV(cdev), "failed to apply comp (%d)\n", r);
+ else
+ ovl_use_mask[ch] = ovl_new_use_mask[ch];
+ }
+skip_comp:
+ /* release sync object ref - this completes unapplied compositions */
+ dsscomp_gralloc_cb(gsync, DSS_COMPLETION_RELEASED);
+
+ mutex_unlock(&local_mtx);
+
+ return r;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static int blank_complete;
+static DECLARE_WAIT_QUEUE_HEAD(early_suspend_wq);
+
+static void dsscomp_early_suspend_cb(void *data, int status)
+{
+ blank_complete = true;
+ wake_up(&early_suspend_wq);
+}
+
+static void dsscomp_early_suspend(struct early_suspend *h)
+{
+ struct dsscomp_setup_dispc_data d = {
+ .num_mgrs = 0,
+ };
+ int err;
+
+ pr_info("DSSCOMP: %s\n", __func__);
+
+ /* use gralloc queue as we need to blank all screens */
+ blank_complete = false;
+ dsscomp_gralloc_queue(&d, NULL, false, dsscomp_early_suspend_cb, NULL);
+
+ /* wait until composition is displayed */
+ err = wait_event_timeout(early_suspend_wq, blank_complete,
+ msecs_to_jiffies(500));
+ if (err == 0)
+ pr_warn("DSSCOMP: timeout blanking screen\n");
+ else
+ pr_info("DSSCOMP: blanked screen\n");
+}
+
+static void dsscomp_late_resume(struct early_suspend *h)
+{
+ pr_info("DSSCOMP: %s\n", __func__);
+ blanked = false;
+}
+
+static struct early_suspend early_suspend_info = {
+ .suspend = dsscomp_early_suspend,
+ .resume = dsscomp_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif
+
+void dsscomp_dbg_gralloc(struct seq_file *s)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct dsscomp_gralloc_t *g;
+ struct tiler1d_slot *t;
+ dsscomp_t c;
+ int i;
+
+ mutex_lock(&dbg_mtx);
+ seq_printf(s, "ACTIVE GRALLOC FLIPS\n\n");
+ list_for_each_entry(g, &flip_queue, q) {
+ char *sep = "";
+ seq_printf(s, " [%p] (refs=%d)\n"
+ " slots=[", g, atomic_read(&g->refs));
+ list_for_each_entry(t, &g->slots, q) {
+ seq_printf(s, "%s%08x", sep, t->phys);
+ sep = ", ";
+ }
+ seq_printf(s, "]\n cmdcb=[%08x] ", (u32) g->cb_arg);
+ if (g->cb_fn)
+ seq_printf(s, "%pf\n\n ", g->cb_fn);
+ else
+ seq_printf(s, "(called)\n\n ");
+
+ list_for_each_entry(c, &dbg_comps, dbg_q) {
+ if (c->extra_cb && c->extra_cb_data == g)
+ seq_printf(s, "| %8s ",
+ cdev->mgrs[c->ix]->name);
+ }
+ seq_printf(s, "\n ");
+ list_for_each_entry(c, &dbg_comps, dbg_q) {
+ if (c->extra_cb && c->extra_cb_data == g)
+ seq_printf(s, "| [%08x] %7s ", (u32) c,
+ log_state_str(c->state));
+ }
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+ for (i = 0; i < ARRAY_SIZE(c->dbg_log); i++) {
+ int go = false;
+ seq_printf(s, "\n ");
+ list_for_each_entry(c, &dbg_comps, dbg_q) {
+ if (!c->extra_cb || c->extra_cb_data != g)
+ continue;
+ if (i < c->dbg_used) {
+ u32 t = c->dbg_log[i].t;
+ u32 state = c->dbg_log[i].state;
+ seq_printf(s, "| % 6d.%03d %7s ",
+ t / 1000, t % 1000,
+ log_state_str(state));
+ go |= c->dbg_used > i + 1;
+ } else {
+ seq_printf(s, "%-21s", "|");
+ }
+ }
+ if (!go)
+ break;
+ }
+#endif
+ seq_printf(s, "\n\n");
+ }
+ seq_printf(s, "\n");
+ mutex_unlock(&dbg_mtx);
+#endif
+}
+
+void dsscomp_gralloc_init(struct dsscomp_dev *cdev_)
+{
+ int i;
+
+ /* save at least cdev pointer */
+ if (!cdev && cdev_) {
+ cdev = cdev_;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ register_early_suspend(&early_suspend_info);
+#endif
+ }
+
+ if (!free_slots.next) {
+ INIT_LIST_HEAD(&free_slots);
+ for (i = 0; i < NUM_TILER1D_SLOTS; i++) {
+ u32 phys;
+ tiler_blk_handle slot =
+ tiler_alloc_block_area(TILFMT_PAGE,
+ TILER1D_SLOT_SIZE, 1, &phys, NULL);
+ if (IS_ERR_OR_NULL(slot)) {
+ pr_err("could not allocate slot");
+ break;
+ }
+ slots[i].slot = slot;
+ slots[i].phys = phys;
+ slots[i].size = TILER1D_SLOT_SIZE >> PAGE_SHIFT;
+ slots[i].page_map = kmalloc(sizeof(*slots[i].page_map) *
+ slots[i].size, GFP_KERNEL);
+ if (!slots[i].page_map) {
+ pr_err("could not allocate page_map");
+ tiler_free_block_area(slot);
+ break;
+ }
+ list_add(&slots[i].q, &free_slots);
+ up(&free_slots_sem);
+ }
+ /* reset free_slots if no TILER memory could be reserved */
+ if (!i)
+ ZERO(free_slots);
+ }
+}
+
+void dsscomp_gralloc_exit(void)
+{
+ struct tiler1d_slot *slot;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&early_suspend_info);
+#endif
+
+ list_for_each_entry(slot, &free_slots, q)
+ tiler_free_block_area(slot->slot);
+ INIT_LIST_HEAD(&free_slots);
+}
diff --git a/drivers/video/omap2/dsscomp/queue.c b/drivers/video/omap2/dsscomp/queue.c
new file mode 100644
index 0000000..c25d655
--- /dev/null
+++ b/drivers/video/omap2/dsscomp/queue.c
@@ -0,0 +1,807 @@
+/*
+ * linux/drivers/video/omap2/dsscomp/queue.c
+ *
+ * DSS Composition queueing support
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+
+#include <video/omapdss.h>
+#include <video/dsscomp.h>
+#include <plat/dsscomp.h>
+
+#include <linux/debugfs.h>
+
+#include "dsscomp.h"
+/* queue state */
+
+static DEFINE_MUTEX(mtx);
+
+/* reference-counted mask of overlays queued to a display */
+struct maskref {
+ u32 mask;
+ u32 refs[MAX_OVERLAYS];
+};
+
+static struct {
+ struct workqueue_struct *apply_workq;
+
+ u32 ovl_mask; /* overlays used on this display */
+ struct maskref ovl_qmask; /* overlays queued to this display */
+ bool blanking;
+} mgrq[MAX_MANAGERS];
+
+static struct workqueue_struct *cb_wkq; /* callback work queue */
+static struct dsscomp_dev *cdev;
+
+#ifdef CONFIG_DEBUG_FS
+LIST_HEAD(dbg_comps);
+DEFINE_MUTEX(dbg_mtx);
+#endif
+
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+struct dbg_event_t dbg_events[128];
+u32 dbg_event_ix;
+#endif
+
+static inline void __log_state(dsscomp_t c, void *fn, u32 ev)
+{
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+ if (c->dbg_used < ARRAY_SIZE(c->dbg_log)) {
+ u32 t = (u32) ktime_to_ms(ktime_get());
+ c->dbg_log[c->dbg_used].t = t;
+ c->dbg_log[c->dbg_used++].state = c->state;
+ __log_event(20 * c->ix + 20, t, c, ev ? "%pf on %s" : "%pf",
+ (u32) fn, (u32) log_status_str(ev));
+ }
+#endif
+}
+#define log_state(c, fn, ev) DO_IF_DEBUG_FS(__log_state(c, fn, ev))
+
+static inline void maskref_incbit(struct maskref *om, u32 ix)
+{
+ om->refs[ix]++;
+ om->mask |= 1 << ix;
+}
+
+static void maskref_decmask(struct maskref *om, u32 mask)
+{
+ while (mask) {
+ u32 ix = fls(mask) - 1, m = 1 << ix;
+ if (!--om->refs[ix])
+ om->mask &= ~m;
+ mask &= ~m;
+ }
+}
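+
+/*
+ * Example (illustrative): after two maskref_incbit(&m, 2) calls, bit 2 is set
+ * in m.mask and m.refs[2] == 2, so maskref_decmask(&m, 1 << 2) must run twice
+ * before bit 2 is cleared from m.mask.
+ */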
+
+/*
+ * ===========================================================================
+ * INIT
+ * ===========================================================================
+ */
+
+/* Initialize queue structures, and set up state of the displays */
+int dsscomp_queue_init(struct dsscomp_dev *cdev_)
+{
+ u32 i, j;
+ cdev = cdev_;
+
+ if (ARRAY_SIZE(mgrq) < cdev->num_mgrs)
+ return -EINVAL;
+
+ ZERO(mgrq);
+ for (i = 0; i < cdev->num_mgrs; i++) {
+ struct omap_overlay_manager *mgr;
+ mgrq[i].apply_workq = create_singlethread_workqueue("dsscomp_apply");
+ if (!mgrq[i].apply_workq)
+ goto error;
+
+ /* record overlays on this display */
+ mgr = cdev->mgrs[i];
+ for (j = 0; j < cdev->num_ovls; j++) {
+ if (cdev->ovls[j]->info.enabled &&
+ mgr &&
+ cdev->ovls[j]->manager == mgr)
+ mgrq[i].ovl_mask |= 1 << j;
+ }
+ }
+
+ cb_wkq = create_singlethread_workqueue("dsscomp_cb");
+ if (!cb_wkq)
+ goto error;
+
+ return 0;
+error:
+ while (i--)
+ destroy_workqueue(mgrq[i].apply_workq);
+ return -ENOMEM;
+}
+
+/* get display index from manager */
+static u32 get_display_ix(struct omap_overlay_manager *mgr)
+{
+ u32 i;
+
+ /* handle if manager is not attached to a display */
+ if (!mgr || !mgr->device)
+ return cdev->num_displays;
+
+ /* find manager's display */
+ for (i = 0; i < cdev->num_displays; i++)
+ if (cdev->displays[i] == mgr->device)
+ break;
+
+ return i;
+}
+
+/*
+ * ===========================================================================
+ * QUEUING SETUP OPERATIONS
+ * ===========================================================================
+ */
+
+/* create a new composition for a display */
+dsscomp_t dsscomp_new(struct omap_overlay_manager *mgr)
+{
+ struct dsscomp_data *comp = NULL;
+ u32 display_ix = get_display_ix(mgr);
+
+ /* check manager */
+ u32 ix = mgr ? mgr->id : cdev->num_mgrs;
+ if (ix >= cdev->num_mgrs || display_ix >= cdev->num_displays)
+ return ERR_PTR(-EINVAL);
+
+ /* allocate composition */
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ /* initialize new composition */
+ comp->ix = ix; /* save where this composition came from */
+ comp->ovl_mask = comp->ovl_dmask = 0;
+ comp->frm.sync_id = 0;
+ comp->frm.mgr.ix = display_ix;
+ comp->state = DSSCOMP_STATE_ACTIVE;
+
+ DO_IF_DEBUG_FS({
+ __log_state(comp, dsscomp_new, 0);
+ list_add(&comp->dbg_q, &dbg_comps);
+ });
+
+ return comp;
+}
+EXPORT_SYMBOL(dsscomp_new);
+
+/* returns overlays used in a composition */
+u32 dsscomp_get_ovls(dsscomp_t comp)
+{
+ u32 mask;
+
+ mutex_lock(&mtx);
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+ mask = comp->ovl_mask;
+ mutex_unlock(&mtx);
+
+ return mask;
+}
+EXPORT_SYMBOL(dsscomp_get_ovls);
+
+/* set overlay info */
+int dsscomp_set_ovl(dsscomp_t comp, struct dss2_ovl_info *ovl)
+{
+ int r = -EBUSY;
+ u32 i, mask, oix, ix;
+ struct omap_overlay *o;
+
+ mutex_lock(&mtx);
+
+ BUG_ON(!ovl);
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+
+ ix = comp->ix;
+
+ if (ovl->cfg.ix >= cdev->num_ovls) {
+ r = -EINVAL;
+ goto done;
+ }
+
+ /* if overlay is already part of the composition */
+ mask = 1 << ovl->cfg.ix;
+ if (mask & comp->ovl_mask) {
+ /* look up overlay */
+ for (oix = 0; oix < comp->frm.num_ovls; oix++) {
+ if (comp->ovls[oix].cfg.ix == ovl->cfg.ix)
+ break;
+ }
+ BUG_ON(oix == comp->frm.num_ovls);
+ } else {
+ /* check if ovl is free to use */
+ if (comp->frm.num_ovls >= ARRAY_SIZE(comp->ovls))
+ goto done;
+
+ /* not in any other display's queue */
+ if (mask & ~mgrq[ix].ovl_qmask.mask) {
+ for (i = 0; i < cdev->num_mgrs; i++) {
+ if (i == ix)
+ continue;
+ if (mgrq[i].ovl_qmask.mask & mask)
+ goto done;
+ }
+ }
+
+ /* and disabled (unless forced) if on another manager */
+ o = cdev->ovls[ovl->cfg.ix];
+ if (o->info.enabled && (!o->manager || o->manager->id != ix))
+ goto done;
+
+ /* add overlay to composition & display */
+ comp->ovl_mask |= mask;
+ oix = comp->frm.num_ovls++;
+ maskref_incbit(&mgrq[ix].ovl_qmask, ovl->cfg.ix);
+ }
+
+ comp->ovls[oix] = *ovl;
+ r = 0;
+done:
+ mutex_unlock(&mtx);
+
+ return r;
+}
+EXPORT_SYMBOL(dsscomp_set_ovl);
+
+/* get overlay info */
+int dsscomp_get_ovl(dsscomp_t comp, u32 ix, struct dss2_ovl_info *ovl)
+{
+ int r;
+ u32 oix;
+
+ mutex_lock(&mtx);
+
+ BUG_ON(!ovl);
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+
+ if (ix >= cdev->num_ovls) {
+ r = -EINVAL;
+ } else if (comp->ovl_mask & (1 << ix)) {
+ r = 0;
+ for (oix = 0; oix < comp->frm.num_ovls; oix++)
+ if (comp->ovls[oix].cfg.ix == ix) {
+ *ovl = comp->ovls[oix];
+ break;
+ }
+ BUG_ON(oix == comp->frm.num_ovls);
+ } else {
+ r = -ENOENT;
+ }
+
+ mutex_unlock(&mtx);
+
+ return r;
+}
+EXPORT_SYMBOL(dsscomp_get_ovl);
+
+/* set manager info */
+int dsscomp_set_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr)
+{
+ mutex_lock(&mtx);
+
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+ BUG_ON(mgr->ix != comp->frm.mgr.ix);
+
+ comp->frm.mgr = *mgr;
+
+ mutex_unlock(&mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(dsscomp_set_mgr);
+
+/* get manager info */
+int dsscomp_get_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr)
+{
+ mutex_lock(&mtx);
+
+ BUG_ON(!mgr);
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+
+ *mgr = comp->frm.mgr;
+
+ mutex_unlock(&mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(dsscomp_get_mgr);
+
+/* set composition setup mode and window */
+int dsscomp_setup(dsscomp_t comp, enum dsscomp_setup_mode mode,
+ struct dss2_rect_t win)
+{
+ mutex_lock(&mtx);
+
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+
+ comp->frm.mode = mode;
+ comp->frm.win = win;
+
+ mutex_unlock(&mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(dsscomp_setup);
+
+/*
+ * ===========================================================================
+ * QUEUING COMMITTING OPERATIONS
+ * ===========================================================================
+ */
+void dsscomp_drop(dsscomp_t comp)
+{
+ /* decrement unprogrammed references */
+ if (comp->state < DSSCOMP_STATE_PROGRAMMED)
+ maskref_decmask(&mgrq[comp->ix].ovl_qmask, comp->ovl_mask);
+ comp->state = 0;
+
+ if (debug & DEBUG_COMPOSITIONS)
+ dev_info(DEV(cdev), "[%p] released\n", comp);
+
+ DO_IF_DEBUG_FS(list_del(&comp->dbg_q));
+
+ kfree(comp);
+}
+EXPORT_SYMBOL(dsscomp_drop);
+
+struct dsscomp_cb_work {
+ struct work_struct work;
+ struct dsscomp_data *comp;
+ int status;
+};
+
+static void dsscomp_mgr_delayed_cb(struct work_struct *work)
+{
+ struct dsscomp_cb_work *wk = container_of(work, typeof(*wk), work);
+ struct dsscomp_data *comp = wk->comp;
+ int status = wk->status;
+ u32 ix;
+
+ kfree(work);
+
+ mutex_lock(&mtx);
+
+ BUG_ON(comp->state == DSSCOMP_STATE_ACTIVE);
+ ix = comp->ix;
+
+ /* call extra callbacks if requested */
+ if (comp->extra_cb)
+ comp->extra_cb(comp->extra_cb_data, status);
+
+ /* handle programming & release */
+ if (status == DSS_COMPLETION_PROGRAMMED) {
+ comp->state = DSSCOMP_STATE_PROGRAMMED;
+ log_state(comp, dsscomp_mgr_delayed_cb, status);
+
+ /* update used overlay mask */
+ mgrq[ix].ovl_mask = comp->ovl_mask & ~comp->ovl_dmask;
+ maskref_decmask(&mgrq[ix].ovl_qmask, comp->ovl_mask);
+
+ if (debug & DEBUG_PHASES)
+ dev_info(DEV(cdev), "[%p] programmed\n", comp);
+ } else if ((status == DSS_COMPLETION_DISPLAYED) &&
+ comp->state == DSSCOMP_STATE_PROGRAMMED) {
+ /* composition is displayed for the first time */
+ comp->state = DSSCOMP_STATE_DISPLAYED;
+ log_state(comp, dsscomp_mgr_delayed_cb, status);
+ if (debug & DEBUG_PHASES)
+ dev_info(DEV(cdev), "[%p] displayed\n", comp);
+ } else if (status & DSS_COMPLETION_RELEASED) {
+ /* composition is no longer displayed */
+ log_event(20 * comp->ix + 20, 0, comp, "%pf on %s",
+ (u32) dsscomp_mgr_delayed_cb,
+ (u32) log_status_str(status));
+ dsscomp_drop(comp);
+ }
+ mutex_unlock(&mtx);
+}
+
+static u32 dsscomp_mgr_callback(void *data, int id, int status)
+{
+ struct dsscomp_data *comp = data;
+
+ if (status == DSS_COMPLETION_PROGRAMMED ||
+ (status == DSS_COMPLETION_DISPLAYED &&
+ comp->state != DSSCOMP_STATE_DISPLAYED) ||
+ (status & DSS_COMPLETION_RELEASED)) {
+ struct dsscomp_cb_work *wk = kzalloc(sizeof(*wk), GFP_ATOMIC);
+ wk->comp = comp;
+ wk->status = status;
+ INIT_WORK(&wk->work, dsscomp_mgr_delayed_cb);
+ queue_work(cb_wkq, &wk->work);
+ }
+
+ /* get each callback only once */
+ return ~status;
+}
+
+static inline bool dssdev_manually_updated(struct omap_dss_device *dev)
+{
+ return dev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+ dev->driver->get_update_mode(dev) != OMAP_DSS_UPDATE_AUTO;
+}
+
+/* apply composition */
+/* at this point the composition is not on any queue */
+static int dsscomp_apply(dsscomp_t comp)
+{
+ int i, r = -EFAULT;
+ u32 dmask, display_ix;
+ struct omap_dss_device *dssdev;
+ struct omap_dss_driver *drv;
+ struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ struct dsscomp_setup_mgr_data *d;
+ u32 oix;
+ bool cb_programmed = false;
+
+ struct omapdss_ovl_cb cb = {
+ .fn = dsscomp_mgr_callback,
+ .data = comp,
+ .mask = DSS_COMPLETION_DISPLAYED |
+ DSS_COMPLETION_PROGRAMMED | DSS_COMPLETION_RELEASED,
+ };
+
+ BUG_ON(comp->state != DSSCOMP_STATE_APPLYING);
+
+ /* check if the display is valid and used */
+ r = -ENODEV;
+ d = &comp->frm;
+ display_ix = d->mgr.ix;
+ if (display_ix >= cdev->num_displays)
+ goto done;
+ dssdev = cdev->displays[display_ix];
+ if (!dssdev)
+ goto done;
+
+ drv = dssdev->driver;
+ mgr = dssdev->manager;
+ if (!mgr || !drv || mgr->id >= cdev->num_mgrs)
+ goto done;
+
+ dump_comp_info(cdev, d, "apply");
+
+ r = 0;
+ dmask = 0;
+ for (oix = 0; oix < comp->frm.num_ovls; oix++) {
+ struct dss2_ovl_info *oi = comp->ovls + oix;
+
+ /* keep track of disabled overlays */
+ if (!oi->cfg.enabled)
+ dmask |= 1 << oi->cfg.ix;
+
+ if (r && !comp->must_apply)
+ continue;
+
+ dump_ovl_info(cdev, oi);
+
+ if (oi->cfg.ix >= cdev->num_ovls) {
+ r = -EINVAL;
+ continue;
+ }
+ ovl = cdev->ovls[oi->cfg.ix];
+
+ /* set overlays' manager & info */
+ if (ovl->info.enabled && ovl->manager != mgr) {
+ r = -EBUSY;
+ goto skip_ovl_set;
+ }
+ if (ovl->manager != mgr) {
+ /*
+ * Ideally, we should call ovl->unset_manager(ovl),
+ * but it may block on go even though the disabling
+ * of the overlay already went through. So instead,
+ * we are just clearing the manager.
+ */
+ ovl->manager = NULL;
+ r = ovl->set_manager(ovl, mgr);
+ if (r)
+ goto skip_ovl_set;
+ }
+
+ r = set_dss_ovl_info(oi);
+skip_ovl_set:
+ if (r && comp->must_apply) {
+ dev_err(DEV(cdev), "[%p] set ovl%d failed %d", comp,
+ oi->cfg.ix, r);
+ oi->cfg.enabled = false;
+ dmask |= 1 << oi->cfg.ix;
+ set_dss_ovl_info(oi);
+ }
+ }
+
+ /*
+ * set manager's info - this also sets the completion callback,
+ * so if it succeeds, we will use the callback to complete the
+ * composition. Otherwise, we can skip the composition now.
+ */
+ if (!r || comp->must_apply) {
+ r = set_dss_mgr_info(&d->mgr, &cb);
+ cb_programmed = r == 0;
+ }
+
+ if (r && !comp->must_apply) {
+ dev_err(DEV(cdev), "[%p] set failed %d\n", comp, r);
+ goto done;
+ } else {
+ if (r)
+ dev_warn(DEV(cdev), "[%p] ignoring set failure %d\n",
+ comp, r);
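+ /* the composition counts as blank when every overlay in it
+ * is disabled */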
+ comp->blank = dmask == comp->ovl_mask;
+ comp->ovl_dmask = dmask;
+
+ /*
+ * Check other overlays that may also use this display.
+ * NOTE: This is only needed in case someone changes
+ * overlays via sysfs. We use comp->ovl_mask to refresh
+ * the overlays actually used on a manager when the
+ * composition is programmed.
+ */
+ for (i = 0; i < cdev->num_ovls; i++) {
+ u32 mask = 1 << i;
+ if ((~comp->ovl_mask & mask) &&
+ cdev->ovls[i]->info.enabled &&
+ cdev->ovls[i]->manager == mgr) {
+ mutex_lock(&mtx);
+ comp->ovl_mask |= mask;
+ maskref_incbit(&mgrq[comp->ix].ovl_qmask, i);
+ mutex_unlock(&mtx);
+ }
+ }
+ }
+
+ /* apply changes and call update on manual panels */
+ /* no need for mutex as no callbacks are scheduled yet */
+ comp->state = DSSCOMP_STATE_APPLIED;
+ log_state(comp, dsscomp_apply, 0);
+
+ if (!d->win.w && !d->win.x)
+ d->win.w = dssdev->panel.timings.x_res - d->win.x;
+ if (!d->win.h && !d->win.y)
+ d->win.h = dssdev->panel.timings.y_res - d->win.y;
+
+ mutex_lock(&mtx);
+ if (mgrq[comp->ix].blanking) {
+ pr_info_ratelimited("ignoring apply mgr(%s) while blanking\n",
+ mgr->name);
+ r = -ENODEV;
+ } else {
+ r = mgr->apply(mgr);
+ if (r)
+ dev_err(DEV(cdev), "failed while applying %d", r);
+ /* keep error if set_mgr_info failed */
+ if (!r && !cb_programmed)
+ r = -EINVAL;
+ }
+ mutex_unlock(&mtx);
+
+ /*
+ * TRICKY: try to unregister callback to see if callbacks have
+ * been applied (moved into DSS2 pipeline). Unregistering also
+ * avoids having to unnecessarily kick out compositions (which
+ * would result in screen blinking). If the callbacks failed to apply
+ * (e.g. we could not set or apply them), we will need to call
+ * them ourselves (we note this by returning an error).
+ */
+ if (cb_programmed && r) {
+ /* clear error if callback already registered */
+ if (omap_dss_manager_unregister_callback(mgr, &cb))
+ r = 0;
+ }
+ /* if failed to apply, kick out prior composition */
+ if (comp->must_apply && r)
+ mgr->blank(mgr, true);
+
+ if (!r && (d->mode & DSSCOMP_SETUP_MODE_DISPLAY)) {
+ /* cannot handle update errors, so ignore them */
+ if (dssdev_manually_updated(dssdev) && drv->update)
+ drv->update(dssdev, d->win.x,
+ d->win.y, d->win.w, d->win.h);
+ else
+ /* wait for sync to do smooth animations */
+ mgr->wait_for_vsync(mgr);
+ }
+
+done:
+ return r;
+}
+
+struct dsscomp_apply_work {
+ struct work_struct work;
+ dsscomp_t comp;
+};
+
+int dsscomp_state_notifier(struct notifier_block *nb,
+ unsigned long arg, void *ptr)
+{
+ struct omap_dss_device *dssdev = ptr;
+ enum omap_dss_display_state state = arg;
+ struct omap_overlay_manager *mgr = dssdev->manager;
+ if (mgr) {
+ mutex_lock(&mtx);
+ if (state == OMAP_DSS_DISPLAY_DISABLED) {
+ mgr->blank(mgr, true);
+ mgrq[mgr->id].blanking = true;
+ } else if (state == OMAP_DSS_DISPLAY_ACTIVE) {
+ mgrq[mgr->id].blanking = false;
+ }
+ mutex_unlock(&mtx);
+ }
+ return 0;
+}
+
+
+static void dsscomp_do_apply(struct work_struct *work)
+{
+ struct dsscomp_apply_work *wk = container_of(work, typeof(*wk), work);
+ /* complete compositions that failed to apply */
+ if (dsscomp_apply(wk->comp))
+ dsscomp_mgr_callback(wk->comp, -1, DSS_COMPLETION_ECLIPSED_SET);
+ kfree(wk);
+}
+
+int dsscomp_delayed_apply(dsscomp_t comp)
+{
+ /* don't block in case we are called from interrupt context */
+ struct dsscomp_apply_work *wk = kzalloc(sizeof(*wk), GFP_NOWAIT);
+ if (!wk)
+ return -ENOMEM;
+
+ mutex_lock(&mtx);
+
+ BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
+ comp->state = DSSCOMP_STATE_APPLYING;
+ log_state(comp, dsscomp_delayed_apply, 0);
+
+ if (debug & DEBUG_PHASES)
+ dev_info(DEV(cdev), "[%p] applying\n", comp);
+ mutex_unlock(&mtx);
+
+ wk->comp = comp;
+ INIT_WORK(&wk->work, dsscomp_do_apply);
+ return queue_work(mgrq[comp->ix].apply_workq, &wk->work) ? 0 : -EBUSY;
+}
+EXPORT_SYMBOL(dsscomp_delayed_apply);
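+
+/*
+ * Illustrative sketch of how a caller is expected to drive the queue API
+ * above (mgr, ovl_info, mgr_info and win are caller-provided values of types
+ * omap_overlay_manager, dss2_ovl_info, dss2_mgr_info and dss2_rect_t; error
+ * handling is omitted and OR-ing the two setup modes is an assumption):
+ *
+ *	dsscomp_t c = dsscomp_new(mgr);
+ *	dsscomp_set_ovl(c, &ovl_info);
+ *	dsscomp_set_mgr(c, &mgr_info);
+ *	dsscomp_setup(c, DSSCOMP_SETUP_MODE_APPLY | DSSCOMP_SETUP_MODE_DISPLAY,
+ *		      win);
+ *	dsscomp_delayed_apply(c);
+ */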
+
+/*
+ * ===========================================================================
+ * DEBUGFS
+ * ===========================================================================
+ */
+
+#ifdef CONFIG_DEBUG_FS
+void seq_print_comp(struct seq_file *s, dsscomp_t c)
+{
+ struct dsscomp_setup_mgr_data *d = &c->frm;
+ int i;
+
+ seq_printf(s, " [%p]: %s%s\n", c, c->blank ? "blank " : "",
+ c->state == DSSCOMP_STATE_ACTIVE ? "ACTIVE" :
+ c->state == DSSCOMP_STATE_APPLYING ? "APPLYING" :
+ c->state == DSSCOMP_STATE_APPLIED ? "APPLIED" :
+ c->state == DSSCOMP_STATE_PROGRAMMED ? "PROGRAMMED" :
+ c->state == DSSCOMP_STATE_DISPLAYED ? "DISPLAYED" :
+ "???");
+ seq_printf(s, " sync_id=%x, flags=%c%c%c\n",
+ d->sync_id,
+ (d->mode & DSSCOMP_SETUP_MODE_APPLY) ? 'A' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_DISPLAY) ? 'D' : '-',
+ (d->mode & DSSCOMP_SETUP_MODE_CAPTURE) ? 'C' : '-');
+ for (i = 0; i < d->num_ovls; i++) {
+ struct dss2_ovl_info *oi;
+ struct dss2_ovl_cfg *g;
+ oi = d->ovls + i;
+ g = &oi->cfg;
+ if (g->zonly) {
+ seq_printf(s, " ovl%d={%s z%d}\n",
+ g->ix, g->enabled ? "ON" : "off", g->zorder);
+ } else {
+ seq_printf(s, " ovl%d={%s z%d %s%s *%d%%"
+ " %d*%d:%d,%d+%d,%d rot%d%s"
+ " => %d,%d+%d,%d %p/%p|%d}\n",
+ g->ix, g->enabled ? "ON" : "off", g->zorder,
+ dsscomp_get_color_name(g->color_mode) ? : "N/A",
+ g->pre_mult_alpha ? " premult" : "",
+ (g->global_alpha * 100 + 128) / 255,
+ g->width, g->height, g->crop.x, g->crop.y,
+ g->crop.w, g->crop.h,
+ g->rotation, g->mirror ? "+mir" : "",
+ g->win.x, g->win.y, g->win.w, g->win.h,
+ (void *) oi->ba, (void *) oi->uv, g->stride);
+ }
+ }
+ if (c->extra_cb)
+ seq_printf(s, " gsync=[%p] %pf\n\n", c->extra_cb_data,
+ c->extra_cb);
+ else
+ seq_printf(s, " gsync=[%p] (called)\n\n", c->extra_cb_data);
+}
+#endif
+
+void dsscomp_dbg_comps(struct seq_file *s)
+{
+#ifdef CONFIG_DEBUG_FS
+ dsscomp_t c;
+ u32 i;
+
+ mutex_lock(&dbg_mtx);
+ for (i = 0; i < cdev->num_mgrs; i++) {
+ struct omap_overlay_manager *mgr = cdev->mgrs[i];
+ seq_printf(s, "ACTIVE COMPOSITIONS on %s\n\n", mgr->name);
+ list_for_each_entry(c, &dbg_comps, dbg_q) {
+ struct dss2_mgr_info *mi = &c->frm.mgr;
+ if (mi->ix < cdev->num_displays &&
+ cdev->displays[mi->ix]->manager == mgr)
+ seq_print_comp(s, c);
+ }
+
+ /* print manager cache */
+ mgr->dump_cb(mgr, s);
+ }
+ mutex_unlock(&dbg_mtx);
+#endif
+}
+
+void dsscomp_dbg_events(struct seq_file *s)
+{
+#ifdef CONFIG_DSSCOMP_DEBUG_LOG
+ u32 i;
+ struct dbg_event_t *d;
+
+ mutex_lock(&dbg_mtx);
+ for (i = dbg_event_ix; i < dbg_event_ix + ARRAY_SIZE(dbg_events); i++) {
+ d = dbg_events + (i % ARRAY_SIZE(dbg_events));
+ if (!d->ms)
+ continue;
+ seq_printf(s, "[% 5d.%03d] %*s[%08x] ",
+ d->ms / 1000, d->ms % 1000,
+ d->ix + ((u32) d->data) % 7,
+ "", (u32) d->data);
+ seq_printf(s, d->fmt, d->a1, d->a2);
+ seq_printf(s, "\n");
+ }
+ mutex_unlock(&dbg_mtx);
+#endif
+}
+
+/*
+ * ===========================================================================
+ * EXIT
+ * ===========================================================================
+ */
+void dsscomp_queue_exit(void)
+{
+ if (cdev) {
+ int i;
+ for (i = 0; i < cdev->num_mgrs; i++)
+ destroy_workqueue(mgrq[i].apply_workq);
+ destroy_workqueue(cb_wkq);
+ cdev = NULL;
+ }
+}
+EXPORT_SYMBOL(dsscomp_queue_exit);
diff --git a/drivers/video/omap2/hdcp/Kconfig b/drivers/video/omap2/hdcp/Kconfig
new file mode 100644
index 0000000..a18f183
--- /dev/null
+++ b/drivers/video/omap2/hdcp/Kconfig
@@ -0,0 +1,14 @@
+menuconfig OMAP4_HDCP
+ bool "OMAP4 HDCP support"
+ depends on OMAP2_DSS && OMAP4_DSS_HDMI
+ default n
+ help
+ This adds support for High-bandwidth Digital Content Protection (HDCP).
+ See http://www.digital-cp.com/ for the HDCP specification.
+
+config OMAP4_HDCP_DEBUG
+ bool "OMAP4 HDCP Debugging"
+ depends on OMAP4_HDCP
+ default n
+ help
+ Enables verbose debugging in the HDCP drivers
diff --git a/drivers/video/omap2/hdcp/Makefile b/drivers/video/omap2/hdcp/Makefile
new file mode 100644
index 0000000..80b0c0c
--- /dev/null
+++ b/drivers/video/omap2/hdcp/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for HDCP linux kernel module.
+#
+
+ccflags-$(CONFIG_OMAP4_HDCP_DEBUG) = -DDEBUG -DHDCP_DEBUG
+
+obj-$(CONFIG_OMAP4_HDCP) += hdcp.o
+hdcp-y := hdcp_top.o hdcp_lib.o hdcp_ddc.o
diff --git a/drivers/video/omap2/hdcp/hdcp.h b/drivers/video/omap2/hdcp/hdcp.h
new file mode 100644
index 0000000..e22a588
--- /dev/null
+++ b/drivers/video/omap2/hdcp/hdcp.h
@@ -0,0 +1,394 @@
+/*
+ * hdcp.h
+ *
+ * HDCP interface DSS driver setting for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Fabrice Olivero
+ * Fabrice Olivero <f-olivero@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDCP_H_
+#define _HDCP_H_
+
+
+/********************************/
+/* Structures related to ioctl */
+/********************************/
+
+/* HDCP key size in 32-bit words */
+#define DESHDCP_KEY_SIZE 160
+
+/* HDCP ioctl */
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct hdcp_encrypt_control {
+ uint32_t in_key[DESHDCP_KEY_SIZE];
+ uint32_t *out_key;
+};
+
+struct hdcp_enable_control {
+ uint32_t key[DESHDCP_KEY_SIZE];
+ int nb_retry;
+};
+
+#define MAX_SHA_DATA_SIZE 645
+#define MAX_SHA_VPRIME_SIZE 20
+
+struct hdcp_sha_in {
+ uint8_t data[MAX_SHA_DATA_SIZE];
+ uint32_t byte_counter;
+ uint8_t vprime[MAX_SHA_VPRIME_SIZE];
+};
+
+struct hdcp_wait_control {
+ uint32_t event;
+ struct hdcp_sha_in *data;
+};
+
+/* HDCP ioctl */
+#define HDCP_IOCTL_MAGIC 'h'
+#define HDCP_ENABLE _IOW(HDCP_IOCTL_MAGIC, 0, \
+ struct hdcp_enable_control)
+#define HDCP_DISABLE _IO(HDCP_IOCTL_MAGIC, 1)
+#define HDCP_ENCRYPT_KEY _IOWR(HDCP_IOCTL_MAGIC, 2, \
+ struct hdcp_encrypt_control)
+#define HDCP_QUERY_STATUS _IOWR(HDCP_IOCTL_MAGIC, 3, uint32_t)
+#define HDCP_WAIT_EVENT _IOWR(HDCP_IOCTL_MAGIC, 4, \
+ struct hdcp_wait_control)
+#define HDCP_DONE _IOW(HDCP_IOCTL_MAGIC, 5, uint32_t)
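+
+/* Illustrative user-space sketch of the ioctl interface (the device node
+ * name and retry count are assumptions, and filling in key[] is omitted):
+ *
+ *	struct hdcp_enable_control ctl = { .nb_retry = 5 };
+ *	int fd = open("/dev/hdcp", O_RDWR);
+ *	ioctl(fd, HDCP_ENABLE, &ctl);
+ */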
+
+/* HDCP state */
+#define HDCP_STATE_DISABLED 0
+#define HDCP_STATE_INIT 1
+#define HDCP_STATE_AUTH_1ST_STEP 2
+#define HDCP_STATE_AUTH_2ND_STEP 3
+#define HDCP_STATE_AUTH_3RD_STEP 4
+#define HDCP_STATE_AUTH_FAIL_RESTARTING 5
+#define HDCP_STATE_AUTH_FAILURE 6
+
+/* HDCP events */
+#define HDCP_EVENT_STEP1 (1 << 0x0)
+#define HDCP_EVENT_STEP2 (1 << 0x1)
+#define HDCP_EVENT_EXIT (1 << 0x2)
+
+/* HDCP user space status */
+#define HDCP_US_NO_ERR (0 << 8)
+#define HDCP_US_FAILURE (1 << 8)
+
+#ifdef __KERNEL__
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#define _9032_AUTO_RI_ /* Auto Ri mode */
+#define _9032_BCAP_ /* BCAP polling */
+#undef _9032_AN_STOP_FIX_
+
+#ifdef DEBUG
+#define DDC_DBG /* Log DDC data */
+#undef POWER_TRANSITION_DBG /* Add wait loops to allow testing DSS power
+ * transition during HDCP */
+#endif
+
+/***************************/
+/* HW specific definitions */
+/***************************/
+
+/* DESHDCP base address */
+/*----------------------*/
+
+#define DSS_SS_FROM_L3__DESHDCP 0x58007000
+
+/* DESHDCP registers */
+#define DESHDCP__DHDCP_CTRL 0x020
+#define DESHDCP__DHDCP_DATA_L 0x024
+#define DESHDCP__DHDCP_DATA_H 0x028
+
+/* DESHDCP CTRL bits */
+#define DESHDCP__DHDCP_CTRL__DIRECTION_POS_F 2
+#define DESHDCP__DHDCP_CTRL__DIRECTION_POS_L 2
+
+#define DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_F 0
+#define DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_L 0
+
+/* HDMI WP base address */
+/*----------------------*/
+#define HDMI_WP 0x58006000
+
+/* HDMI CORE SYSTEM base address */
+/*-------------------------------*/
+
+#define HDMI_IP_CORE_SYSTEM 0x400
+
+/* HDMI CORE registers */
+#define HDMI_IP_CORE_SYSTEM__DCTL 0x034
+
+#define HDMI_IP_CORE_SYSTEM__HDCP_CTRL 0x03C
+
+#define HDMI_IP_CORE_SYSTEM__BKSV0 0x040
+
+#define HDMI_IP_CORE_SYSTEM__AN0 0x054
+
+#define HDMI_IP_CORE_SYSTEM__AKSV0 0x074
+
+#define HDMI_IP_CORE_SYSTEM__R1 0x088
+#define HDMI_IP_CORE_SYSTEM__R2 0x08C
+
+#define HDMI_IP_CORE_SYSTEM__RI_CMD 0x09C
+#define HDMI_IP_CORE_SYSTEM__RI_STAT 0x098
+
+#define HDMI_IP_CORE_SYSTEM__INTR2 0x1C8
+#define HDMI_IP_CORE_SYSTEM__INTR3 0x1CC
+
+#define HDMI_IP_CORE_SYSTEM__INT_UNMASK2 0x1D8
+#define HDMI_IP_CORE_SYSTEM__INT_UNMASK3 0x1DC
+
+#define HDMI_IP_CORE_SYSTEM__SHA_CTRL 0x330
+
+#define HDMI_IP_CORE_SYSTEM__INTR2__BCAP 0x80
+#define HDMI_IP_CORE_SYSTEM__INTR3__RI_ERR 0xF0
+
+enum hdcp_repeater {
+ HDCP_RECEIVER = 0,
+ HDCP_REPEATER = 1
+};
+
+enum encryption_state {
+ HDCP_ENC_OFF = 0x0,
+ HDCP_ENC_ON = 0x1
+};
+
+/* HDMI CORE AV base address */
+/*---------------------------*/
+
+#define HDMI_CORE_AV_BASE 0x900
+#ifndef HDMI_CORE_AV_HDMI_CTRL
+#define HDMI_CORE_AV_HDMI_CTRL 0x0BC
+#define HDMI_CORE_AV_PB_CTRL2 0x0FC
+#define HDMI_CORE_AV_CP_BYTE1 0x37C
+#endif
+
+#define HDMI_CORE_AV_HDMI_CTRL__HDMI_MODE 0x01
+
+enum av_mute {
+ AV_MUTE_SET = 0x01,
+ AV_MUTE_CLEAR = 0x10
+};
+/***********************/
+/* HDCP DDC addresses */
+/***********************/
+
+#define DDC_BKSV_ADDR 0x00
+#define DDC_Ri_ADDR 0x08
+#define DDC_AKSV_ADDR 0x10
+#define DDC_AN_ADDR 0x18
+#define DDC_V_ADDR 0x20
+#define DDC_BCAPS_ADDR 0x40
+#define DDC_BSTATUS_ADDR 0x41
+#define DDC_KSV_FIFO_ADDR 0x43
+
+#define DDC_BKSV_LEN 5
+#define DDC_Ri_LEN 2
+#define DDC_AKSV_LEN 5
+#define DDC_AN_LEN 8
+#define DDC_V_LEN 20
+#define DDC_BCAPS_LEN 1
+#define DDC_BSTATUS_LEN 2
+
+#define DDC_BIT_REPEATER 6
+
+#define DDC_BSTATUS0_MAX_DEVS 0x80
+#define DDC_BSTATUS0_DEV_COUNT 0x7F
+#define DDC_BSTATUS1_MAX_CASC 0x08
+
+/***************************/
+/* Definitions */
+/***************************/
+
+/* Status / error codes */
+#define HDCP_OK 0
+#define HDCP_DDC_ERROR 1
+#define HDCP_AUTH_FAILURE 2
+#define HDCP_AKSV_ERROR 3
+#define HDCP_3DES_ERROR 4
+#define HDCP_SHA1_ERROR 5
+#define HDCP_DRIVER_ERROR 6
+#define HDCP_CANCELLED_AUTH 7
+
+#define HDCP_INFINITE_REAUTH 0x100
+#define HDCP_MAX_DDC_ERR 5
+
+/* FIXME: there should be a 300 ms delay between the HDMI start frame event
+ * and HDCP enable, to respect the 7 VSYNC delay at 24 Hz
+ * (7 * 1000 / 24 ~= 292 ms, rounded up to 300 ms)
+ */
+#define HDCP_ENABLE_DELAY 300
+#define HDCP_R0_DELAY 110
+#define HDCP_KSV_TIMEOUT_DELAY 5000
+#define HDCP_REAUTH_DELAY 100
+
+/* DDC access timeout in ms */
+#define HDCP_DDC_TIMEOUT 500
+#define HDCP_STOP_FRAME_BLOCKING_TIMEOUT (2*HDCP_DDC_TIMEOUT)
+
+/* Event source */
+#define HDCP_SRC_SHIFT 8
+#define HDCP_IOCTL_SRC (0x1 << HDCP_SRC_SHIFT)
+#define HDCP_HDMI_SRC (0x2 << HDCP_SRC_SHIFT)
+#define HDCP_IRQ_SRC (0x4 << HDCP_SRC_SHIFT)
+#define HDCP_WORKQUEUE_SRC (0x8 << HDCP_SRC_SHIFT)
+
+/* Workqueue events */
+/* Note: HDCP_ENABLE_CTL, HDCP_R0_EXP_EVENT, HDCP_KSV_TIMEOUT_EVENT and
+ * HDCP_AUTH_REATT_EVENT can be cancelled by HDCP disabling
+ */
+#define HDCP_ENABLE_CTL (HDCP_IOCTL_SRC | 0)
+#define HDCP_DISABLE_CTL (HDCP_IOCTL_SRC | 1)
+#define HDCP_START_FRAME_EVENT (HDCP_HDMI_SRC | 2)
+#define HDCP_STOP_FRAME_EVENT (HDCP_HDMI_SRC | 3)
+#define HDCP_HPD_LOW_EVENT (HDCP_IRQ_SRC | 4)
+#define HDCP_RI_FAIL_EVENT (HDCP_IRQ_SRC | 5)
+#define HDCP_KSV_LIST_RDY_EVENT (HDCP_IRQ_SRC | 6)
+#define HDCP_R0_EXP_EVENT (HDCP_WORKQUEUE_SRC | 7)
+#define HDCP_KSV_TIMEOUT_EVENT (HDCP_WORKQUEUE_SRC | 8)
+#define HDCP_AUTH_REATT_EVENT (HDCP_WORKQUEUE_SRC | 9)
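+
+/* Example encoding (illustrative): HDCP_R0_EXP_EVENT is
+ * (0x8 << HDCP_SRC_SHIFT) | 7 = 0x807, i.e. bits [15:8] identify the event
+ * source and the low byte the event number.
+ */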
+
+/* IRQ status */
+#define HDCP_IRQ_RI_FAIL 0x01
+#define HDCP_IRQ_KSV_RDY 0x02
+
+enum hdcp_states {
+ HDCP_DISABLED,
+ HDCP_ENABLE_PENDING,
+ HDCP_AUTHENTICATION_START,
+ HDCP_WAIT_R0_DELAY,
+ HDCP_WAIT_KSV_LIST,
+ HDCP_LINK_INTEGRITY_CHECK,
+ HDCP_KEY_ENCRYPTION_ONGOING
+};
+
+enum hdmi_states {
+ HDMI_STOPPED,
+ HDMI_STARTED
+};
+
+struct hdcp_delayed_work {
+ struct delayed_work work;
+ int event;
+};
+
+struct hdcp {
+ void __iomem *hdmi_wp_base_addr;
+ void __iomem *deshdcp_base_addr;
+ struct mutex lock;
+ struct hdcp_enable_control *en_ctrl;
+ dev_t dev_id;
+ struct class *hdcp_class;
+ enum hdmi_states hdmi_state;
+ enum hdcp_states hdcp_state;
+ int auth_state;
+ struct delayed_work *pending_start;
+ /* The following variable stores works submitted from workqueue
+ * context.
+ * WARNING: only ONE work at a time can be stored (no conflict
+ * should happen). It is used to allow cancelling pending works
+ * when disabling HDCP.
+ */
+ struct delayed_work *pending_wq_event;
+ int retry_cnt;
+ int dss_state;
+ int pending_disable;
+ int hdmi_restart;
+ int hpd_low;
+ spinlock_t spinlock;
+ struct workqueue_struct *workqueue;
+ int hdcp_up_event;
+ int hdcp_down_event;
+ bool hdcp_keys_loaded;
+};
+
+extern struct hdcp hdcp;
+extern struct hdcp_sha_in sha_input;
+
+
+/***************************/
+/* Macros for accessing HW */
+/***************************/
+
+#define WR_REG_32(base, offset, val) __raw_writel(val, base + offset)
+#define RD_REG_32(base, offset) __raw_readl(base + offset)
+
+
+#undef FLD_MASK
+#define FLD_MASK(start, end) (((1 << (start - end + 1)) - 1) << (end))
+#undef FLD_VAL
+#define FLD_VAL(val, start, end) (((val) << end) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define WR_FIELD_32(base, offset, start, end, val) \
+ WR_REG_32(base, offset, FLD_MOD(RD_REG_32(base, offset), val, \
+ start, end))
+
+#define RD_FIELD_32(base, offset, start, end) \
+ ((RD_REG_32(base, offset) & FLD_MASK(start, end)) >> (end))
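+
+/*
+ * Worked example (illustrative): FLD_MASK(7, 4) is 0xF0, FLD_VAL(0x3, 7, 4)
+ * is 0x30, FLD_GET(0x35, 7, 4) is 0x3 and FLD_MOD(0x05, 0x3, 7, 4) is 0x35.
+ */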
+
+
+#undef DBG
+
+#ifdef HDCP_DEBUG
+#define DBG(format, ...) \
+ printk(KERN_DEBUG "HDCP: " format "\n", ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+/***************************/
+/* Function prototypes */
+/***************************/
+
+int hdcp_user_space_task(int flags);
+
+/* 3DES */
+int hdcp_3des_load_key(uint32_t *deshdcp_encrypted_key);
+void hdcp_3des_encrypt_key(struct hdcp_encrypt_control *enc_ctrl,
+ uint32_t out_key[DESHDCP_KEY_SIZE]);
+
+/* IP control */
+int hdcp_lib_disable(void);
+int hdcp_lib_step1_start(void);
+int hdcp_lib_step1_r0_check(void);
+int hdcp_lib_step2(void);
+int hdcp_lib_irq(void);
+void hdcp_lib_auto_ri_check(bool state);
+void hdcp_lib_auto_bcaps_rdy_check(bool state);
+void hdcp_lib_set_av_mute(enum av_mute av_mute_state);
+void hdcp_lib_set_encryption(enum encryption_state enc_state);
+u8 hdcp_lib_check_repeater_bit_in_tx(void);
+
+/* DDC */
+int hdcp_ddc_read(u16 no_bytes, u8 addr, u8 *pdata);
+int hdcp_ddc_write(u16 no_bytes, u8 addr, u8 *pdata);
+void hdcp_ddc_abort(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _HDCP_H_ */
diff --git a/drivers/video/omap2/hdcp/hdcp_ddc.c b/drivers/video/omap2/hdcp/hdcp_ddc.c
new file mode 100644
index 0000000..e5104fa
--- /dev/null
+++ b/drivers/video/omap2/hdcp/hdcp_ddc.c
@@ -0,0 +1,310 @@
+/*
+ * hdcp_ddc.c
+ *
+ * HDCP interface DSS driver setting for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Fabrice Olivero
+ * Fabrice Olivero <f-olivero@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include "hdcp.h"
+#include "hdcp_ddc.h"
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_suspend_resume_auto_ri
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_suspend_resume_auto_ri(enum ri_suspend_resume state)
+{
+ static u8 OldRiStat, OldRiCommand;
+ u8 TimeOut = 10;
+
+ /* Suspend Auto Ri in order to allow the FW to access the MDDC bus.
+ * Poll 0x72:0x26[0] for MDDC bus availability or timeout.
+ */
+
+ DBG("hdcp_suspend_resume_auto_ri() state=%s",
+ state == AUTO_RI_SUSPEND ? "SUSPEND" : "RESUME");
+
+ if (state == AUTO_RI_SUSPEND) {
+ /* Save original Auto Ri state */
+ OldRiCommand = RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD, 0, 0);
+
+ /* Disable Auto Ri */
+ hdcp_lib_auto_ri_check(false);
+
+ /* Wait for HW to release MDDC bus */
+ /* TODO: while loop / timeout to be enhanced */
+ while (--TimeOut) {
+ if (!RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_STAT, 0, 0))
+ break;
+ }
+
+ /* MDDC bus not relinquished */
+ if (!TimeOut) {
+ printk(KERN_ERR "HDCP: Suspending Auto Ri failed !\n");
+ return -HDCP_DDC_ERROR;
+ }
+
+ OldRiStat = RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_STAT, 0, 0);
+ } else {
+ /* If Auto Ri was enabled before it was suspended */
+ if ((OldRiStat) && (OldRiCommand))
+ /* Re-enable Auto Ri */
+ hdcp_lib_auto_ri_check(true);
+ }
+
+ return HDCP_OK;
+}
+
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_start_ddc_transfer
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_start_ddc_transfer(mddc_type *mddc_cmd, u8 operation)
+{
+ u8 *cmd = (u8 *)mddc_cmd;
+ struct timeval t0, t1, t2;
+ u32 time_elapsed_ms = 0;
+ u32 i, size;
+ unsigned long flags;
+
+#ifdef _9032_AUTO_RI_
+ if (hdcp_suspend_resume_auto_ri(AUTO_RI_SUSPEND))
+ return -HDCP_DDC_ERROR;
+#endif
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+ /* Abort Master DDC operation and Clear FIFO pointer */
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD, MASTER_CMD_CLEAR_FIFO);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD);
+
+ /* Sending DDC header, it'll clear DDC Status register too */
+ for (i = 0; i < 7; i++) {
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_ADDR + i * sizeof(uint32_t),
+ cmd[i]);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_ADDR +
+ i * sizeof(uint32_t));
+ }
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+
+ do_gettimeofday(&t1);
+ memcpy(&t0, &t1, sizeof(t0));
+
+ i = 0;
+ size = mddc_cmd->nbytes_lsb + (mddc_cmd->nbytes_msb << 8);
+
+ while ((i < size) && (hdcp.pending_disable == 0)) {
+ if (operation == DDC_WRITE) {
+ /* Write data to DDC FIFO as long as it is NOT full */
+ if (RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS, 3, 3)
+ == 0) {
+ WR_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_DATA,
+ mddc_cmd->pdata[i++]);
+ do_gettimeofday(&t1);
+ }
+ } else if (operation == DDC_READ) {
+ /* Read from DDC FIFO as long as it is NOT empty */
+ if (RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS, 2, 2)
+ == 0) {
+ mddc_cmd->pdata[i++] =
+ RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_DATA);
+ do_gettimeofday(&t1);
+ }
+ }
+
+ do_gettimeofday(&t2);
+ time_elapsed_ms = (t2.tv_sec - t1.tv_sec) * 1000 +
+ (t2.tv_usec - t1.tv_usec) / 1000;
+
+ if (time_elapsed_ms > HDCP_DDC_TIMEOUT) {
+ DBG("DDC timeout - no data during %d ms - "
+ "status=%02x %u",
+ HDCP_DDC_TIMEOUT,
+ RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS),
+ jiffies_to_msecs(jiffies));
+ goto ddc_error;
+ }
+ }
+
+ if (hdcp.pending_disable)
+ goto ddc_abort;
+
+ /* Wait for the FIFO to be empty (end of transfer) */
+ while ((RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS) != 0x4) &&
+ (hdcp.pending_disable == 0)) {
+ do_gettimeofday(&t2);
+ time_elapsed_ms = (t2.tv_sec - t1.tv_sec) * 1000 +
+ (t2.tv_usec - t1.tv_usec) / 1000;
+
+ if (time_elapsed_ms > HDCP_DDC_TIMEOUT) {
+ DBG("DDC timeout - FIFO not getting empty - "
+ "status=%02x",
+ RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS));
+ goto ddc_error;
+ }
+ }
+
+ if (hdcp.pending_disable)
+ goto ddc_abort;
+
+ DBG("DDC transfer: bytes: %d time_us: %lu status: %x",
+ i,
+ (t2.tv_sec - t0.tv_sec) * 1000000 + (t2.tv_usec - t0.tv_usec),
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS));
+
+#ifdef DDC_DBG
+ {
+ int k;
+ for (k = 0; k < i; k++)
+ printk(KERN_DEBUG "%02x ", mddc_cmd->pdata[k]);
+ printk(KERN_DEBUG "\n");
+ }
+#endif
+
+#ifdef _9032_AUTO_RI_
+ /* Re-enable Auto Ri */
+ if (hdcp_suspend_resume_auto_ri(AUTO_RI_RESUME))
+ return -HDCP_DDC_ERROR;
+#endif
+
+ return HDCP_OK;
+
+ddc_error:
+ hdcp_ddc_abort();
+ return -HDCP_DDC_ERROR;
+
+ddc_abort:
+ DBG("DDC transfer aborted - status=%02x",
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS));
+
+ return HDCP_OK;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_ddc_operation
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_ddc_operation(u16 no_bytes, u8 addr, u8 *pdata,
+ enum ddc_operation operation)
+{
+ mddc_type mddc;
+
+ mddc.slaveAddr = HDCPRX_SLV;
+ mddc.offset = 0;
+ mddc.regAddr = addr;
+ mddc.nbytes_lsb = no_bytes & 0xFF;
+ mddc.nbytes_msb = (no_bytes & 0x300) >> 8;
+ mddc.dummy = 0;
+ mddc.pdata = pdata;
+
+ if (operation == DDC_READ)
+ mddc.cmd = MASTER_CMD_SEQ_RD;
+ else
+ mddc.cmd = MASTER_CMD_SEQ_WR;
+
+ DBG("DDC %s: offset=%02x len=%d %u", operation == DDC_READ ?
+ "READ" : "WRITE",
+ addr, no_bytes,
+ jiffies_to_msecs(jiffies));
+
+ return hdcp_start_ddc_transfer(&mddc, operation);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_ddc_read
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_ddc_read(u16 no_bytes, u8 addr, u8 *pdata)
+{
+ return hdcp_ddc_operation(no_bytes, addr, pdata, DDC_READ);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_ddc_write
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_ddc_write(u16 no_bytes, u8 addr, u8 *pdata)
+{
+ return hdcp_ddc_operation(no_bytes, addr, pdata, DDC_WRITE);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_ddc_abort
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_ddc_abort(void)
+{
+ unsigned long flags;
+
+ /* In case of I2C_NO_ACK error, do not abort DDC to avoid
+ * DDC lockup
+ */
+ if (RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_STATUS) & 0x20)
+ return;
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+ /* Abort Master DDC operation and Clear FIFO pointer */
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD, MASTER_CMD_ABORT);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD);
+
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD, MASTER_CMD_CLEAR_FIFO);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__DDC_CMD);
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+}
diff --git a/drivers/video/omap2/hdcp/hdcp_ddc.h b/drivers/video/omap2/hdcp/hdcp_ddc.h
new file mode 100644
index 0000000..83bae23
--- /dev/null
+++ b/drivers/video/omap2/hdcp/hdcp_ddc.h
@@ -0,0 +1,111 @@
+/*
+ * hdcp_ddc.h
+ *
+ * HDCP interface DSS driver setting for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Fabrice Olivero
+ * Fabrice Olivero <f-olivero@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define HDCPRX_SLV 0x74
+
+#define MASTER_BASE 0xEC
+#define MDDC_MANUAL_ADDR 0xEC
+#define MDDC_SLAVE_ADDR 0xED
+#define MDDC_SEGMENT_ADDR 0xEE
+#define MDDC_OFFSET_ADDR 0xEF
+#define MDDC_DIN_CNT_LSB_ADDR 0xF0
+#define MDDC_DIN_CNT_MSB_ADDR 0xF1
+#define MDDC_STATUS_ADDR 0xF2
+#define MDDC_COMMAND_ADDR 0xF3
+#define MDDC_FIFO_ADDR 0xF4
+#define MDDC_FIFO_CNT_ADDR 0xF5
+
+#define BIT_MDDC_ST_IN_PROGR 0x10
+#define BIT_MDDC_ST_I2C_LOW 0x40
+#define BIT_MDDC_ST_NO_ACK 0x20
+
+/* DDC Command[3:0]:
+ *
+ * 1111 - Abort transaction
+ * 1001 - Clear FIFO
+ * 1010 - Clock SCL
+ * 0000 - Current address read with no ACK on last byte
+ * 0001 - Current address read with ACK on last byte
+ * 0010 - Sequential read with no ACK on last byte
+ * 0011 - Sequential read with ACK on last byte
+ * 0100 - Enhanced DDC read with no ACK on last byte
+ * 0101 - Enhanced DDC read with ACK on last byte
+ * 0110 - Sequential write ignoring ACK on last byte
+ * 0111 - Sequential write requiring ACK on last byte
+ */
+
+#define MASTER_CMD_ABORT 0x0f
+#define MASTER_CMD_CLEAR_FIFO 0x09
+#define MASTER_CMD_CLOCK 0x0a
+#define MASTER_CMD_CUR_RD 0x00
+#define MASTER_CMD_SEQ_RD 0x02
+#define MASTER_CMD_ENH_RD 0x04
+#define MASTER_CMD_SEQ_WR 0x06
+
+#define MASTER_FIFO_WR_USE 0x01
+#define MASTER_FIFO_RD_USE 0x02
+#define MASTER_FIFO_EMPTY 0x04
+#define MASTER_FIFO_FULL 0x08
+#define MASTER_DDC_BUSY 0x10
+#define MASTER_DDC_NOACK 0x20
+#define MASTER_DDC_STUCK 0x40
+#define MASTER_DDC_RSVD 0x80
+
+/* OMAP 4 HDMI TRM: */
+#define HDMI_IP_CORE_SYSTEM__DDC_MAN 0x3B0
+#define HDMI_IP_CORE_SYSTEM__DDC_ADDR 0x3B4
+#define HDMI_IP_CORE_SYSTEM__DDC_SEGM 0x3B8
+#define HDMI_IP_CORE_SYSTEM__DDC_OFFSET 0x3BC
+#define HDMI_IP_CORE_SYSTEM__DDC_COUNT1 0x3C0
+#define HDMI_IP_CORE_SYSTEM__DDC_COUNT2 0x3C4
+#define HDMI_IP_CORE_SYSTEM__DDC_STATUS 0x3C8
+#define HDMI_IP_CORE_SYSTEM__DDC_CMD 0x3CC
+#define HDMI_IP_CORE_SYSTEM__DDC_DATA 0x3D0
+#define HDMI_IP_CORE_SYSTEM__DDC_FIFOCNT 0x3D4
+
+#define IIC_OK 0
+#define _IIC_CAPTURED 1
+#define _IIC_NOACK 2
+#define _MDDC_CAPTURED 3
+#define _MDDC_NOACK 4
+#define _MDDC_FIFO_FULL 5
+
+typedef struct {
+ u8 slaveAddr;
+ u8 offset; /* "offset = DDC_SEGM register" */
+ u8 regAddr;
+ u8 nbytes_lsb;
+ u8 nbytes_msb;
+ u8 dummy;
+ u8 cmd;
+ u8 *pdata;
+ u8 data[6];
+} mddc_type;
+
+enum ddc_operation {
+ DDC_READ,
+ DDC_WRITE
+};
+
+enum ri_suspend_resume {
+ AUTO_RI_SUSPEND,
+ AUTO_RI_RESUME
+};
diff --git a/drivers/video/omap2/hdcp/hdcp_lib.c b/drivers/video/omap2/hdcp/hdcp_lib.c
new file mode 100644
index 0000000..6e7850c
--- /dev/null
+++ b/drivers/video/omap2/hdcp/hdcp_lib.c
@@ -0,0 +1,838 @@
+/*
+ * hdcp_lib.c
+ *
+ * HDCP interface DSS driver setting for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Fabrice Olivero
+ * Fabrice Olivero <f-olivero@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <mach/omap4-common.h>
+#include <linux/dma-mapping.h>
+#include "hdcp.h"
+
+static void hdcp_lib_read_an(u8 *an);
+static void hdcp_lib_read_aksv(u8 *ksv_data);
+static void hdcp_lib_write_bksv(u8 *ksv_data);
+static void hdcp_lib_generate_an(u8 *an);
+static int hdcp_lib_r0_check(void);
+static int hdcp_lib_sha_bstatus(struct hdcp_sha_in *sha);
+static void hdcp_lib_set_repeater_bit_in_tx(enum hdcp_repeater rx_mode);
+static void hdcp_lib_toggle_repeater_bit_in_tx(void);
+static int hdcp_lib_initiate_step1(void);
+static int hdcp_lib_check_ksv(uint8_t ksv[5]);
+
+#define PPA_SERVICE_HDCP_READ_M0 0x30
+#define PPA_SERVICE_HDCP_CHECK_V 0x31
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_read_an
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_read_an(u8 *an)
+{
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ an[i] = (RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__AN0 +
+ i * sizeof(uint32_t))) & 0xFF;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_read_aksv
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_read_aksv(u8 *ksv_data)
+{
+ u8 i;
+ for (i = 0; i < 5; i++) {
+ ksv_data[i] = RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__AKSV0 +
+ i * sizeof(uint32_t));
+
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_write_bksv
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_write_bksv(u8 *ksv_data)
+{
+ u8 i;
+ for (i = 0; i < 5; i++) {
+ WR_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM, HDMI_IP_CORE_SYSTEM__BKSV0 +
+ i * sizeof(uint32_t), ksv_data[i]);
+ }
+}
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_generate_an
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_generate_an(u8 *an)
+{
+ /* Generate An using HDCP HW */
+ DBG("hdcp_lib_generate_an()");
+
+ /* Start AN Gen */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 3, 3, 0);
+
+ /* Delay of 10 ms */
+ mdelay(10);
+
+ /* Stop AN Gen */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 3, 3, 1);
+
+ /* Must set 0x72:0x0F[3] twice to guarantee that it takes effect */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 3, 3, 1);
+
+ hdcp_lib_read_an(an);
+
+ DBG("AN: %x %x %x %x %x %x %x %x", an[0], an[1], an[2], an[3],
+ an[4], an[5], an[6], an[7]);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_r0_check
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_lib_r0_check(void)
+{
+ u8 ro_rx[2], ro_tx[2];
+
+ DBG("hdcp_lib_r0_check()");
+
+ /* DDC: Read Ri' from RX */
+ if (hdcp_ddc_read(DDC_Ri_LEN, DDC_Ri_ADDR , (u8 *)&ro_rx))
+ return -HDCP_DDC_ERROR;
+
+ /* Read Ri in HDCP IP */
+ ro_tx[0] = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__R1) & 0xFF;
+
+ ro_tx[1] = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__R2) & 0xFF;
+
+ /* Compare values */
+ DBG("ROTX: %x%x RORX:%x%x", ro_tx[0], ro_tx[1], ro_rx[0], ro_rx[1]);
+
+ if ((ro_rx[0] == ro_tx[0]) && (ro_rx[1] == ro_tx[1]))
+ return HDCP_OK;
+ else
+ return -HDCP_AUTH_FAILURE;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_sha_bstatus
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_lib_sha_bstatus(struct hdcp_sha_in *sha)
+{
+ u8 data[2];
+
+ if (hdcp_ddc_read(DDC_BSTATUS_LEN, DDC_BSTATUS_ADDR, data))
+ return -HDCP_DDC_ERROR;
+
+ sha->data[sha->byte_counter++] = data[0];
+ sha->data[sha->byte_counter++] = data[1];
+
+ return HDCP_OK;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_set_repeater_bit_in_tx
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_set_repeater_bit_in_tx(enum hdcp_repeater rx_mode)
+{
+ DBG("hdcp_lib_set_repeater_bit_in_tx() value=%d", rx_mode);
+
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 4, 4, rx_mode);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_toggle_repeater_bit_in_tx
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_lib_toggle_repeater_bit_in_tx(void)
+{
+ if (hdcp_lib_check_repeater_bit_in_tx())
+ hdcp_lib_set_repeater_bit_in_tx(HDCP_RECEIVER);
+ else
+ hdcp_lib_set_repeater_bit_in_tx(HDCP_REPEATER);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_initiate_step1
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_lib_initiate_step1(void)
+{
+ /* HDCP authentication steps:
+ * 1) Read Bksv - check validity (does the HDMI Rx support HDCP?)
+ * 2) Initialize HDCP (CP reset release)
+ * 3) Read Bcaps - is the HDMI Rx a repeater?
+ * *** First part of authentication ***
+ * 4) Read Bksv - check validity (does the HDMI Rx support HDCP?)
+ * 5) Generate An
+ * 6) DDC: Write An, Aksv
+ * 7) DDC: Write Bksv
+ */
+ uint8_t an_ksv_data[8], an_bksv_data[8];
+ uint8_t rx_type;
+
+ DBG("hdcp_lib_initiate_step1()\n");
+
+ /* DDC: Read BKSV from RX */
+ if (hdcp_ddc_read(DDC_BKSV_LEN, DDC_BKSV_ADDR , an_ksv_data))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ DBG("BKSV: %02x %02x %02x %02x %02x", an_ksv_data[0], an_ksv_data[1],
+ an_ksv_data[2], an_ksv_data[3],
+ an_ksv_data[4]);
+
+ if (hdcp_lib_check_ksv(an_ksv_data)) {
+ DBG("BKSV error (number of 0 and 1)");
+ return -HDCP_AUTH_FAILURE;
+ }
+
+ /* TODO: Need to confirm it is required */
+#ifndef _9032_AN_STOP_FIX_
+ hdcp_lib_toggle_repeater_bit_in_tx();
+#endif
+
+ /* Release CP reset bit */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 2, 2, 1);
+
+ /* Read BCAPS to determine if HDCP RX is a repeater */
+ if (hdcp_ddc_read(DDC_BCAPS_LEN, DDC_BCAPS_ADDR, &rx_type))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ rx_type = FLD_GET(rx_type, DDC_BIT_REPEATER, DDC_BIT_REPEATER);
+
+ /* Set repeater bit in HDCP CTRL */
+ if (rx_type == 1) {
+ hdcp_lib_set_repeater_bit_in_tx(HDCP_REPEATER);
+ DBG("HDCP RX is a repeater");
+ } else {
+ hdcp_lib_set_repeater_bit_in_tx(HDCP_RECEIVER);
+ DBG("HDCP RX is a receiver");
+ }
+
+/* Power debug code */
+#ifdef POWER_TRANSITION_DBG
+ printk(KERN_INFO "\n**************************\n"
+ "AUTHENTICATION: WAIT FOR DSS TRANSITION\n"
+ "*************************\n");
+ mdelay(10000);
+ printk(KERN_INFO "\n**************************\n"
+ "DONE\n"
+ "*************************\n");
+#endif
+ /* DDC: Read BKSV from RX */
+ if (hdcp_ddc_read(DDC_BKSV_LEN, DDC_BKSV_ADDR , an_bksv_data))
+ return -HDCP_DDC_ERROR;
+
+ /* Generate An */
+ hdcp_lib_generate_an(an_ksv_data);
+
+ /* Authentication 1st step initiated HERE */
+
+ /* DDC: Write An */
+ if (hdcp_ddc_write(DDC_AN_LEN, DDC_AN_ADDR , an_ksv_data))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ /* Read AKSV from IP: (HDCP AKSV register) */
+ hdcp_lib_read_aksv(an_ksv_data);
+
+ DBG("AKSV: %02x %02x %02x %02x %02x", an_ksv_data[0], an_ksv_data[1],
+ an_ksv_data[2], an_ksv_data[3],
+ an_ksv_data[4]);
+
+ if (hdcp_lib_check_ksv(an_ksv_data)) {
+ printk(KERN_INFO "HDCP: AKSV error (number of 0 and 1)\n");
+ return -HDCP_AKSV_ERROR;
+ }
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ /* DDC: Write AKSV */
+ if (hdcp_ddc_write(DDC_AKSV_LEN, DDC_AKSV_ADDR, an_ksv_data))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ /* Write Bksv to IP */
+ hdcp_lib_write_bksv(an_bksv_data);
+
+ /* Check IP BKSV error */
+ if (RD_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 5, 5))
+ return -HDCP_AUTH_FAILURE;
+
+ /* Here BKSV should be checked against the revocation list */
+
+ return HDCP_OK;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_check_ksv
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_lib_check_ksv(uint8_t ksv[5])
+{
+ int i, j;
+ int zero = 0, one = 0;
+
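+ /* A valid KSV contains exactly 20 ones and 20 zeros, so it is enough
+ * to compare the two counts; e.g. 0xA5A5A5A5A5 (an illustrative value,
+ * not taken from the spec) would pass.
+ */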
+ for (i = 0; i < 5; i++) {
+ /* Count number of zero / one */
+ for (j = 0; j < 8; j++) {
+ if (ksv[i] & (0x01 << j))
+ one++;
+ else
+ zero++;
+ }
+ }
+
+ if (one == zero)
+ return 0;
+ else
+ return -1;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_3des_load_key
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_3des_load_key(uint32_t *deshdcp_encrypted_key)
+{
+ int counter = 0, status = HDCP_OK;
+
+ DBG("Loading HDCP keys...");
+
+ /* Set decryption mode in DES control register */
+ WR_FIELD_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_CTRL,
+ DESHDCP__DHDCP_CTRL__DIRECTION_POS_F,
+ DESHDCP__DHDCP_CTRL__DIRECTION_POS_L,
+ 0x0);
+
+ /* Write encrypted data */
+ while (counter < DESHDCP_KEY_SIZE) {
+ /* Fill Data registers */
+ WR_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_L,
+ deshdcp_encrypted_key[counter]);
+ WR_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_H,
+ deshdcp_encrypted_key[counter + 1]);
+
+ /* Wait for output bit at '1' */
+ while (RD_FIELD_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_CTRL,
+ DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_F,
+ DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_L
+ ) != 0x1)
+ ;
+
+ /* Dummy read (the data is transferred directly into
+ * key memory)
+ */
+ if (RD_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_L) !=
+ 0x0) {
+ status = -HDCP_3DES_ERROR;
+ printk(KERN_ERR "HDCP: DESHDCP dummy read error\n");
+ }
+ if (RD_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_H) !=
+ 0x0) {
+ status = -HDCP_3DES_ERROR;
+ printk(KERN_ERR "HDCP: DESHDCP dummy read error\n");
+ }
+
+ counter += 2;
+ }
+
+ return status;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_3des_encrypt_key
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_3des_encrypt_key(struct hdcp_encrypt_control *enc_ctrl,
+ uint32_t out_key[DESHDCP_KEY_SIZE])
+{
+ int counter = 0;
+
+ DBG("Encrypting HDCP keys...");
+
+ /* Reset encrypted key array */
+ for (counter = 0; counter < DESHDCP_KEY_SIZE; counter++)
+ out_key[counter] = 0;
+
+ /* Set encryption mode in DES control register */
+ WR_FIELD_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_CTRL,
+ DESHDCP__DHDCP_CTRL__DIRECTION_POS_F,
+ DESHDCP__DHDCP_CTRL__DIRECTION_POS_L,
+ 0x1);
+
+ /* Write raw data and read encrypted data */
+ counter = 0;
+
+#ifdef POWER_TRANSITION_DBG
+ printk(KERN_ERR "\n**************************\n"
+ "ENCRYPTION: WAIT FOR DSS TRANSITION\n"
+ "*************************\n");
+ mdelay(10000);
+	printk(KERN_INFO "\n**************************\n"
+ "DONE\n"
+ "*************************\n");
+#endif
+
+ while (counter < DESHDCP_KEY_SIZE) {
+ /* Fill Data registers */
+ WR_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_L,
+ enc_ctrl->in_key[counter]);
+ WR_REG_32(hdcp.deshdcp_base_addr, DESHDCP__DHDCP_DATA_H,
+ enc_ctrl->in_key[counter + 1]);
+
+ /* Wait for output bit at '1' */
+ while (RD_FIELD_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_CTRL,
+ DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_F,
+ DESHDCP__DHDCP_CTRL__OUTPUT_READY_POS_L
+ ) != 0x1)
+ ;
+
+		/* Read encrypted data */
+ out_key[counter] = RD_REG_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_DATA_L);
+ out_key[counter + 1] = RD_REG_32(hdcp.deshdcp_base_addr,
+ DESHDCP__DHDCP_DATA_H);
+
+ counter += 2;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_disable
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_lib_disable(void)
+{
+ DBG("hdcp_lib_disable() %u", jiffies_to_msecs(jiffies));
+
+ /* CP reset */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 2, 2, 0);
+
+ /* Clear AV mute in case it was set */
+ hdcp_lib_set_av_mute(AV_MUTE_CLEAR);
+
+ return HDCP_OK;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_set_encryption
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_lib_set_encryption(enum encryption_state enc_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+ /* HDCP_CTRL::ENC_EN set/clear */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 0, 0, enc_state);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL);
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+
+ pr_info("HDCP: Encryption state changed: %s hdcp_ctrl: %02x",
+ enc_state == HDCP_ENC_OFF ? "OFF" : "ON",
+ RD_REG_32(hdcp.hdmi_wp_base_addr +
+ HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL));
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_set_av_mute
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_lib_set_av_mute(enum av_mute av_mute_state)
+{
+ unsigned long flags;
+
+ DBG("hdcp_lib_set_av_mute() av_mute=%d", av_mute_state);
+
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+ {
+ u8 RegVal, TimeOutCount = 64;
+
+ RegVal = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_PB_CTRL2);
+
+ /* PRguide-GPC: To change the content of the CP_BYTE1 register,
+ * CP_EN must be zero
+ * set PB_CTRL2 :: CP_RPT = 0
+ */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_PB_CTRL2, 2, 2, 0);
+
+ /* Set/clear AV mute state */
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_CP_BYTE1, av_mute_state);
+
+ /* FIXME: This loop should be removed */
+ while (TimeOutCount--) {
+ /* Continue in this loop till CP_EN becomes 0,
+ * prior to TimeOutCount becoming 0 */
+ if (!RD_FIELD_32(hdcp.hdmi_wp_base_addr +
+ HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_PB_CTRL2, 3, 3))
+ break;
+ }
+
+ DBG(" timeoutcount=%d", TimeOutCount);
+
+		/* FIXME: why is this if condition required? According to the
+		 * PRG this should be done unconditionally */
+ if (TimeOutCount) {
+ /* set PB_CTRL2 :: CP_EN = 1 & CP_RPT = 1 */
+ RegVal = FLD_MOD(RegVal, 0x3, 3, 2);
+
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_PB_CTRL2, RegVal);
+ }
+ }
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_check_repeater_bit_in_tx
+ *-----------------------------------------------------------------------------
+ */
+u8 hdcp_lib_check_repeater_bit_in_tx(void)
+{
+ return RD_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL, 4, 4);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_auto_ri_check
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_lib_auto_ri_check(bool state)
+{
+ u8 reg_val;
+ unsigned long flags;
+
+ DBG("hdcp_lib_auto_ri_check() state=%s",
+ state == true ? "ON" : "OFF");
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+ reg_val = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__INT_UNMASK3);
+
+ reg_val = (state == true) ? (reg_val | 0xB0) : (reg_val & ~0xB0);
+
+	/* Turn the Auto Ri interrupts (mask 0xB0 in INT_UNMASK3) on/off */
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__INT_UNMASK3, reg_val);
+
+ /* Enable/Disable Ri */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD, 0, 0,
+ ((state == true) ? 1 : 0));
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD);
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_auto_bcaps_rdy_check
+ *-----------------------------------------------------------------------------
+ */
+void hdcp_lib_auto_bcaps_rdy_check(bool state)
+{
+ u8 reg_val;
+ unsigned long flags;
+
+ DBG("hdcp_lib_auto_bcaps_rdy_check() state=%s",
+ state == true ? "ON" : "OFF");
+
+ spin_lock_irqsave(&hdcp.spinlock, flags);
+
+	/* Enable/disable KSV_READY / BCAP_DONE interrupt */
+ WR_FIELD_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__INT_UNMASK2, 7, 7,
+ ((state == true) ? 1 : 0));
+
+ /* Enable/Disable Ri & Bcap */
+ reg_val = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD);
+
+ /* Enable RI_EN & BCAP_EN OR disable BCAP_EN */
+ reg_val = (state == true) ? (reg_val | 0x3) : (reg_val & ~0x2);
+
+ WR_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD, reg_val);
+
+ /* Read to flush */
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__RI_CMD);
+
+ spin_unlock_irqrestore(&hdcp.spinlock, flags);
+
+ DBG("hdcp_lib_auto_bcaps_rdy_check() Done\n");
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_step1_start
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_lib_step1_start(void)
+{
+ u8 hdmi_mode;
+ int status;
+
+ DBG("hdcp_lib_step1_start() %u", jiffies_to_msecs(jiffies));
+
+ /* Check if mode is HDMI or DVI */
+ hdmi_mode = RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_CORE_AV_BASE,
+ HDMI_CORE_AV_HDMI_CTRL) &
+ HDMI_CORE_AV_HDMI_CTRL__HDMI_MODE;
+
+ DBG("RX mode: %s", hdmi_mode ? "HDMI" : "DVI");
+
+ /* Set AV Mute */
+ hdcp_lib_set_av_mute(AV_MUTE_SET);
+
+ /* Must turn encryption off when AVMUTE */
+ hdcp_lib_set_encryption(HDCP_ENC_OFF);
+
+ status = hdcp_lib_initiate_step1();
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+ else
+ return status;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_step1_r0_check
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_lib_step1_r0_check(void)
+{
+ int status = HDCP_OK;
+
+	/* HDCP authentication steps:
+	 * 1) DDC: Read M0'
+	 * 2) Compare M0 and M0'
+	 *    If Rx is a receiver (switch to authentication step 3):
+	 *    3) Enable encryption / auto Ri check / disable AV mute
+	 *    If Rx is a repeater (switch to authentication step 2):
+	 *    3) Get M0 from HDMI IP and store it for further processing (V)
+	 *    4) Enable encryption / auto Ri check / auto BCAPS RDY polling,
+	 *       disable AV mute
+	 */
+
+ DBG("hdcp_lib_step1_r0_check() %u", jiffies_to_msecs(jiffies));
+
+ status = hdcp_lib_r0_check();
+ if (status < 0)
+ return status;
+
+ /* Authentication 1st step done */
+
+ /* Now prepare 2nd step authentication in case of RX repeater and
+ * enable encryption / Ri check
+ */
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ if (hdcp_lib_check_repeater_bit_in_tx()) {
+ status = omap4_secure_dispatcher(PPA_SERVICE_HDCP_READ_M0,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ /* Wait for user space */
+ if (status) {
+ printk(KERN_ERR "HDCP: omap4_secure_dispatcher M0 error "
+ "%d\n", status);
+ return -HDCP_AUTH_FAILURE;
+ }
+
+ DBG("hdcp_lib_set_encryption() %u", jiffies_to_msecs(jiffies));
+
+ /* Enable encryption */
+ hdcp_lib_set_encryption(HDCP_ENC_ON);
+
+#ifdef _9032_AUTO_RI_
+ /* Enable Auto Ri */
+ hdcp_lib_auto_ri_check(true);
+#endif
+
+#ifdef _9032_BCAP_
+ /* Enable automatic BCAPS polling */
+ hdcp_lib_auto_bcaps_rdy_check(true);
+#endif
+
+ /* Now, IP waiting for BCAPS ready bit */
+ } else {
+ /* Receiver: enable encryption and auto Ri check */
+ hdcp_lib_set_encryption(HDCP_ENC_ON);
+
+#ifdef _9032_AUTO_RI_
+ /* Enable Auto Ri */
+ hdcp_lib_auto_ri_check(true);
+#endif
+
+ }
+
+ /* Clear AV mute */
+ hdcp_lib_set_av_mute(AV_MUTE_CLEAR);
+
+ return HDCP_OK;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_lib_step2
+ *-----------------------------------------------------------------------------
+ */
+int hdcp_lib_step2(void)
+{
+	/* HDCP authentication steps:
+	 * 1) Disable auto Ri check
+	 * 2) DDC: read BStatus (number of devices, MAX_DEVS_EXCEEDED /
+	 *    MAX_CASCADE_EXCEEDED), then the KSV list and V' for verification
+	 */
+
+ u8 bstatus[2];
+ int status = HDCP_OK;
+
+ DBG("hdcp_lib_step2() %u", jiffies_to_msecs(jiffies));
+
+#ifdef _9032_AUTO_RI_
+ /* Disable Auto Ri */
+ hdcp_lib_auto_ri_check(false);
+#endif
+
+ /* DDC: Read Bstatus (1st byte) from Rx */
+ if (hdcp_ddc_read(DDC_BSTATUS_LEN, DDC_BSTATUS_ADDR, bstatus))
+ return -HDCP_DDC_ERROR;
+
+ /* Get KSV list size */
+ DBG("KSV list size: %d", bstatus[0] & DDC_BSTATUS0_DEV_COUNT);
+ sha_input.byte_counter = (bstatus[0] & DDC_BSTATUS0_DEV_COUNT) * 5;
+
+ /* Check BStatus topology errors */
+ if (bstatus[0] & DDC_BSTATUS0_MAX_DEVS) {
+ DBG("MAX_DEV_EXCEEDED set");
+ return -HDCP_AUTH_FAILURE;
+ }
+
+ if (bstatus[1] & DDC_BSTATUS1_MAX_CASC) {
+ DBG("MAX_CASCADE_EXCEEDED set");
+ return -HDCP_AUTH_FAILURE;
+ }
+
+ DBG("Retrieving KSV list...");
+
+ /* Clear all SHA input data */
+ /* TODO: should be done earlier at HDCP init */
+ memset(sha_input.data, 0, MAX_SHA_DATA_SIZE);
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ /* DDC: read KSV list */
+ if (sha_input.byte_counter) {
+ if (hdcp_ddc_read(sha_input.byte_counter, DDC_KSV_FIFO_ADDR,
+ (u8 *)&sha_input.data))
+ return -HDCP_DDC_ERROR;
+ }
+
+ /* Read and add Bstatus */
+ if (hdcp_lib_sha_bstatus(&sha_input))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+ /* Read V' */
+ if (hdcp_ddc_read(DDC_V_LEN, DDC_V_ADDR, sha_input.vprime))
+ return -HDCP_DDC_ERROR;
+
+ if (hdcp.pending_disable)
+ return -HDCP_CANCELLED_AUTH;
+
+	/* Write back sha_input from the cache so the secure side
+	 * reads up-to-date data
+	 */
+ dma_sync_single_for_device(NULL,
+ __pa((u32)(&sha_input)),
+ sizeof(struct hdcp_sha_in),
+ DMA_TO_DEVICE);
+
+ status = omap4_secure_dispatcher(PPA_SERVICE_HDCP_CHECK_V,
+ FLAG_START_CRITICAL,
+ 1, __pa((u32)&sha_input), 0, 0, 0);
+ /* Wait for user space */
+ if (status) {
+		printk(KERN_ERR "HDCP: omap4_secure_dispatcher CHECK_V error "
+ "%d\n", status);
+ return -HDCP_AUTH_FAILURE;
+ }
+
+ if (status == HDCP_OK) {
+ /* Re-enable Ri check */
+#ifdef _9032_AUTO_RI_
+ hdcp_lib_auto_ri_check(true);
+#endif
+ }
+
+ return status;
+}
diff --git a/drivers/video/omap2/hdcp/hdcp_top.c b/drivers/video/omap2/hdcp/hdcp_top.c
new file mode 100644
index 0000000..1bb32f4
--- /dev/null
+++ b/drivers/video/omap2/hdcp/hdcp_top.c
@@ -0,0 +1,1050 @@
+/*
+ * hdcp_top.c
+ *
+ * HDCP interface DSS driver for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Fabrice Olivero <f-olivero@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/firmware.h>
+#include "../../hdmi_ti_4xxx_ip.h"
+#include "../dss/dss.h"
+#include "hdcp.h"
+
+struct hdcp hdcp;
+struct hdcp_sha_in sha_input;
+
+/* State machine / workqueue */
+static void hdcp_wq_disable(void);
+static void hdcp_wq_start_authentication(void);
+static void hdcp_wq_check_r0(void);
+static void hdcp_wq_step2_authentication(void);
+static void hdcp_wq_authentication_failure(void);
+static void hdcp_work_queue(struct work_struct *work);
+static struct delayed_work *hdcp_submit_work(int event, int delay);
+static void hdcp_cancel_work(struct delayed_work **work);
+
+/* Callbacks */
+static void hdcp_start_frame_cb(void);
+static void hdcp_irq_cb(int hpd_low);
+
+/* Control */
+static long hdcp_enable_ctl(void __user *argp);
+static long hdcp_disable_ctl(void);
+static long hdcp_query_status_ctl(void __user *argp);
+static long hdcp_encrypt_key_ctl(void __user *argp);
+
+/* Driver */
+static int __init hdcp_init(void);
+static void __exit hdcp_exit(void);
+
+struct completion hdcp_comp;
+static DECLARE_WAIT_QUEUE_HEAD(hdcp_up_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(hdcp_down_wait_queue);
+
+#define DSS_POWER
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_request_dss
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_request_dss(void)
+{
+#ifdef DSS_POWER
+ hdcp.dss_state = dss_runtime_get();
+#endif
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_user_space_task
+ *-----------------------------------------------------------------------------
+ */
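+/* Hand an event over to user space and block until it reports completion:
+ * the low byte of hdcp_up_event/hdcp_down_event carries the event code, user
+ * space clears the low byte of hdcp_down_event when done (see
+ * hdcp_done_ctl()) and its high byte carries the user-space return code.
+ */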
+int hdcp_user_space_task(int flags)
+{
+ int ret;
+
+ DBG("Wait for user space task %x\n", flags);
+ hdcp.hdcp_up_event = flags & 0xFF;
+ hdcp.hdcp_down_event = flags & 0xFF;
+ wake_up_interruptible(&hdcp_up_wait_queue);
+ wait_event_interruptible(hdcp_down_wait_queue,
+ (hdcp.hdcp_down_event & 0xFF) == 0);
+ ret = (hdcp.hdcp_down_event & 0xFF00) >> 8;
+
+ DBG("User space task done %x\n", hdcp.hdcp_down_event);
+ hdcp.hdcp_down_event = 0;
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_release_dss
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_release_dss(void)
+{
+#ifdef DSS_POWER
+ if (hdcp.dss_state == 0)
+ dss_runtime_put();
+#endif
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wq_disable
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_wq_disable(void)
+{
+ printk(KERN_INFO "HDCP: disabled\n");
+
+ hdcp_cancel_work(&hdcp.pending_wq_event);
+ hdcp_lib_disable();
+ hdcp.pending_disable = 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wq_start_authentication
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_wq_start_authentication(void)
+{
+ int status = HDCP_OK;
+
+ hdcp.hdcp_state = HDCP_AUTHENTICATION_START;
+
+ printk(KERN_INFO "HDCP: authentication start\n");
+
+ /* Step 1 part 1 (until R0 calc delay) */
+ status = hdcp_lib_step1_start();
+
+ if (status == -HDCP_AKSV_ERROR) {
+ hdcp_wq_authentication_failure();
+ } else if (status == -HDCP_CANCELLED_AUTH) {
+ DBG("Authentication step 1 cancelled.");
+ return;
+ } else if (status != HDCP_OK) {
+ hdcp_wq_authentication_failure();
+ } else {
+ hdcp.hdcp_state = HDCP_WAIT_R0_DELAY;
+ hdcp.auth_state = HDCP_STATE_AUTH_1ST_STEP;
+ hdcp.pending_wq_event = hdcp_submit_work(HDCP_R0_EXP_EVENT,
+ HDCP_R0_DELAY);
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wq_check_r0
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_wq_check_r0(void)
+{
+ int status = hdcp_lib_step1_r0_check();
+
+ if (status == -HDCP_CANCELLED_AUTH) {
+ DBG("Authentication step 1/R0 cancelled.");
+ return;
+ } else if (status < 0)
+ hdcp_wq_authentication_failure();
+ else {
+ if (hdcp_lib_check_repeater_bit_in_tx()) {
+ /* Repeater */
+ printk(KERN_INFO "HDCP: authentication step 1 "
+ "successful - Repeater\n");
+
+ hdcp.hdcp_state = HDCP_WAIT_KSV_LIST;
+ hdcp.auth_state = HDCP_STATE_AUTH_2ND_STEP;
+
+ hdcp.pending_wq_event =
+ hdcp_submit_work(HDCP_KSV_TIMEOUT_EVENT,
+ HDCP_KSV_TIMEOUT_DELAY);
+ } else {
+ /* Receiver */
+ printk(KERN_INFO "HDCP: authentication step 1 "
+ "successful - Receiver\n");
+
+ hdcp.hdcp_state = HDCP_LINK_INTEGRITY_CHECK;
+ hdcp.auth_state = HDCP_STATE_AUTH_3RD_STEP;
+
+ /* Restore retry counter */
+ if (hdcp.en_ctrl->nb_retry == 0)
+ hdcp.retry_cnt = HDCP_INFINITE_REAUTH;
+ else
+ hdcp.retry_cnt = hdcp.en_ctrl->nb_retry;
+ }
+ }
+}
+
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wq_step2_authentication
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_wq_step2_authentication(void)
+{
+ int status = HDCP_OK;
+
+ /* KSV list timeout is running and should be canceled */
+ hdcp_cancel_work(&hdcp.pending_wq_event);
+
+ status = hdcp_lib_step2();
+
+ if (status == -HDCP_CANCELLED_AUTH) {
+ DBG("Authentication step 2 cancelled.");
+ return;
+ } else if (status < 0)
+ hdcp_wq_authentication_failure();
+ else {
+ printk(KERN_INFO "HDCP: (Repeater) authentication step 2 "
+ "successful\n");
+
+ hdcp.hdcp_state = HDCP_LINK_INTEGRITY_CHECK;
+ hdcp.auth_state = HDCP_STATE_AUTH_3RD_STEP;
+
+ /* Restore retry counter */
+ if (hdcp.en_ctrl->nb_retry == 0)
+ hdcp.retry_cnt = HDCP_INFINITE_REAUTH;
+ else
+ hdcp.retry_cnt = hdcp.en_ctrl->nb_retry;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wq_authentication_failure
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_wq_authentication_failure(void)
+{
+ if (hdcp.hdmi_state == HDMI_STOPPED) {
+ hdcp.auth_state = HDCP_STATE_AUTH_FAILURE;
+ return;
+ }
+
+ hdcp_lib_auto_ri_check(false);
+ hdcp_lib_auto_bcaps_rdy_check(false);
+ hdcp_lib_set_av_mute(AV_MUTE_SET);
+ hdcp_lib_set_encryption(HDCP_ENC_OFF);
+
+ hdcp_cancel_work(&hdcp.pending_wq_event);
+
+ hdcp_lib_disable();
+ hdcp.pending_disable = 0;
+
+ if (hdcp.retry_cnt && (hdcp.hdmi_state != HDMI_STOPPED)) {
+ if (hdcp.retry_cnt < HDCP_INFINITE_REAUTH) {
+ hdcp.retry_cnt--;
+ printk(KERN_INFO "HDCP: authentication failed - "
+ "retrying, attempts=%d\n",
+ hdcp.retry_cnt);
+ } else
+ printk(KERN_INFO "HDCP: authentication failed - "
+ "retrying\n");
+
+ hdcp.hdcp_state = HDCP_AUTHENTICATION_START;
+ hdcp.auth_state = HDCP_STATE_AUTH_FAIL_RESTARTING;
+
+ hdcp.pending_wq_event = hdcp_submit_work(HDCP_AUTH_REATT_EVENT,
+ HDCP_REAUTH_DELAY);
+ } else {
+ printk(KERN_INFO "HDCP: authentication failed - "
+ "HDCP disabled\n");
+ hdcp.hdcp_state = HDCP_ENABLE_PENDING;
+ hdcp.auth_state = HDCP_STATE_AUTH_FAILURE;
+ }
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_work_queue
+ *-----------------------------------------------------------------------------
+ */
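+/* State machine handler running in workqueue context. The event word carries
+ * the event identifier in its low byte; the higher bits tag the source of the
+ * event (e.g. HDCP_WORKQUEUE_SRC), which is how the DBG traces below split it.
+ */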
+static void hdcp_work_queue(struct work_struct *work)
+{
+ struct hdcp_delayed_work *hdcp_w =
+ container_of(work, struct hdcp_delayed_work, work.work);
+ int event = hdcp_w->event;
+
+ mutex_lock(&hdcp.lock);
+
+ DBG("hdcp_work_queue() - START - %u hdmi=%d hdcp=%d auth=%d evt= %x %d"
+ " hdcp_ctrl=%02x",
+ jiffies_to_msecs(jiffies),
+ hdcp.hdmi_state,
+ hdcp.hdcp_state,
+ hdcp.auth_state,
+ (event & 0xFF00) >> 8,
+ event & 0xFF,
+ RD_REG_32(hdcp.hdmi_wp_base_addr + HDMI_IP_CORE_SYSTEM,
+ HDMI_IP_CORE_SYSTEM__HDCP_CTRL));
+
+	/* Clear pending_wq_event
+	 * When a delayed work is scheduled from the state machine,
+	 * "pending_wq_event" stores a pointer to it so that any pending
+	 * work can be cancelled if HDCP is disabled
+	 */
+ if (event & HDCP_WORKQUEUE_SRC)
+ hdcp.pending_wq_event = 0;
+
+ /* First handle HDMI state */
+ if (event == HDCP_START_FRAME_EVENT) {
+ hdcp.pending_start = 0;
+ hdcp.hdmi_state = HDMI_STARTED;
+ }
+ /**********************/
+ /* HDCP state machine */
+ /**********************/
+ switch (hdcp.hdcp_state) {
+
+ /* State */
+ /*********/
+ case HDCP_DISABLED:
+ /* HDCP enable control or re-authentication event */
+ if (event == HDCP_ENABLE_CTL) {
+ if (hdcp.en_ctrl->nb_retry == 0)
+ hdcp.retry_cnt = HDCP_INFINITE_REAUTH;
+ else
+ hdcp.retry_cnt = hdcp.en_ctrl->nb_retry;
+
+ if (hdcp.hdmi_state == HDMI_STARTED)
+ hdcp_wq_start_authentication();
+ else
+ hdcp.hdcp_state = HDCP_ENABLE_PENDING;
+ }
+
+ break;
+
+ /* State */
+ /*********/
+ case HDCP_ENABLE_PENDING:
+ /* HDMI start frame event */
+ if (event == HDCP_START_FRAME_EVENT)
+ hdcp_wq_start_authentication();
+
+ break;
+
+ /* State */
+ /*********/
+ case HDCP_AUTHENTICATION_START:
+ /* Re-authentication */
+ if (event == HDCP_AUTH_REATT_EVENT)
+ hdcp_wq_start_authentication();
+
+ break;
+
+ /* State */
+ /*********/
+ case HDCP_WAIT_R0_DELAY:
+ /* R0 timer elapsed */
+ if (event == HDCP_R0_EXP_EVENT)
+ hdcp_wq_check_r0();
+
+ break;
+
+ /* State */
+ /*********/
+ case HDCP_WAIT_KSV_LIST:
+ /* Ri failure */
+ if (event == HDCP_RI_FAIL_EVENT) {
+ printk(KERN_INFO "HDCP: Ri check failure\n");
+
+ hdcp_wq_authentication_failure();
+ }
+ /* KSV list ready event */
+ else if (event == HDCP_KSV_LIST_RDY_EVENT)
+ hdcp_wq_step2_authentication();
+ /* Timeout */
+ else if (event == HDCP_KSV_TIMEOUT_EVENT) {
+ printk(KERN_INFO "HDCP: BCAPS polling timeout\n");
+ hdcp_wq_authentication_failure();
+ }
+ break;
+
+ /* State */
+ /*********/
+ case HDCP_LINK_INTEGRITY_CHECK:
+ /* Ri failure */
+ if (event == HDCP_RI_FAIL_EVENT) {
+ printk(KERN_INFO "HDCP: Ri check failure\n");
+ hdcp_wq_authentication_failure();
+ }
+ break;
+
+ default:
+		printk(KERN_WARNING "HDCP: error - unknown HDCP state\n");
+ break;
+ }
+
+ kfree(hdcp_w);
+ hdcp_w = 0;
+ if (event == HDCP_START_FRAME_EVENT)
+ hdcp.pending_start = 0;
+ if (event == HDCP_KSV_LIST_RDY_EVENT ||
+ event == HDCP_R0_EXP_EVENT) {
+ hdcp.pending_wq_event = 0;
+ }
+
+ DBG("hdcp_work_queue() - END - %u hdmi=%d hdcp=%d auth=%d evt=%x %d ",
+ jiffies_to_msecs(jiffies),
+ hdcp.hdmi_state,
+ hdcp.hdcp_state,
+ hdcp.auth_state,
+ (event & 0xFF00) >> 8,
+ event & 0xFF);
+
+ mutex_unlock(&hdcp.lock);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_submit_work
+ *-----------------------------------------------------------------------------
+ */
+static struct delayed_work *hdcp_submit_work(int event, int delay)
+{
+ struct hdcp_delayed_work *work;
+
+ work = kmalloc(sizeof(struct hdcp_delayed_work), GFP_ATOMIC);
+
+ if (work) {
+ INIT_DELAYED_WORK(&work->work, hdcp_work_queue);
+ work->event = event;
+ queue_delayed_work(hdcp.workqueue,
+ &work->work,
+ msecs_to_jiffies(delay));
+ } else {
+ printk(KERN_WARNING "HDCP: Cannot allocate memory to "
+ "create work\n");
+ return 0;
+ }
+
+ return &work->work;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_cancel_work
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_cancel_work(struct delayed_work **work)
+{
+ int ret = 0;
+
+ if (*work) {
+ ret = cancel_delayed_work(*work);
+ if (ret != 1) {
+ ret = cancel_work_sync(&((*work)->work));
+ printk(KERN_INFO "Canceling work failed - "
+ "cancel_work_sync done %d\n", ret);
+ }
+ kfree(*work);
+ *work = 0;
+ }
+}
+
+
+/******************************************************************************
+ * HDCP callbacks
+ *****************************************************************************/
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_3des_cb
+ *-----------------------------------------------------------------------------
+ */
+static bool hdcp_3des_cb(void)
+{
+ DBG("hdcp_3des_cb() %u", jiffies_to_msecs(jiffies));
+
+ if (!hdcp.hdcp_keys_loaded) {
+ printk(KERN_ERR "%s: hdcp_keys not loaded = %d",
+ __func__, hdcp.hdcp_keys_loaded);
+ return false;
+ }
+
+ /* Load 3DES key */
+ if (hdcp_3des_load_key(hdcp.en_ctrl->key) != HDCP_OK) {
+ printk(KERN_ERR "Error Loading HDCP keys\n");
+ return false;
+ }
+ return true;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_start_frame_cb
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_start_frame_cb(void)
+{
+ DBG("hdcp_start_frame_cb() %u", jiffies_to_msecs(jiffies));
+
+ if (!hdcp.hdcp_keys_loaded) {
+ DBG("%s: hdcp_keys not loaded = %d",
+ __func__, hdcp.hdcp_keys_loaded);
+ return;
+ }
+
+ /* Cancel any pending work */
+ if (hdcp.pending_start)
+ hdcp_cancel_work(&hdcp.pending_start);
+ if (hdcp.pending_wq_event)
+ hdcp_cancel_work(&hdcp.pending_wq_event);
+
+ hdcp.hpd_low = 0;
+ hdcp.pending_disable = 0;
+ hdcp.retry_cnt = hdcp.en_ctrl->nb_retry;
+ hdcp.pending_start = hdcp_submit_work(HDCP_START_FRAME_EVENT,
+ HDCP_ENABLE_DELAY);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_irq_cb
+ *-----------------------------------------------------------------------------
+ */
+static void hdcp_irq_cb(int status)
+{
+ DBG("hdcp_irq_cb() status=%x", status);
+
+ if (!hdcp.hdcp_keys_loaded) {
+ DBG("%s: hdcp_keys not loaded = %d",
+ __func__, hdcp.hdcp_keys_loaded);
+ return;
+ }
+
+ /* Disable auto Ri/BCAPS immediately */
+ if (((status & HDMI_RI_ERR) ||
+ (status & HDMI_BCAP) ||
+ (status & HDMI_HPD_LOW)) &&
+ (hdcp.hdcp_state != HDCP_ENABLE_PENDING)) {
+ hdcp_lib_auto_ri_check(false);
+ hdcp_lib_auto_bcaps_rdy_check(false);
+ }
+
+ /* Work queue execution not required if HDCP is disabled */
+	/* TODO: ignore interrupts if they are masked (cannot access UNMASK
+	 * here, so a global variable should be used)
+	 */
+ if ((hdcp.hdcp_state != HDCP_DISABLED) &&
+ (hdcp.hdcp_state != HDCP_ENABLE_PENDING)) {
+ if (status & HDMI_HPD_LOW) {
+ hdcp_lib_set_encryption(HDCP_ENC_OFF);
+ hdcp_ddc_abort();
+ }
+
+ if (status & HDMI_RI_ERR) {
+ hdcp_lib_set_av_mute(AV_MUTE_SET);
+ hdcp_lib_set_encryption(HDCP_ENC_OFF);
+ hdcp_submit_work(HDCP_RI_FAIL_EVENT, 0);
+ }
+ /* RI error takes precedence over BCAP */
+ else if (status & HDMI_BCAP)
+ hdcp_submit_work(HDCP_KSV_LIST_RDY_EVENT, 0);
+ }
+
+ if (status & HDMI_HPD_LOW) {
+ hdcp.pending_disable = 1; /* Used to exit on-going HDCP
+ * work */
+ hdcp.hpd_low = 0; /* Used to cancel HDCP works */
+ hdcp_lib_disable();
+		/* In case of HDCP_STOP_FRAME_EVENT, the HDCP stop
+		 * frame callback is blocked, waiting for the
+		 * HDCP driver to finish accessing the HW
+		 * before returning. This prevents the HDMI
+		 * driver from shutting down DSS/HDMI power
+		 * before the HDCP work has finished.
+		 */
+ hdcp.hdmi_state = HDMI_STOPPED;
+ hdcp.hdcp_state = HDCP_ENABLE_PENDING;
+ hdcp.auth_state = HDCP_STATE_DISABLED;
+ }
+}
+
+/******************************************************************************
+ * HDCP control from ioctl
+ *****************************************************************************/
+
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_enable_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_enable_ctl(void __user *argp)
+{
+ DBG("hdcp_ioctl() - ENABLE %u", jiffies_to_msecs(jiffies));
+
+ if (hdcp.en_ctrl == 0) {
+ hdcp.en_ctrl =
+ kmalloc(sizeof(struct hdcp_enable_control),
+ GFP_KERNEL);
+
+ if (hdcp.en_ctrl == 0) {
+ printk(KERN_WARNING
+ "HDCP: Cannot allocate memory for HDCP"
+ " enable control struct\n");
+ return -EFAULT;
+ }
+ }
+
+ if (copy_from_user(hdcp.en_ctrl, argp,
+ sizeof(struct hdcp_enable_control))) {
+ printk(KERN_WARNING "HDCP: Error copying from user space "
+ "- enable ioctl\n");
+ return -EFAULT;
+ }
+
+ /* Post event to workqueue */
+ if (hdcp_submit_work(HDCP_ENABLE_CTL, 0) == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_disable_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_disable_ctl(void)
+{
+ DBG("hdcp_ioctl() - DISABLE %u", jiffies_to_msecs(jiffies));
+
+ hdcp_cancel_work(&hdcp.pending_start);
+ hdcp_cancel_work(&hdcp.pending_wq_event);
+
+ hdcp.pending_disable = 1;
+ /* Post event to workqueue */
+ if (hdcp_submit_work(HDCP_DISABLE_CTL, 0) == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_query_status_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_query_status_ctl(void __user *argp)
+{
+ uint32_t *status = (uint32_t *)argp;
+
+ DBG("hdcp_ioctl() - QUERY %u", jiffies_to_msecs(jiffies));
+
+ *status = hdcp.auth_state;
+
+ return 0;
+}
+
+static int hdcp_wait_re_entrance;
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_wait_event_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_wait_event_ctl(void __user *argp)
+{
+ struct hdcp_wait_control ctrl;
+
+ DBG("hdcp_ioctl() - WAIT %u %d", jiffies_to_msecs(jiffies),
+ hdcp.hdcp_up_event);
+
+ if (copy_from_user(&ctrl, argp,
+ sizeof(struct hdcp_wait_control))) {
+ printk(KERN_WARNING "HDCP: Error copying from user space"
+ " - wait ioctl");
+ return -EFAULT;
+ }
+
+ if (hdcp_wait_re_entrance == 0) {
+ hdcp_wait_re_entrance = 1;
+ wait_event_interruptible(hdcp_up_wait_queue,
+ (hdcp.hdcp_up_event & 0xFF) != 0);
+
+ ctrl.event = hdcp.hdcp_up_event;
+
+ if ((ctrl.event & 0xFF) == HDCP_EVENT_STEP2) {
+ if (copy_to_user(ctrl.data, &sha_input,
+ sizeof(struct hdcp_sha_in))) {
+ printk(KERN_WARNING "HDCP: Error copying to "
+ "user space - wait ioctl");
+ return -EFAULT;
+ }
+ }
+
+ hdcp.hdcp_up_event = 0;
+ hdcp_wait_re_entrance = 0;
+ } else
+ ctrl.event = HDCP_EVENT_EXIT;
+
+ /* Store output data to output pointer */
+ if (copy_to_user(argp, &ctrl,
+ sizeof(struct hdcp_wait_control))) {
+ printk(KERN_WARNING "HDCP: Error copying to user space -"
+ " wait ioctl");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_done_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_done_ctl(void __user *argp)
+{
+ uint32_t *status = (uint32_t *)argp;
+
+ DBG("hdcp_ioctl() - DONE %u %d", jiffies_to_msecs(jiffies), *status);
+
+ hdcp.hdcp_down_event &= ~(*status & 0xFF);
+ hdcp.hdcp_down_event |= *status & 0xFF00;
+
+ wake_up_interruptible(&hdcp_down_wait_queue);
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_encrypt_key_ctl
+ *-----------------------------------------------------------------------------
+ */
+static long hdcp_encrypt_key_ctl(void __user *argp)
+{
+ struct hdcp_encrypt_control *ctrl;
+ uint32_t *out_key;
+
+ DBG("hdcp_ioctl() - ENCRYPT KEY %u", jiffies_to_msecs(jiffies));
+
+ mutex_lock(&hdcp.lock);
+
+ if (hdcp.hdcp_state != HDCP_DISABLED) {
+ printk(KERN_INFO "HDCP: Cannot encrypt keys while HDCP "
+ "is enabled\n");
+ mutex_unlock(&hdcp.lock);
+ return -EFAULT;
+ }
+
+ hdcp.hdcp_state = HDCP_KEY_ENCRYPTION_ONGOING;
+
+ /* Encryption happens in ioctl / user context */
+ ctrl = kmalloc(sizeof(struct hdcp_encrypt_control),
+ GFP_KERNEL);
+
+ if (ctrl == 0) {
+ printk(KERN_WARNING "HDCP: Cannot allocate memory for HDCP"
+ " encryption control struct\n");
+ mutex_unlock(&hdcp.lock);
+ return -EFAULT;
+ }
+
+ out_key = kmalloc(sizeof(uint32_t) *
+ DESHDCP_KEY_SIZE, GFP_KERNEL);
+
+ if (out_key == 0) {
+ printk(KERN_WARNING "HDCP: Cannot allocate memory for HDCP "
+ "encryption output key\n");
+ kfree(ctrl);
+ mutex_unlock(&hdcp.lock);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(ctrl, argp,
+ sizeof(struct hdcp_encrypt_control))) {
+ printk(KERN_WARNING "HDCP: Error copying from user space"
+ " - encrypt ioctl\n");
+ kfree(ctrl);
+ kfree(out_key);
+ mutex_unlock(&hdcp.lock);
+ return -EFAULT;
+ }
+
+ hdcp_request_dss();
+
+ /* Call encrypt function */
+ hdcp_3des_encrypt_key(ctrl, out_key);
+
+ hdcp_release_dss();
+
+ hdcp.hdcp_state = HDCP_DISABLED;
+ mutex_unlock(&hdcp.lock);
+
+ /* Store output data to output pointer */
+ if (copy_to_user(ctrl->out_key, out_key,
+ sizeof(uint32_t)*DESHDCP_KEY_SIZE)) {
+ printk(KERN_WARNING "HDCP: Error copying to user space -"
+ " encrypt ioctl\n");
+ kfree(ctrl);
+ kfree(out_key);
+ return -EFAULT;
+ }
+
+ kfree(ctrl);
+ kfree(out_key);
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_ioctl
+ *-----------------------------------------------------------------------------
+ */
+long hdcp_ioctl(struct file *fd, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case HDCP_ENABLE:
+ return hdcp_enable_ctl(argp);
+
+ case HDCP_DISABLE:
+ return hdcp_disable_ctl();
+
+ case HDCP_ENCRYPT_KEY:
+ return hdcp_encrypt_key_ctl(argp);
+
+ case HDCP_QUERY_STATUS:
+ return hdcp_query_status_ctl(argp);
+
+ case HDCP_WAIT_EVENT:
+ return hdcp_wait_event_ctl(argp);
+
+ case HDCP_DONE:
+ return hdcp_done_ctl(argp);
+
+ default:
+ return -ENOTTY;
+ } /* End switch */
+}
+
+
+/******************************************************************************
+ * HDCP driver init/exit
+ *****************************************************************************/
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_mmap
+ *-----------------------------------------------------------------------------
+ */
+static int hdcp_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int status;
+
+ DBG("hdcp_mmap() %lx %lx %lx\n", vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start);
+
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ status = remap_pfn_range(vma, vma->vm_start,
+ HDMI_WP >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (status) {
+ DBG("mmap error %d\n", status);
+ return -EAGAIN;
+ }
+
+	DBG("mmap successful\n");
+ return 0;
+}
+
+static struct file_operations hdcp_fops = {
+ .owner = THIS_MODULE,
+ .mmap = hdcp_mmap,
+ .unlocked_ioctl = hdcp_ioctl,
+};
+
+struct miscdevice mdev;
+
+static void hdcp_load_keys_cb(const struct firmware *fw, void *context)
+{
+ struct hdcp_enable_control *en_ctrl;
+
+ if (!fw) {
+ pr_err("HDCP: failed to load keys\n");
+ return;
+ }
+
+ if (fw->size != sizeof(en_ctrl->key)) {
+		pr_err("HDCP: encrypted key file wrong size %zu\n", fw->size);
+ return;
+ }
+
+ en_ctrl = kmalloc(sizeof(*en_ctrl), GFP_KERNEL);
+ if (!en_ctrl) {
+		pr_err("HDCP: can't allocate space for keys\n");
+ return;
+ }
+
+ memcpy(en_ctrl->key, fw->data, sizeof(en_ctrl->key));
+ en_ctrl->nb_retry = 20;
+
+ hdcp.en_ctrl = en_ctrl;
+ hdcp.retry_cnt = hdcp.en_ctrl->nb_retry;
+ hdcp.hdcp_state = HDCP_ENABLE_PENDING;
+ hdcp.hdcp_keys_loaded = true;
+ pr_info("HDCP: loaded keys\n");
+}
+
+static int hdcp_load_keys(void)
+{
+ int ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ "hdcp.keys", mdev.this_device, GFP_KERNEL,
+ &hdcp, hdcp_load_keys_cb);
+ if (ret < 0) {
+ pr_err("HDCP: request_firmware_nowait failed: %d\n", ret);
+ hdcp.hdcp_keys_loaded = false;
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_init
+ *-----------------------------------------------------------------------------
+ */
+static int __init hdcp_init(void)
+{
+ DBG("hdcp_init() %u", jiffies_to_msecs(jiffies));
+
+ /* Map HDMI WP address */
+ hdcp.hdmi_wp_base_addr = ioremap(HDMI_WP, 0x1000);
+
+ if (!hdcp.hdmi_wp_base_addr) {
+ printk(KERN_ERR "HDCP: HDMI WP IOremap error\n");
+ return -EFAULT;
+ }
+
+ /* Map DESHDCP in kernel address space */
+ hdcp.deshdcp_base_addr = ioremap(DSS_SS_FROM_L3__DESHDCP, 0x34);
+
+ if (!hdcp.deshdcp_base_addr) {
+ printk(KERN_ERR "HDCP: DESHDCP IOremap error\n");
+ goto err_map_deshdcp;
+ }
+
+ mutex_init(&hdcp.lock);
+
+ mdev.minor = MISC_DYNAMIC_MINOR;
+ mdev.name = "hdcp";
+ mdev.mode = 0666;
+ mdev.fops = &hdcp_fops;
+
+ if (misc_register(&mdev)) {
+ printk(KERN_ERR "HDCP: Could not add character driver\n");
+ goto err_register;
+ }
+
+ mutex_lock(&hdcp.lock);
+
+ /* Variable init */
+ hdcp.en_ctrl = 0;
+ hdcp.hdcp_state = HDCP_DISABLED;
+ hdcp.pending_start = 0;
+ hdcp.pending_wq_event = 0;
+ hdcp.retry_cnt = 0;
+ hdcp.auth_state = HDCP_STATE_DISABLED;
+ hdcp.pending_disable = 0;
+ hdcp.hdcp_up_event = 0;
+ hdcp.hdcp_down_event = 0;
+ hdcp_wait_re_entrance = 0;
+ hdcp.hpd_low = 0;
+
+ spin_lock_init(&hdcp.spinlock);
+
+ init_completion(&hdcp_comp);
+
+ hdcp.workqueue = create_singlethread_workqueue("hdcp");
+ if (hdcp.workqueue == NULL)
+ goto err_add_driver;
+
+ hdcp_request_dss();
+
+ /* Register HDCP callbacks to HDMI library */
+ if (omapdss_hdmi_register_hdcp_callbacks(&hdcp_start_frame_cb,
+ &hdcp_irq_cb,
+ &hdcp_3des_cb))
+ hdcp.hdmi_state = HDMI_STARTED;
+ else
+ hdcp.hdmi_state = HDMI_STOPPED;
+
+ hdcp_release_dss();
+
+ mutex_unlock(&hdcp.lock);
+
+ hdcp_load_keys();
+
+ return 0;
+
+err_add_driver:
+ misc_deregister(&mdev);
+
+err_register:
+ mutex_destroy(&hdcp.lock);
+
+ iounmap(hdcp.deshdcp_base_addr);
+
+err_map_deshdcp:
+ iounmap(hdcp.hdmi_wp_base_addr);
+
+ return -EFAULT;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: hdcp_exit
+ *-----------------------------------------------------------------------------
+ */
+static void __exit hdcp_exit(void)
+{
+ DBG("hdcp_exit() %u", jiffies_to_msecs(jiffies));
+
+ mutex_lock(&hdcp.lock);
+
+ kfree(hdcp.en_ctrl);
+
+ hdcp_request_dss();
+
+ /* Un-register HDCP callbacks to HDMI library */
+ omapdss_hdmi_register_hdcp_callbacks(0, 0, 0);
+
+ hdcp_release_dss();
+
+ misc_deregister(&mdev);
+
+ /* Unmap HDMI WP / DESHDCP */
+ iounmap(hdcp.hdmi_wp_base_addr);
+ iounmap(hdcp.deshdcp_base_addr);
+
+ destroy_workqueue(hdcp.workqueue);
+
+ mutex_unlock(&hdcp.lock);
+
+ mutex_destroy(&hdcp.lock);
+}
+
+/*-----------------------------------------------------------------------------
+ *-----------------------------------------------------------------------------
+ */
+module_init(hdcp_init);
+module_exit(hdcp_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OMAP HDCP kernel module");
+MODULE_AUTHOR("Fabrice Olivero");
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index aa33386..d15486e 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -7,6 +7,7 @@
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
help
Frame buffer driver for OMAP2+ based boards.
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index cff4503..8188e92 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -883,6 +883,7 @@
case OMAPFB_GET_DISPLAY_INFO: {
u16 xres, yres;
+ u32 w, h;
DBG("ioctl GET_DISPLAY_INFO\n");
@@ -896,15 +897,9 @@
p.display_info.xres = xres;
p.display_info.yres = yres;
- if (display->driver->get_dimensions) {
- u32 w, h;
- display->driver->get_dimensions(display, &w, &h);
- p.display_info.width = w;
- p.display_info.height = h;
- } else {
- p.display_info.width = 0;
- p.display_info.height = 0;
- }
+ omapdss_display_get_dimensions(display, &w, &h);
+ p.display_info.width = w;
+ p.display_info.height = h;
if (copy_to_user((void __user *)arg, &p.display_info,
sizeof(p.display_info)))
@@ -912,6 +907,24 @@
break;
}
+ case OMAPFB_ENABLEVSYNC:
+ if (get_user(p.crt, (__u32 __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+
+ omapfb_lock(fbdev);
+ fbdev->vsync_active = !!p.crt;
+
+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ if (p.crt)
+ omapfb_enable_vsync(fbdev);
+ else
+ omapfb_disable_vsync(fbdev);
+ }
+ omapfb_unlock(fbdev);
+ break;
+
default:
dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd);
r = -EINVAL;
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505bc12..319c2ac 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -29,6 +29,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/omapfb.h>
+#include <linux/wait.h>
#include <video/omapdss.h>
#include <plat/vram.h>
@@ -303,7 +304,7 @@
var->transp = color->transp;
}
-static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var,
+int omapfb_mode_to_dss_mode(struct fb_var_screeninfo *var,
enum omap_color_mode *mode)
{
enum omap_color_mode dssmode;
@@ -358,7 +359,7 @@
dssmode = OMAP_DSS_COLOR_RGB24P;
break;
case 32:
- dssmode = OMAP_DSS_COLOR_RGB24U;
+ dssmode = OMAP_DSS_COLOR_ARGB32;
break;
default:
return -EINVAL;
@@ -375,6 +376,7 @@
return -EINVAL;
}
+EXPORT_SYMBOL(omapfb_mode_to_dss_mode);
static int check_fb_res_bounds(struct fb_var_screeninfo *var)
{
@@ -512,7 +514,7 @@
DBG("setup_vrfb_rotation\n");
- r = fb_mode_to_dss_mode(var, &mode);
+ r = omapfb_mode_to_dss_mode(var, &mode);
if (r)
return r;
@@ -622,8 +624,9 @@
fix->smem_len = var->yres_virtual * fix->line_length;
} else {
- fix->line_length =
- (var->xres_virtual * var->bits_per_pixel) >> 3;
+ /* SGX requires stride to be a multiple of 32 pixels */
+ int xres_align = ALIGN(var->xres_virtual, 32);
+ fix->line_length = (xres_align * var->bits_per_pixel) >> 3;
fix->smem_len = rg->size;
}
@@ -665,12 +668,13 @@
enum omap_color_mode mode = 0;
int i;
int r;
+ u32 w = 0, h = 0;
DBG("check_fb_var %d\n", ofbi->id);
WARN_ON(!atomic_read(&ofbi->region->lock_count));
- r = fb_mode_to_dss_mode(var, &mode);
+ r = omapfb_mode_to_dss_mode(var, &mode);
if (r) {
DBG("cannot convert var to omap dss mode\n");
return r;
@@ -702,9 +706,10 @@
var->xres, var->yres,
var->xres_virtual, var->yres_virtual);
- if (display && display->driver->get_dimensions) {
- u32 w, h;
- display->driver->get_dimensions(display, &w, &h);
+ if (display)
+ omapdss_display_get_dimensions(display, &w, &h);
+
+ if (w && h) {
var->width = DIV_ROUND_CLOSEST(w, 1000);
var->height = DIV_ROUND_CLOSEST(h, 1000);
} else {
@@ -757,6 +762,11 @@
static int omapfb_release(struct fb_info *fbi, int user)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+
+ omapfb_disable_vsync(fbdev);
+
return 0;
}
@@ -879,9 +889,9 @@
omapfb_calc_addr(ofbi, var, fix, rotation,
&data_start_p, &data_start_v);
- r = fb_mode_to_dss_mode(var, &mode);
+ r = omapfb_mode_to_dss_mode(var, &mode);
if (r) {
- DBG("fb_mode_to_dss_mode failed");
+ DBG("omapfb_mode_to_dss_mode failed");
goto err;
}
@@ -1018,6 +1028,41 @@
return r;
}
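+/* Helpers converting between fbdev and DSS timing representations: fbdev
+ * expresses pixclock in picoseconds per pixel while DSS uses kHz (hence
+ * PICOS2KHZ/KHZ2PICOS), and the fbdev margins map onto the DSS front/back
+ * porch and sync width fields.
+ */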
+void omapfb_fb2dss_timings(struct fb_videomode *fb_timings,
+ struct omap_video_timings *dss_timings)
+{
+ dss_timings->x_res = fb_timings->xres;
+ dss_timings->y_res = fb_timings->yres;
+ if (fb_timings->vmode & FB_VMODE_INTERLACED)
+ dss_timings->y_res /= 2;
+ dss_timings->pixel_clock = fb_timings->pixclock ?
+ PICOS2KHZ(fb_timings->pixclock) : 0;
+ dss_timings->hfp = fb_timings->right_margin;
+ dss_timings->hbp = fb_timings->left_margin;
+ dss_timings->hsw = fb_timings->hsync_len;
+ dss_timings->vfp = fb_timings->lower_margin;
+ dss_timings->vbp = fb_timings->upper_margin;
+ dss_timings->vsw = fb_timings->vsync_len;
+}
+EXPORT_SYMBOL(omapfb_fb2dss_timings);
+
+void omapfb_dss2fb_timings(struct omap_video_timings *dss_timings,
+ struct fb_videomode *fb_timings)
+{
+ memset(fb_timings, 0, sizeof(*fb_timings));
+ fb_timings->xres = dss_timings->x_res;
+ fb_timings->yres = dss_timings->y_res;
+ fb_timings->pixclock = dss_timings->pixel_clock ?
+ KHZ2PICOS(dss_timings->pixel_clock) : 0;
+ fb_timings->right_margin = dss_timings->hfp;
+ fb_timings->left_margin = dss_timings->hbp;
+ fb_timings->hsync_len = dss_timings->hsw;
+ fb_timings->lower_margin = dss_timings->vfp;
+ fb_timings->upper_margin = dss_timings->vbp;
+ fb_timings->vsync_len = dss_timings->vsw;
+}
+EXPORT_SYMBOL(omapfb_dss2fb_timings);
+
/* set the video mode according to info->var */
static int omapfb_set_par(struct fb_info *fbi)
{
@@ -1251,11 +1296,16 @@
switch (blank) {
case FB_BLANK_UNBLANK:
- if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
- goto exit;
+ if (display->state == OMAP_DSS_DISPLAY_SUSPENDED) {
+ if (display->driver->resume)
+ r = display->driver->resume(display);
+ } else if (display->state == OMAP_DSS_DISPLAY_DISABLED) {
+ if (display->driver->enable)
+ r = display->driver->enable(display);
+ }
- if (display->driver->resume)
- r = display->driver->resume(display);
+ if (fbdev->vsync_active)
+ omapfb_enable_vsync(fbdev);
break;
@@ -1265,11 +1315,17 @@
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
+
+ if (fbdev->vsync_active)
+ omapfb_disable_vsync(fbdev);
+
if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
if (display->driver->suspend)
r = display->driver->suspend(display);
+ else if (display->driver->disable)
+ display->driver->disable(display);
break;
@@ -2233,6 +2289,39 @@
return 0;
}
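+/* VSYNC reporting: omapfb_vsync_isr() timestamps each DISPC VSYNC interrupt
+ * and this work function emits a "VSYNC=<ns>" uevent so user space can
+ * synchronize to the display refresh.
+ */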
+static void omapfb_send_vsync_work(struct work_struct *work)
+{
+ struct omapfb2_device *fbdev =
+ container_of(work, typeof(*fbdev), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(fbdev->vsync_timestamp));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&fbdev->dev->kobj, KOBJ_CHANGE, envp);
+}
+static void omapfb_vsync_isr(void *data, u32 mask)
+{
+ struct omapfb2_device *fbdev = data;
+ fbdev->vsync_timestamp = ktime_get();
+ schedule_work(&fbdev->vsync_work);
+}
+
+int omapfb_enable_vsync(struct omapfb2_device *fbdev)
+{
+ int r;
+ /* TODO: should determine correct IRQ like dss_mgr_wait_for_vsync does*/
+ r = omap_dispc_register_isr(omapfb_vsync_isr, fbdev, DISPC_IRQ_VSYNC);
+ return r;
+}
+
+void omapfb_disable_vsync(struct omapfb2_device *fbdev)
+{
+ omap_dispc_unregister_isr(omapfb_vsync_isr, fbdev, DISPC_IRQ_VSYNC);
+}
+
static int omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
@@ -2348,6 +2437,7 @@
goto cleanup;
}
+ INIT_WORK(&fbdev->vsync_work, omapfb_send_vsync_work);
return 0;
cleanup:
@@ -2362,6 +2452,7 @@
struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
/* FIXME: wait till completion of pending events */
+ /* TODO: terminate vsync thread */
omapfb_remove_sysfs(fbdev);
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index aa1b1d9..649388f 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -97,6 +97,10 @@
struct omap_dss_device *dssdev;
u8 bpp;
} bpp_overrides[10];
+
+ bool vsync_active;
+ ktime_t vsync_timestamp;
+ struct work_struct vsync_work;
};
struct omapfb_colormode {
@@ -128,6 +132,9 @@
int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
u16 posx, u16 posy, u16 outw, u16 outh);
+int omapfb_enable_vsync(struct omapfb2_device *fbdev);
+void omapfb_disable_vsync(struct omapfb2_device *fbdev);
+
/* find the display connected to this fb, if any */
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 92bd773..38710c8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -300,6 +300,14 @@
return vq->last_used_idx != vq->vring.used->idx;
}
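+/* Non-destructive variant of the internal more_used() check: returns true
+ * when the device has added buffers to the used ring that the driver has not
+ * yet retrieved with virtqueue_get_buf().
+ */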
+bool virtqueue_more_used(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return more_used(vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_more_used);
+
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 2b4acb8..574588b 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -43,6 +43,9 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/nmi.h>
#include <mach/hardware.h>
#include <plat/prcm.h>
@@ -54,6 +57,10 @@
module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
+static int kernelpet = 1;
+module_param(kernelpet, int, 0);
+MODULE_PARM_DESC(kernelpet, "pet watchdog in kernel via irq");
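+/* When kernelpet is set and a watchdog IRQ is available, the WDLY delay
+ * interrupt fires at half the configured margin and omap_wdt_interrupt()
+ * re-arms the timer from the kernel, so the watchdog only expires if the
+ * kernel stops servicing interrupts.
+ */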
+
static unsigned int wdt_trgr_pattern = 0x1234;
static spinlock_t wdt_lock;
@@ -63,6 +70,8 @@
int omap_wdt_users;
struct resource *mem;
struct miscdevice omap_wdt_miscdev;
+ int irq;
+ struct notifier_block nb;
};
static void omap_wdt_ping(struct omap_wdt_dev *wdev)
@@ -122,6 +131,7 @@
static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
{
u32 pre_margin = GET_WLDR_VAL(timer_margin);
+ u32 delay_period = GET_WLDR_VAL(timer_margin / 2);
void __iomem *base = wdev->base;
pm_runtime_get_sync(wdev->dev);
@@ -134,15 +144,31 @@
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
+ /* Set delay interrupt to half the watchdog interval. */
+ while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 1 << 5)
+ cpu_relax();
+ __raw_writel(delay_period, base + OMAP_WATCHDOG_WDLY);
+
pm_runtime_put_sync(wdev->dev);
}
-/*
- * Allow only one task to hold it open
- */
-static int omap_wdt_open(struct inode *inode, struct file *file)
+
+static irqreturn_t omap_wdt_interrupt(int irq, void *dev_id)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(omap_wdt_dev);
+ struct omap_wdt_dev *wdev = dev_id;
+ void __iomem *base = wdev->base;
+ u32 i;
+
+ pm_runtime_get_sync(wdev->dev);
+ omap_wdt_ping(wdev);
+ i = __raw_readl(base + OMAP_WATCHDOG_WIRQSTAT);
+ __raw_writel(i, base + OMAP_WATCHDOG_WIRQSTAT);
+ pm_runtime_put_sync_suspend(wdev->dev);
+ return IRQ_HANDLED;
+}
+
+static int omap_wdt_setup(struct omap_wdt_dev *wdev)
+{
void __iomem *base = wdev->base;
if (test_and_set_bit(1, (unsigned long *)&(wdev->omap_wdt_users)))
@@ -158,20 +184,41 @@
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
- file->private_data = (void *) wdev;
-
omap_wdt_set_timeout(wdev);
omap_wdt_ping(wdev); /* trigger loading of new timeout value */
+
+ /* Enable delay interrupt */
+
+ if (kernelpet && wdev->irq)
+ __raw_writel(0x2, base + OMAP_WATCHDOG_WIRQENSET);
+
omap_wdt_enable(wdev);
pm_runtime_put_sync(wdev->dev);
+ return 0;
+}
+/*
+ * Allow only one task to hold it open
+ */
+static int omap_wdt_open(struct inode *inode, struct file *file)
+{
+ struct omap_wdt_dev *wdev = platform_get_drvdata(omap_wdt_dev);
+ int ret;
+
+ ret = omap_wdt_setup(wdev);
+
+ if (ret)
+ return ret;
+
+ file->private_data = (void *) wdev;
return nonseekable_open(inode, file);
}
static int omap_wdt_release(struct inode *inode, struct file *file)
{
struct omap_wdt_dev *wdev = file->private_data;
+ void __iomem *base = wdev->base;
/*
* Shut off the timer unless NOWAYOUT is defined.
@@ -181,6 +228,10 @@
omap_wdt_disable(wdev);
+ /* Disable delay interrupt */
+ if (kernelpet && wdev->irq)
+ __raw_writel(0x2, base + OMAP_WATCHDOG_WIRQENCLR);
+
pm_runtime_put_sync(wdev->dev);
#else
printk(KERN_CRIT "omap_wdt: Unexpected close, not stopping!\n");
@@ -270,9 +321,20 @@
.llseek = no_llseek,
};
+static int omap_wdt_nb_func(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct omap_wdt_dev *wdev = container_of(nb, struct omap_wdt_dev, nb);
+ pm_runtime_get_sync(wdev->dev);
+ omap_wdt_ping(wdev);
+ pm_runtime_put_sync_suspend(wdev->dev);
+
+ return NOTIFY_OK;
+}
+
static int __devinit omap_wdt_probe(struct platform_device *pdev)
{
- struct resource *res, *mem;
+ struct resource *res, *mem, *res_irq;
struct omap_wdt_dev *wdev;
int ret;
@@ -294,6 +356,8 @@
goto err_busy;
}
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
if (!wdev) {
ret = -ENOMEM;
@@ -310,9 +374,20 @@
goto err_ioremap;
}
+ if (res_irq) {
+ ret = request_irq(res_irq->start, omap_wdt_interrupt, 0,
+ dev_name(&pdev->dev), wdev);
+
+ if (ret)
+ goto err_irq;
+
+ wdev->irq = res_irq->start;
+ }
+
platform_set_drvdata(pdev, wdev);
pm_runtime_enable(wdev->dev);
+ pm_runtime_irq_safe(wdev->dev);
pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
@@ -335,10 +410,22 @@
omap_wdt_dev = pdev;
+ if (kernelpet && wdev->irq) {
+ wdev->nb.notifier_call = omap_wdt_nb_func;
+ atomic_notifier_chain_register(&touch_watchdog_notifier_head,
+ &wdev->nb);
+ return omap_wdt_setup(wdev);
+ }
+
return 0;
err_misc:
platform_set_drvdata(pdev, NULL);
+
+ if (wdev->irq)
+ free_irq(wdev->irq, wdev);
+
+err_irq:
iounmap(wdev->base);
err_ioremap:
@@ -377,6 +464,13 @@
release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
+ if (wdev->irq)
+ free_irq(wdev->irq, wdev);
+
+ if (kernelpet && wdev->irq)
+ atomic_notifier_chain_unregister(&touch_watchdog_notifier_head,
+ &wdev->nb);
+
iounmap(wdev->base);
kfree(wdev);
@@ -393,28 +487,30 @@
* may not play well enough with NOWAYOUT...
*/
-static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_wdt_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
- pm_runtime_put_sync(wdev->dev);
+ pm_runtime_put_sync_suspend(wdev->dev);
}
return 0;
}
-static int omap_wdt_resume(struct platform_device *pdev)
+static int omap_wdt_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_ping(wdev);
- pm_runtime_put_sync(wdev->dev);
+ pm_runtime_put_sync_suspend(wdev->dev);
}
return 0;
@@ -425,15 +521,19 @@
#define omap_wdt_resume NULL
#endif
+static const struct dev_pm_ops omap_wdt_pm_ops = {
+ .suspend_noirq = omap_wdt_suspend,
+ .resume_noirq = omap_wdt_resume,
+};
+
static struct platform_driver omap_wdt_driver = {
.probe = omap_wdt_probe,
.remove = __devexit_p(omap_wdt_remove),
.shutdown = omap_wdt_shutdown,
- .suspend = omap_wdt_suspend,
- .resume = omap_wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = "omap_wdt",
+ .pm = &omap_wdt_pm_ops,
},
};
diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h
index 09b774c..c9980d3 100644
--- a/drivers/watchdog/omap_wdt.h
+++ b/drivers/watchdog/omap_wdt.h
@@ -38,7 +38,11 @@
#define OMAP_WATCHDOG_LDR (0x2c)
#define OMAP_WATCHDOG_TGR (0x30)
#define OMAP_WATCHDOG_WPS (0x34)
+#define OMAP_WATCHDOG_WDLY (0x44)
#define OMAP_WATCHDOG_SPR (0x48)
+#define OMAP_WATCHDOG_WIRQSTAT (0x58)
+#define OMAP_WATCHDOG_WIRQENSET (0x5c)
+#define OMAP_WATCHDOG_WIRQENCLR (0x60)
/* Using the prescaler, the OMAP watchdog could go for many
* months before firing. These limits work without scaling,
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 0000000..455b233
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+ /* A single cpu is entering a low power state */
+ CPU_PM_ENTER,
+
+ /* A single cpu failed to enter a low power state */
+ CPU_PM_ENTER_FAILED,
+
+ /* A single cpu is exiting a low power state */
+ CPU_PM_EXIT,
+
+ /* A cpu power domain is entering a low power state */
+ CPU_CLUSTER_PM_ENTER,
+
+ /* A cpu power domain failed to enter a low power state */
+ CPU_CLUSTER_PM_ENTER_FAILED,
+
+ /* A cpu power domain is exiting a low power state */
+ CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+ return 0;
+}
+#endif
+#endif
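Illustrative sketch (not part of the patch): a driver whose registers live in the CPU power domain could hook the cpu_pm notifiers declared above roughly as follows; the my_block_*() helpers are hypothetical.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

static void my_block_save_context(void)
{
	/* hypothetical: save the block's registers before power may be cut */
}

static void my_block_restore_context(void)
{
	/* hypothetical: reprogram the block after the low power state */
}

static int my_block_cpu_pm_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		my_block_save_context();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		my_block_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_block_cpu_pm_nb = {
	.notifier_call = my_block_cpu_pm_notify,
};

static int __init my_block_init(void)
{
	return cpu_pm_register_notifier(&my_block_cpu_pm_nb);
}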
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ae06dc9..905ea72 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -361,6 +361,9 @@
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
extern struct cpufreq_governor cpufreq_gov_interactive;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG)
+extern struct cpufreq_governor cpufreq_gov_hotplug;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_hotplug)
#endif
@@ -402,5 +405,14 @@
void cpufreq_frequency_table_put_attr(unsigned int cpu);
+/* the following are for use in governors, or anywhere else */
+extern int cpufreq_frequency_table_next_lowest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ int *index);
+
+extern int cpufreq_frequency_table_next_highest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ int *index);
+
#endif /* _LINUX_CPUFREQ_H */
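Hedged sketch of how a governor might use the new table helpers; it assumes, from the names only, that cpufreq_frequency_table_next_lowest() fills *index with the table entry one step below the current frequency and returns non-zero when no lower entry exists:

#include <linux/cpufreq.h>

/* pick the next lower frequency step, falling back to policy->min */
static unsigned int pick_lower_freq(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int index;

	if (cpufreq_frequency_table_next_lowest(policy, table, &index))
		return policy->min;	/* assumed: non-zero means no lower entry */

	return table[index].frequency;
}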
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f9d013d..e8cc747 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -226,6 +226,10 @@
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */
+#define FB_FLAG_RATIO_4_3 64
+#define FB_FLAG_RATIO_16_9 128
+#define FB_FLAG_PIXEL_REPEAT 256
+
/*
* Display rotation support
*/
@@ -439,6 +443,8 @@
#define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
+#define FB_MISC_HDMI 4 /* display supports HDMI signaling */
+
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
@@ -1104,6 +1110,7 @@
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
+#define CEA_MODEDB_SIZE 65
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -1156,7 +1163,7 @@
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[64];
+extern const struct fb_videomode cea_modes[];
struct fb_modelist {
struct list_head list;
diff --git a/include/linux/hsi_char.h b/include/linux/hsi_char.h
new file mode 100644
index 0000000..cfa6580
--- /dev/null
+++ b/include/linux/hsi_char.h
@@ -0,0 +1,71 @@
+/*
+ * hsi_char.h
+ *
+ * HSI character driver public declaration header file.
+ *
+ * Copyright (C) 2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Andras Domokos <andras.domokos@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef HSI_CHAR_H
+#define HSI_CHAR_H
+
+#define HSI_CHAR_BASE 'S'
+#define CS_IOW(num, dtype) _IOW(HSI_CHAR_BASE, num, dtype)
+#define CS_IOR(num, dtype) _IOR(HSI_CHAR_BASE, num, dtype)
+#define CS_IOWR(num, dtype) _IOWR(HSI_CHAR_BASE, num, dtype)
+#define CS_IO(num) _IO(HSI_CHAR_BASE, num)
+
+#define CS_SEND_BREAK CS_IO(1)
+#define CS_FLUSH_RX CS_IO(2)
+#define CS_FLUSH_TX CS_IO(3)
+#define CS_BOOTSTRAP CS_IO(4)
+#define CS_SET_ACWAKELINE CS_IOW(5, unsigned int)
+#define CS_GET_ACWAKELINE CS_IOR(6, unsigned int)
+#define CS_SET_RX CS_IOW(7, struct hsi_rx_config)
+#define CS_GET_RX CS_IOW(8, struct hsi_rx_config)
+#define CS_SET_TX CS_IOW(9, struct hsi_tx_config)
+#define CS_GET_TX CS_IOW(10, struct hsi_tx_config)
+#define CS_SW_RESET CS_IO(11)
+#define CS_GET_FIFO_OCCUPANCY CS_IOR(12, size_t)
+
+#define HSI_MODE_SLEEP 0
+#define HSI_MODE_STREAM 1
+#define HSI_MODE_FRAME 2
+
+#define HSI_ARBMODE_RR 0
+#define HSI_ARBMODE_PRIO 1
+
+#define WAKE_UP 1
+#define WAKE_DOWN 0
+
+struct hsi_tx_config {
+ __u32 mode;
+ __u32 flow;
+ __u32 frame_size;
+ __u32 channels;
+ __u32 divisor;
+ __u32 arb_mode;
+};
+
+struct hsi_rx_config {
+ __u32 mode;
+ __u32 flow;
+ __u32 frame_size;
+ __u32 channels;
+ __u32 divisor; /* not used for SSI */
+ __u32 counters;
+};
+
+#endif /* HSI_CHAR_H */
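Minimal user-space sketch of the ioctl interface declared above (not part of the patch); the device node name is an assumption and depends on how the platform creates the character device:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hsi_char.h>

int main(void)
{
	struct hsi_rx_config rx;
	int fd = open("/dev/hsi_char", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;

	if (ioctl(fd, CS_GET_RX, &rx) == 0)
		printf("rx: mode=%u channels=%u frame_size=%u\n",
		       rx.mode, rx.channels, rx.frame_size);

	close(fd);
	return 0;
}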
diff --git a/include/linux/hsi_driver_if.h b/include/linux/hsi_driver_if.h
new file mode 100644
index 0000000..547b30e
--- /dev/null
+++ b/include/linux/hsi_driver_if.h
@@ -0,0 +1,181 @@
+/*
+ * hsi_driver_if.h
+ *
+ * Header for the HSI driver low level interface.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Author: Carlos Chinea <carlos.chinea@nokia.com>
+ * Author: Sebastien JAN <s-jan@ti.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef __HSI_DRIVER_IF_H__
+#define __HSI_DRIVER_IF_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/notifier.h>
+
+/* The number of ports handled by the driver (MAX:2). Reducing this value
+ * optimizes the driver memory footprint.
+ */
+#define HSI_MAX_PORTS 1
+
+/* bit-field definition for allowed controller IDs and channels */
+#define ANY_HSI_CONTROLLER -1
+
+/* HSR special divisor values set to control the auto-divisor Rx mode */
+#define HSI_HSR_DIVISOR_AUTO 0x1000 /* Activate auto Rx */
+#define HSI_SSR_DIVISOR_USE_TIMEOUT 0x1001 /* De-activate auto-Rx (SSI) */
+
+enum {
+ HSI_EVENT_BREAK_DETECTED = 0,
+ HSI_EVENT_ERROR,
+ HSI_EVENT_PRE_SPEED_CHANGE,
+ HSI_EVENT_POST_SPEED_CHANGE,
+ HSI_EVENT_CAWAKE_UP,
+ HSI_EVENT_CAWAKE_DOWN,
+ HSI_EVENT_HSR_DATAAVAILABLE,
+};
+
+enum {
+ HSI_IOCTL_ACWAKE_DOWN = 0, /* Unset HST ACWAKE line for channel */
+ HSI_IOCTL_ACWAKE_UP, /* Set HSI wakeup line (acwake) for channel */
+ HSI_IOCTL_SEND_BREAK, /* Send a HW BREAK frame in FRAME mode */
+ HSI_IOCTL_GET_ACWAKE, /* Get HST CAWAKE line status */
+ HSI_IOCTL_FLUSH_RX, /* Force the HSR to idle state */
+ HSI_IOCTL_FLUSH_TX, /* Force the HST to idle state */
+ HSI_IOCTL_GET_CAWAKE, /* Get CAWAKE (HSR) line status */
+ HSI_IOCTL_SET_RX, /* Set HSR configuration */
+ HSI_IOCTL_GET_RX, /* Get HSR configuration */
+ HSI_IOCTL_SET_TX, /* Set HST configuration */
+ HSI_IOCTL_GET_TX, /* Get HST configuration */
+ HSI_IOCTL_SW_RESET, /* Force a HSI SW RESET */
+ HSI_IOCTL_GET_FIFO_OCCUPANCY, /* Get amount of words in RX FIFO */
+ HSI_IOCTL_SET_ACREADY_SAFEMODE,
+ HSI_IOCTL_SET_ACREADY_NORMAL,
+ HSI_IOCTL_SET_3WIRE_MODE,
+ HSI_IOCTL_SET_4WIRE_MODE,
+};
+
+/* Forward references */
+struct hsi_device;
+struct hsi_channel;
+
+/* DPS */
+struct hst_ctx {
+ u32 mode;
+ u32 flow;
+ u32 frame_size;
+ u32 divisor;
+ u32 arb_mode;
+ u32 channels;
+};
+
+struct hsr_ctx {
+ u32 mode;
+ u32 flow;
+ u32 frame_size;
+ u32 divisor;
+ u32 counters;
+ u32 channels;
+};
+
+struct port_ctx {
+ u32 sys_mpu_enable[2];
+ struct hst_ctx hst;
+ struct hsr_ctx hsr;
+};
+
+/**
+ * struct ctrl_ctx - hsi controller regs context
+ * @sysconfig: keeps HSI_SYSCONFIG reg state
+ * @gdd_gcr: keeps DMA_GCR reg state
+ * @dll: keeps HSR_DLL state
+ * @pctx: array of port context
+ */
+struct ctrl_ctx {
+ u32 sysconfig;
+ u32 gdd_gcr;
+ u32 dll;
+ struct port_ctx *pctx;
+};
+/* END DPS */
+
+
+/**
+ * struct hsi_device - HSI device object (Virtual)
+ * @n_ctrl: associated HSI controller platform id number
+ * @n_p: port number
+ * @n_ch: channel number
+ * @ch: channel descriptor
+ * @device: associated device
+*/
+struct hsi_device {
+ int n_ctrl;
+ unsigned int n_p;
+ unsigned int n_ch;
+ struct hsi_channel *ch;
+ struct device device;
+};
+
+#define to_hsi_device(dev) container_of(dev, struct hsi_device, device)
+
+/**
+ * struct hsi_device_driver - HSI driver instance container
+ * @ctrl_mask: bit-field indicating the supported HSI device ids
+ * @ch_mask: bit-field indicating enabled channels for this port
+ * @probe: probe callback (driver registering)
+ * @remove: remove callback (driver un-registering)
+ * @suspend: suspend callback
+ * @resume: resume callback
+ * @driver: associated device_driver object
+*/
+struct hsi_device_driver {
+ unsigned long ctrl_mask;
+ unsigned long ch_mask[HSI_MAX_PORTS];
+ int (*probe) (struct hsi_device *dev);
+ int (*remove) (struct hsi_device *dev);
+ int (*suspend) (struct hsi_device *dev, pm_message_t mesg);
+ int (*resume) (struct hsi_device *dev);
+ struct device_driver driver;
+ void *priv_data;
+
+};
+
+#define to_hsi_device_driver(drv) container_of(drv, \
+ struct hsi_device_driver, \
+ driver)
+
+int hsi_register_driver(struct hsi_device_driver *driver);
+void hsi_unregister_driver(struct hsi_device_driver *driver);
+int hsi_open(struct hsi_device *dev);
+int hsi_write(struct hsi_device *dev, u32 * addr, unsigned int size);
+int hsi_write_cancel(struct hsi_device *dev);
+int hsi_read(struct hsi_device *dev, u32 * addr, unsigned int size);
+int hsi_read_cancel(struct hsi_device *dev);
+int hsi_poll(struct hsi_device *dev);
+int hsi_unpoll(struct hsi_device *dev);
+int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg);
+void hsi_close(struct hsi_device *dev);
+void hsi_set_read_cb(struct hsi_device *dev,
+ void (*read_cb) (struct hsi_device *dev,
+ unsigned int size));
+void hsi_set_write_cb(struct hsi_device *dev,
+ void (*write_cb) (struct hsi_device *dev,
+ unsigned int size));
+void hsi_set_port_event_cb(struct hsi_device *dev,
+ void (*port_event_cb) (struct hsi_device *dev,
+ unsigned int event,
+ void *arg));
+#endif /* __HSI_DRIVER_IF_H__ */
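Hedged sketch of a client of the interface above: it registers an hsi_device_driver for channel 0 of the first port, opens the channel in probe and installs a read callback. All my_* names are hypothetical.

#include <linux/module.h>
#include <linux/hsi_driver_if.h>

static void my_hsi_read_done(struct hsi_device *dev, unsigned int size)
{
	/* 'size' words have been received into the buffer given to hsi_read() */
}

static int my_hsi_probe(struct hsi_device *dev)
{
	int err = hsi_open(dev);

	if (err)
		return err;

	hsi_set_read_cb(dev, my_hsi_read_done);
	return 0;
}

static int my_hsi_remove(struct hsi_device *dev)
{
	hsi_close(dev);
	return 0;
}

static struct hsi_device_driver my_hsi_client = {
	.ctrl_mask = ANY_HSI_CONTROLLER,	/* accept any controller */
	.ch_mask[0] = 1 << 0,			/* channel 0 on the first port */
	.probe = my_hsi_probe,
	.remove = my_hsi_remove,
	.driver = {
		.name = "my_hsi_client",
		.owner = THIS_MODULE,
	},
};

static int __init my_hsi_client_init(void)
{
	return hsi_register_driver(&my_hsi_client);
}
module_init(my_hsi_client_init);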
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
index 7472449..3beb390 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/i2c-omap.h
@@ -5,7 +5,7 @@
struct omap_i2c_bus_platform_data {
u32 clkrate;
- void (*set_mpu_wkup_lat)(struct device *dev, long set);
+ bool needs_wakeup_latency;
int (*device_enable) (struct platform_device *pdev);
int (*device_shutdown) (struct platform_device *pdev);
int (*device_idle) (struct platform_device *pdev);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a6c652e..fd8fcf0 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -208,6 +208,7 @@
struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
+ bool ext_master; /* true if the device has a master other than the MPU */
struct list_head detected;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -239,6 +240,7 @@
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @irq: stored in i2c_client.irq
+ * @ext_master: true if the device has a master other than the MPU
*
* I2C doesn't actually support hardware probing, although controllers and
* devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's
@@ -259,6 +261,7 @@
struct dev_archdata *archdata;
struct device_node *of_node;
int irq;
+ bool ext_master;
};
/**
@@ -370,6 +373,9 @@
struct mutex userspace_clients_lock;
struct list_head userspace_clients;
+
+ struct mutex ext_clients_lock; /* Lock for external clients list */
+ struct list_head ext_clients; /* Clients with master from external proc */
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -429,6 +435,7 @@
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
extern int i2c_add_adapter(struct i2c_adapter *);
extern int i2c_del_adapter(struct i2c_adapter *);
+extern void i2c_detect_ext_master(struct i2c_adapter *);
extern int i2c_add_numbered_adapter(struct i2c_adapter *);
extern int i2c_register_driver(struct module *, struct i2c_driver *);
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index ba4f886..3c6e9a0 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -71,6 +71,7 @@
#define TWL4030_MODULE_PM_RECEIVER 0x15
#define TWL4030_MODULE_RTC 0x16
#define TWL4030_MODULE_SECURED_REG 0x17
+#define TWL6030_MODULE_SLAVE_RES 0x19
#define TWL_MODULE_USB TWL4030_MODULE_USB
#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
@@ -81,6 +82,7 @@
#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
#define TWL_MODULE_RTC TWL4030_MODULE_RTC
#define TWL_MODULE_PWM TWL4030_MODULE_PWM0
+#define TWL_MODULE_PM_SLAVE_RES TWL6030_MODULE_SLAVE_RES
#define TWL6030_MODULE_ID0 0x0D
#define TWL6030_MODULE_ID1 0x0E
@@ -100,6 +102,7 @@
* Offset from TWL6030_IRQ_BASE / pdata->irq_base
*/
#define PWR_INTR_OFFSET 0
+#define TWL_VLOW_INTR_OFFSET 6
#define HOTDIE_INTR_OFFSET 12
#define SMPSLDO_INTR_OFFSET 13
#define BATDETECT_INTR_OFFSET 14
@@ -151,6 +154,8 @@
#define MMC_PU (0x1 << 3)
#define MMC_PD (0x1 << 2)
+#define VLOW_INT_MASK (0x1 << 2)
+
#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF)
#define TWL_SIL_REV(rev) ((rev) >> 24)
#define TWL_SIL_5030 0x09002F
@@ -450,6 +455,16 @@
#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6
+#define TWL6030_PHOENIX_DEV_ON 0x06
+
+/*
+ * PM Slave resource module register offsets (use TWL6030_MODULE_SLAVE_RES)
+ */
+
+#define REG_VBATMIN_HI_CFG_STATE 0x1D
+
+#define VBATMIN_VLOW_EN 0x21
+
/*----------------------------------------------------------------------*/
/* Power bus message definitions */
@@ -522,6 +537,26 @@
#define RES_MAIN_REF 28
#define TOTAL_RESOURCES 28
+/* 6030 extra resources */
+#define RES_V1V29 29
+#define RES_V1V8 30
+#define RES_V2V1 31
+#define RES_VDD3 32
+#define RES_VMEM 33
+#define RES_VANA 34
+#define RES_VUAX1 35
+#define RES_VCXIO 36
+#define RES_VPP 37
+#define RES_VRTC 38
+#define RES_REGEN2 39
+#define RES_32KCLKAO 40
+#define RES_32KCLKG 41
+#define RES_32KCLKAUDIO 42
+#define RES_BIAS 43
+#define RES_VBATMIN_HI 44
+#define RES_RC6MHZ 45
+#define RES_TEMP 46
+
/*
* Power Bus Message Format ... these can be sent individually by Linux,
* but are usually part of downloaded scripts that are run when various
@@ -641,21 +676,39 @@
struct twl4030_resconfig {
u8 resource;
u8 devgroup; /* Processor group that Power resource belongs to */
+ /* The following are used by TWL4030 only */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
u8 remap_off; /* off state remapping */
u8 remap_sleep; /* sleep state remapping */
};
+struct twl4030_system_config {
+ char *name;
+ u8 group;
+};
+
struct twl4030_power_data {
- struct twl4030_script **scripts;
- unsigned num;
+ struct twl4030_script **scripts; /* used in TWL4030 only */
+ unsigned num; /* used in TWL4030 only */
struct twl4030_resconfig *resource_config;
+ struct twl4030_system_config *sys_config; /*system resources*/
#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
+#ifdef CONFIG_TWL4030_POWER
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
extern int twl4030_remove_script(u8 flags);
+#else
+static inline void twl4030_power_init(struct twl4030_power_data *triton2_scripts) { }
+static inline int twl4030_remove_script(u8 flags) { return -EINVAL; }
+#endif
+
+#ifdef CONFIG_TWL6030_POWER
+extern void twl6030_power_init(struct twl4030_power_data *power_data);
+#else
+static inline void twl6030_power_init(struct twl4030_power_data *power_data) { }
+#endif
struct twl4030_codec_audio_data {
unsigned int digimic_delay; /* in ms */
@@ -664,6 +717,11 @@
unsigned int check_defaults:1;
unsigned int reset_registers:1;
unsigned int hs_extmute:1;
+ u16 hs_left_step;
+ u16 hs_right_step;
+ u16 hf_left_step;
+ u16 hf_right_step;
+ u16 ep_step;
void (*set_hs_extmute)(int mute);
};
@@ -679,6 +737,10 @@
/* twl6040 */
int audpwron_gpio; /* audio power-on gpio */
int naudint_irq; /* audio interrupt */
+ unsigned int irq_base;
+ int (*get_ext_clk32k)(void);
+ void (*put_ext_clk32k)(void);
+ int (*set_ext_clk32k)(bool on);
};
struct twl4030_platform_data {
@@ -710,6 +772,10 @@
struct regulator_init_data *vintana1;
struct regulator_init_data *vintana2;
struct regulator_init_data *vintdig;
+ /* TWL6030 DCDC regulators */
+ struct regulator_init_data *vdd3;
+ struct regulator_init_data *vmem;
+ struct regulator_init_data *v2v1;
/* TWL6030 LDO regulators */
struct regulator_init_data *vmmc;
struct regulator_init_data *vpp;
@@ -718,6 +784,7 @@
struct regulator_init_data *vcxio;
struct regulator_init_data *vusb;
struct regulator_init_data *clk32kg;
+ struct regulator_init_data *clk32kaudio;
/* TWL6025 LDO regulators */
struct regulator_init_data *ldo1;
struct regulator_init_data *ldo2;
@@ -829,5 +896,6 @@
#define TWL6025_REG_SMPS4 59
#define TWL6025_REG_VIO 60
+#define TWL6030_REG_CLK32KAUDIO 61
#endif /* End of __TWL4030_H */
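Hedged board-code sketch of the new power-data fields (sys_config plus the TWL6030 init path); the resource name and group values are illustrative only:

static struct twl4030_system_config my_sys_config[] = {
	{ .name = "vdd1", .group = DEV_GRP_P1 },	/* name is illustrative */
	{ },
};

static struct twl4030_resconfig my_resconfig[] = {
	{ .resource = RES_VBATMIN_HI, .devgroup = DEV_GRP_P1 },
	{ 0, 0 },
};

static struct twl4030_power_data my_power_data = {
	.resource_config = my_resconfig,
	.sys_config = my_sys_config,
};

/* on a TWL6030-based board this would typically be passed in via the
 * platform data's power pointer and consumed by twl6030_power_init() */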
diff --git a/include/linux/i2c/twl6030-madc.h b/include/linux/i2c/twl6030-madc.h
new file mode 100644
index 0000000..81b9464
--- /dev/null
+++ b/include/linux/i2c/twl6030-madc.h
@@ -0,0 +1,86 @@
+/*
+ * twl6030_madc.h - Header for TWL6030 MADC
+ *
+ * Copyright (C) 2011 Samsung Telecommunications of America
+ *
+ * Based on twl4030-madc.h
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _TWL6030_MADC_H
+#define _TWL6030_MADC_H
+
+#define TWL6030_MADC_MAX_CHANNELS 17
+/*
+ * The TWL6030 MADC occupies the same offset in the TWL6030 register map
+ * that the TWL4030 MADC does in the TWL4030 map; the same holds for the
+ * charger module.
+ */
+#define TWL6030_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL6030_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+
+#define TWL6030_MADC_CTRL 0x00
+#define TWL6030_MADC_TEMP1_EN (1 << 0)
+#define TWL6030_MADC_TEMP2_EN (1 << 1)
+#define TWL6030_MADC_SCALER_EN_CH2 (1 << 2)
+#define TWL6030_MADC_VBAT_SCALER_DIV (1 << 3)
+#define TWL6030_MADC_SCALER_EN_CH11 (1 << 4)
+#define TWL6030_MADC_TMP1_EN_MONITOR (1 << 5)
+#define TWL6030_MADC_TMP2_EN_MONITOR (1 << 6)
+#define TWL6030_MADC_ISOURCE_EN (1 << 7)
+
+#define TWL6030_MADC_RTSELECT_LSB 0x02
+#define TWL6030_MADC_ADCIN0 (1 << 0)
+#define TWL6030_MADC_ADCIN1 (1 << 1)
+#define TWL6030_MADC_ADCIN2 (1 << 2)
+#define TWL6030_MADC_ADCIN3 (1 << 3)
+#define TWL6030_MADC_ADCIN4 (1 << 4)
+#define TWL6030_MADC_ADCIN5 (1 << 5)
+#define TWL6030_MADC_ADCIN6 (1 << 6)
+#define TWL6030_MADC_ADCIN7 (1 << 7)
+
+#define TWL6030_MADC_RTSELECT_ISB 0x03
+#define TWL6030_MADC_ADCIN8 (1 << 0)
+#define TWL6030_MADC_ADCIN9 (1 << 1)
+#define TWL6030_MADC_ADCIN10 (1 << 2)
+#define TWL6030_MADC_ADCIN11 (1 << 3)
+#define TWL6030_MADC_ADCIN12 (1 << 4)
+#define TWL6030_MADC_ADCIN13 (1 << 5)
+#define TWL6030_MADC_ADCIN14 (1 << 6)
+#define TWL6030_MADC_ADCIN15 (1 << 7)
+
+#define TWL6030_MADC_RTSELECT_MSB 0x04
+#define TWL6030_MADC_ADCIN16 (1 << 0)
+
+#define TWL6030_MADC_CTRL_P1 0x05
+#define TWL6030_MADC_BUSY (1 << 0)
+#define TWL6030_MADC_EOCP1 (1 << 1)
+#define TWL6030_MADC_EOCRT (1 << 2)
+#define TWL6030_MADC_SP1 (1 << 3)
+
+#define TWL6030_MADC_CTRL_P2 0x06
+#define TWL6030_MADC_BUSYB (1 << 0)
+#define TWL6030_MADC_EOCP2 (1 << 1)
+#define TWL6030_MADC_SP2 (1 << 2)
+
+#define TWL6030_MADC_RTCH0_LSB 0x07
+#define TWL6030_MADC_GPCH0_LSB 0x29
+
+int twl6030_get_madc_conversion(int channel_no);
+#endif
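Short kernel-side sketch of the single exported helper; the channel number is only an example and depends on what the board routes to that ADC input:

#include <linux/kernel.h>
#include <linux/i2c/twl6030-madc.h>

static void my_madc_sample(void)
{
	int raw = twl6030_get_madc_conversion(2);	/* channel 2: example only */

	if (raw < 0)
		pr_err("MADC conversion failed: %d\n", raw);
	else
		pr_info("MADC channel 2 raw value: %d\n", raw);
}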
diff --git a/include/linux/mfd/twl6040-codec.h b/include/linux/mfd/twl6040-codec.h
new file mode 100644
index 0000000..13ac335
--- /dev/null
+++ b/include/linux/mfd/twl6040-codec.h
@@ -0,0 +1,269 @@
+/*
+ * MFD driver for twl6040 codec submodule
+ *
+ * Authors: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TWL6040_CODEC_H__
+#define __TWL6040_CODEC_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+
+#define TWL6040_NO_SUPPLY 0
+#define TWL6040_VIO_SUPPLY 1
+#define TWL6040_VDD_SUPPLY 2
+
+
+#define TWL6040_REG_ASICID 0x01
+#define TWL6040_REG_ASICREV 0x02
+#define TWL6040_REG_INTID 0x03
+#define TWL6040_REG_INTMR 0x04
+#define TWL6040_REG_NCPCTL 0x05
+#define TWL6040_REG_LDOCTL 0x06
+#define TWL6040_REG_HPPLLCTL 0x07
+#define TWL6040_REG_LPPLLCTL 0x08
+#define TWL6040_REG_LPPLLDIV 0x09
+#define TWL6040_REG_AMICBCTL 0x0A
+#define TWL6040_REG_DMICBCTL 0x0B
+#define TWL6040_REG_MICLCTL 0x0C
+#define TWL6040_REG_MICRCTL 0x0D
+#define TWL6040_REG_MICGAIN 0x0E
+#define TWL6040_REG_LINEGAIN 0x0F
+#define TWL6040_REG_HSLCTL 0x10
+#define TWL6040_REG_HSRCTL 0x11
+#define TWL6040_REG_HSGAIN 0x12
+#define TWL6040_REG_EARCTL 0x13
+#define TWL6040_REG_HFLCTL 0x14
+#define TWL6040_REG_HFLGAIN 0x15
+#define TWL6040_REG_HFRCTL 0x16
+#define TWL6040_REG_HFRGAIN 0x17
+#define TWL6040_REG_VIBCTLL 0x18
+#define TWL6040_REG_VIBDATL 0x19
+#define TWL6040_REG_VIBCTLR 0x1A
+#define TWL6040_REG_VIBDATR 0x1B
+#define TWL6040_REG_HKCTL1 0x1C
+#define TWL6040_REG_HKCTL2 0x1D
+#define TWL6040_REG_GPOCTL 0x1E
+#define TWL6040_REG_ALB 0x1F
+#define TWL6040_REG_DLB 0x20
+#define TWL6040_REG_TRIM1 0x28
+#define TWL6040_REG_TRIM2 0x29
+#define TWL6040_REG_TRIM3 0x2A
+#define TWL6040_REG_HSOTRIM 0x2B
+#define TWL6040_REG_HFOTRIM 0x2C
+#define TWL6040_REG_ACCCTL 0x2D
+#define TWL6040_REG_STATUS 0x2E
+
+#define TWL6040_CACHEREGNUM (TWL6040_REG_STATUS + 1)
+
+#define TWL6040_VIOREGNUM 18
+#define TWL6040_VDDREGNUM 21
+
+/* ASICREV (0x02) values */
+
+#define TWL6040_REV_1_0 0x00
+#define TWL6040_REV_1_1 0x01
+#define TWL6040_REV_1_3 0x02
+
+/* INTID (0x03) fields */
+
+#define TWL6040_THINT 0x01
+#define TWL6040_PLUGINT 0x02
+#define TWL6040_UNPLUGINT 0x04
+#define TWL6040_HOOKINT 0x08
+#define TWL6040_HFINT 0x10
+#define TWL6040_VIBINT 0x20
+#define TWL6040_READYINT 0x40
+
+/* INTMR (0x04) fields */
+
+#define TWL6040_THMSK 0x01
+#define TWL6040_PLUGMSK 0x02
+#define TWL6040_HOOKMSK 0x08
+#define TWL6040_HFMSK 0x10
+#define TWL6040_VIBMSK 0x20
+#define TWL6040_READYMSK 0x40
+#define TWL6040_ALLINT_MSK 0x7B
+
+/* NCPCTL (0x05) fields */
+
+#define TWL6040_NCPENA 0x01
+#define TWL6040_NCPOPEN 0x40
+#define TWL6040_TSHUTENA 0x80
+
+/* LDOCTL (0x06) fields */
+
+#define TWL6040_LSLDOENA 0x01
+#define TWL6040_HSLDOENA 0x04
+#define TWL6040_REFENA 0x40
+#define TWL6040_OSCENA 0x80
+
+/* HPPLLCTL (0x07) fields */
+
+#define TWL6040_HPLLENA 0x01
+#define TWL6040_HPLLRST 0x02
+#define TWL6040_HPLLBP 0x04
+#define TWL6040_HPLLSQRENA 0x08
+#define TWL6040_HPLLSQRBP 0x10
+#define TWL6040_MCLK_12000KHZ (0 << 5)
+#define TWL6040_MCLK_19200KHZ (1 << 5)
+#define TWL6040_MCLK_26000KHZ (2 << 5)
+#define TWL6040_MCLK_38400KHZ (3 << 5)
+#define TWL6040_MCLK_MSK 0x60
+
+/* LPPLLCTL (0x08) fields */
+
+#define TWL6040_LPLLENA 0x01
+#define TWL6040_LPLLRST 0x02
+#define TWL6040_LPLLSEL 0x04
+#define TWL6040_LPLLFIN 0x08
+#define TWL6040_HPLLSEL 0x10
+
+/* HSLCTL (0x10) fields */
+
+#define TWL6040_HSDACMODEL 0x02
+#define TWL6040_HSDRVMODEL 0x08
+
+/* HSRCTL (0x11) fields */
+
+#define TWL6040_HSDACMODER 0x02
+#define TWL6040_HSDRVMODER 0x08
+
+/* VIBCTLL (0x18) fields */
+
+#define TWL6040_VIBCTRLLN 0x10
+#define TWL6040_VIBCTRLLP 0x04
+#define TWL6040_VIBENAL 0x01
+
+/* VIBCTLL (0x19) fields */
+
+#define TWL6040_VIBCTRLRN 0x10
+#define TWL6040_VIBCTRLRP 0x04
+#define TWL6040_VIBENAR 0x01
+
+/* GPOCTL (0x1E) fields */
+
+#define TWL6040_GPO1 0x01
+#define TWL6040_GPO2 0x02
+#define TWL6040_GPO3 0x03
+
+/* HSOTRIM (0x2B) fields */
+
+#define TWL6040_HSLO 0x0F
+#define TWL6040_HSRO 0xF0
+#define TWL6040_HSLO_OFFSET 0
+#define TWL6040_HSRO_OFFSET 4
+
+/* HFOTRIM (0x2C) fields */
+
+#define TWL6040_HFLO 0x0F
+#define TWL6040_HFRO 0xF0
+#define TWL6040_HFLO_OFFSET 0
+#define TWL6040_HFRO_OFFSET 4
+
+/* ACCCTL (0x2D) fields */
+
+#define TWL6040_I2CSEL 0x01
+#define TWL6040_RESETSPLIT 0x04
+#define TWL6040_INTCLRMODE 0x08
+#define TWL6040_CLK32KSEL 0x40
+
+/* STATUS (0x2E) fields */
+
+#define TWL6040_PLUGCOMP 0x02
+
+#define TWL6040_CELLS 2
+
+#define TWL6040_IRQ_TH 0
+#define TWL6040_IRQ_PLUG 1
+#define TWL6040_IRQ_HOOK 2
+#define TWL6040_IRQ_HF 3
+#define TWL6040_IRQ_VIB 4
+#define TWL6040_IRQ_READY 5
+
+enum twl6040_pll_id {
+ TWL6040_NOPLL_ID,
+ TWL6040_LPPLL_ID,
+ TWL6040_HPPLL_ID,
+};
+
+struct twl6040 {
+ struct device *dev;
+ struct mutex mutex;
+ struct mutex io_mutex;
+ struct mutex irq_mutex;
+ struct mfd_cell cells[TWL6040_CELLS];
+ struct completion ready;
+
+ int audpwron;
+ int powered;
+ int power_count;
+
+ enum twl6040_pll_id pll;
+ unsigned int sysclk;
+ int icrev;
+
+ unsigned int irq;
+ unsigned int irq_base;
+ u8 irq_masks_cur;
+ u8 irq_masks_cache;
+};
+
+static inline int twl6040_request_irq(struct twl6040 *twl6040, int irq,
+ irq_handler_t handler, const char *name,
+ void *data)
+{
+ if (!twl6040->irq_base)
+ return -EINVAL;
+
+ return request_threaded_irq(twl6040->irq_base + irq, NULL, handler,
+ 0, name, data);
+}
+
+static inline void twl6040_free_irq(struct twl6040 *twl6040, int irq,
+ void *data)
+{
+ if (!twl6040->irq_base)
+ return;
+
+ free_irq(twl6040->irq_base + irq, data);
+}
+
+int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg);
+int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg,
+ u8 val);
+int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg,
+ u8 mask);
+int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg,
+ u8 mask);
+int twl6040_enable(struct twl6040 *twl6040);
+int twl6040_disable(struct twl6040 *twl6040);
+int twl6040_is_enabled(struct twl6040 *twl6040);
+int twl6040_set_pll(struct twl6040 *twl6040, enum twl6040_pll_id id,
+ unsigned int freq_in, unsigned int freq_out);
+enum twl6040_pll_id twl6040_get_pll(struct twl6040 *twl6040);
+unsigned int twl6040_get_sysclk(struct twl6040 *twl6040);
+int twl6040_get_icrev(struct twl6040 *twl6040);
+int twl6040_irq_init(struct twl6040 *twl6040);
+void twl6040_irq_exit(struct twl6040 *twl6040);
+
+#endif /* End of __TWL6040_CODEC_H__ */
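Hedged sketch of a child driver using the helpers above: request the headset-plug interrupt through the MFD core's IRQ range and check the STATUS register from the handler; the my_* names are hypothetical.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mfd/twl6040-codec.h>

static irqreturn_t my_plug_handler(int irq, void *data)
{
	struct twl6040 *twl6040 = data;
	int status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);

	if (status >= 0 && (status & TWL6040_PLUGCOMP))
		pr_info("headset inserted\n");

	return IRQ_HANDLED;
}

static int my_child_probe(struct twl6040 *twl6040)
{
	return twl6040_request_irq(twl6040, TWL6040_IRQ_PLUG,
				   my_plug_handler, "twl6040-plug", twl6040);
}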
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ae28e93..561567e 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -533,4 +533,14 @@
kernel_ulong_t driver_data; /* data private to the driver */
};
+/* rpmsg */
+
+#define RPMSG_NAME_SIZE 32
+#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s"
+
+struct rpmsg_device_id {
+ char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/omap_ion.h b/include/linux/omap_ion.h
new file mode 100644
index 0000000..f73f127
--- /dev/null
+++ b/include/linux/omap_ion.h
@@ -0,0 +1,88 @@
+/*
+ * include/linux/omap_ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_OMAP_ION_H
+#define _LINUX_OMAP_ION_H
+
+#include <linux/types.h>
+
+/**
+ * struct omap_ion_tiler_alloc_data - metadata passed from userspace for allocations
+ * @w: width of the allocation
+ * @h: height of the allocation
+ * @fmt: format of the data (8, 16, 32bit or page)
+ * @flags: flags passed to heap
+ * @stride: stride of the allocation, returned to caller from kernel
+ * @handle: pointer that will be populated with a cookie to use to refer
+ * to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct omap_ion_tiler_alloc_data {
+ size_t w;
+ size_t h;
+ int fmt;
+ unsigned int flags;
+ struct ion_handle *handle;
+ size_t stride;
+ size_t offset;
+};
+
+#ifdef __KERNEL__
+int omap_ion_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data);
+int omap_ion_nonsecure_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data);
+/* given a handle in the tiler, return a list of tiler pages that back it */
+int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
+ int *n, u32 ** tiler_pages);
+#endif /* __KERNEL__ */
+
+/* additional heaps used only on omap */
+enum {
+ OMAP_ION_HEAP_TYPE_TILER = ION_HEAP_TYPE_CUSTOM + 1,
+};
+
+#define OMAP_ION_HEAP_TILER_MASK (1 << OMAP_ION_HEAP_TYPE_TILER)
+
+enum {
+ OMAP_ION_TILER_ALLOC,
+};
+
+/**
+ * These should match the defines in the tiler driver
+ */
+enum {
+ TILER_PIXEL_FMT_MIN = 0,
+ TILER_PIXEL_FMT_8BIT = 0,
+ TILER_PIXEL_FMT_16BIT = 1,
+ TILER_PIXEL_FMT_32BIT = 2,
+ TILER_PIXEL_FMT_PAGE = 3,
+ TILER_PIXEL_FMT_MAX = 3
+};
+
+/**
+ * List of heaps in the system
+ */
+enum {
+ OMAP_ION_HEAP_LARGE_SURFACES,
+ OMAP_ION_HEAP_TILER,
+ OMAP_ION_HEAP_SECURE_INPUT,
+ OMAP_ION_HEAP_NONSECURE_TILER,
+};
+
+#endif /* _LINUX_OMAP_ION_H */
+
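Kernel-side sketch of the TILER allocation helper declared above; the ion client is assumed to have been created elsewhere with the ion header available in this tree, and the dimensions are arbitrary:

#include <linux/kernel.h>
#include <linux/ion.h>		/* assumed location of the ion client API */
#include <linux/omap_ion.h>

static int my_alloc_tiler_buffer(struct ion_client *my_ion_client)
{
	struct omap_ion_tiler_alloc_data alloc = {
		.w = 1920,
		.h = 1080,
		.fmt = TILER_PIXEL_FMT_32BIT,
	};
	int err = omap_ion_tiler_alloc(my_ion_client, &alloc);

	if (!err)
		pr_info("tiler buffer: stride=%zu offset=%zu\n",
			alloc.stride, alloc.offset);

	return err;
}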
diff --git a/include/linux/omap_v4l2_gfx.h b/include/linux/omap_v4l2_gfx.h
new file mode 100644
index 0000000..cb175e5
--- /dev/null
+++ b/include/linux/omap_v4l2_gfx.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * This file specifies the custom ioctl API between a client "consumer"
+ * process and the V4L2-GFX driver. The consumer process should only use
+ * these APIs and will typically/ultimately be a GL application.
+ *
+ * There will also be a "producer" process which queues multimedia
+ * content to the driver, however, this will only use standard V4L2 APIs.
+ */
+
+#ifndef _OMAP_V4L2_GFX_H_
+#define _OMAP_V4L2_GFX_H_
+
+#include <linux/videodev.h>
+
+/*
+ * @see V4L2_GFX_IOC_CONSUMER, struct v4l2_gfx_consumer_params
+ */
+enum v4l2_gfx_consumer_type {
+ /*
+ * Wait for the producer process to activate a video stream
+ */
+ V4L2_GFX_CONSUMER_WAITSTREAM,
+ };
+
+/*
+ * @see V4L2_GFX_IOC_CONSUMER
+ */
+struct v4l2_gfx_consumer_params {
+ /*
+ * @see v4l2_gfx_consumer_type
+ */
+ int type; /* w */
+ /*
+ * If the consumer process is waiting the ioctl will block until the
+ * timeout expires or the expected event occurs, see the type field
+ */
+ unsigned int timeout_ms; /* w */
+ /*
+ * If acquire_timeout_ms > 0 and no streaming activity has been detected
+ * for acquire_timeout_ms milliseconds the V4L2_GFX_IOC_ACQ ioctl will
+ * return with ETIMEDOUT
+ */
+ unsigned int acquire_timeout_ms; /* w */
+};
+
+/*
+ * @see V4L2_GFX_IOC_INFO
+ */
+struct v4l2_gfx_info_params {
+
+ /*
+ * Return how many times the device has been opened, this number will
+ * decrement when the device is closed.
+ *
+ * One use for this might be to detect if a consumer or producer is
+ * active and in the process of setting up a stream. However, this can be
+ * unreliable if those processes are in the middle of closing or crashing.
+ *
+ * Obviously this value will always be at least one i.e. the process
+ * issuing the ioctl opens the device.
+ */
+ unsigned int opencnt; /* r */
+
+};
+
+/*
+ * @see V4L2_GFX_IOC_PRODUCER
+ */
+struct v4l2_gfx_producer_params {
+ /*
+ * If set mark the producer side as open, if not set mark as closed.
+ * For Android we need this because the mediaserver won't close the
+ * driver.
+ */
+ #define V4L2_GFX_PRODUCER_MASK_OPEN 0x1
+ unsigned int flags; /* w */
+};
+
+struct v4l2_gfx_buf_params {
+ /*
+ * Buffer index.
+ *
+ * On acquire, when the ioctl returns the bufid field will be filled in
+ * with the next buffer with data available.
+ *
+ * On release, the consumer process just specifies the buffer to release
+ * which usually is the last acquired buffer index.
+ */
+ int bufid; /* r/w */
+
+ /*
+ * Cropping information
+ * For the acquire ioctl only
+ */
+ int crop_top; /* r */
+ int crop_left; /* r */
+ int crop_width; /* r */
+ int crop_height; /* r */
+};
+
+/*
+ * This ioctl should be issued once by the consumer process before starting
+ * any rendering loop. It allows the process to wait for the producer process
+ * to become ready.
+ *
+ * @see struct v4l2_gfx_consumer_params
+ *
+ * Return value:
+ * Returns 0 if successful, or -1 on error, in which case errno indicates
+ * the error.
+ */
+#define V4L2_GFX_IOC_CONSUMER _IOWR ('v', BASE_VIDIOCPRIVATE+0, \
+ struct v4l2_gfx_consumer_params)
+
+/*
+ * Acquire the buffer to be rendered and its properties.
+ *
+ * @see struct v4l2_gfx_buf_params
+ *
+ * Return value:
+ * Returns 0 if successful, or -1 on error, in which case errno indicates
+ * the error.
+ *
+ * ETIMEDOUT If acquire_timeout_ms is set via V4L2_GFX_IOC_CONSUMER
+ * this error code can be returned.
+ * ENODEV If the producer side of the stream stops this error will
+ * be returned.
+ */
+#define V4L2_GFX_IOC_ACQ _IOR ('v', BASE_VIDIOCPRIVATE+1, \
+ struct v4l2_gfx_buf_params)
+
+/*
+ * Release the buffer that was rendered
+ *
+ * @see struct v4l2_gfx_buf_params
+ *
+ * Return value:
+ * Returns 0 if successful, or -1 on error, in which case errno indicates
+ * the error.
+ *
+ * ETIMEDOUT It took longer than 16ms for the app to render the frame
+ * (This will probably go away to avoid render loop stalls)
+ * EINVAL Attempted to release an invalid buffer index.
+ */
+#define V4L2_GFX_IOC_REL _IOW ('v', BASE_VIDIOCPRIVATE+2, \
+ struct v4l2_gfx_buf_params)
+
+/*
+ * Ioctl used to get information about the device
+ *
+ * @see struct v4l2_gfx_info_params
+ *
+ * Return value:
+ * Returns 0 if successful, or -1 on error, in which case errno indicates
+ * the error.
+ */
+#define V4L2_GFX_IOC_INFO _IOWR ('v', BASE_VIDIOCPRIVATE+3, \
+ struct v4l2_gfx_info_params)
+
+/*
+ * Ioctl used to set producer params
+ *
+ * @see struct v4l2_gfx_producer_params
+ *
+ * Return value:
+ * Returns 0 if successful, or -1 on error, in which case errno indicates
+ * the error.
+ */
+#define V4L2_GFX_IOC_PRODUCER _IOWR ('v', BASE_VIDIOCPRIVATE+4, \
+ struct v4l2_gfx_producer_params)
+#endif // _OMAP_V4L2_GFX_H_
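User-space sketch of the consumer loop described in the comments above: wait for the producer stream, then acquire, render and release frames; the device node path is an assumption:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/omap_v4l2_gfx.h>

int consume_frames(const char *dev_path)	/* e.g. "/dev/video100", an assumption */
{
	struct v4l2_gfx_consumer_params cons = {
		.type = V4L2_GFX_CONSUMER_WAITSTREAM,
		.timeout_ms = 5000,
		.acquire_timeout_ms = 1000,
	};
	struct v4l2_gfx_buf_params buf;
	int fd = open(dev_path, O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, V4L2_GFX_IOC_CONSUMER, &cons) < 0) {
		close(fd);
		return -1;
	}

	while (ioctl(fd, V4L2_GFX_IOC_ACQ, &buf) == 0) {
		/* render buffer 'buf.bufid' here, honouring the crop_* fields */
		if (ioctl(fd, V4L2_GFX_IOC_REL, &buf) < 0)
			break;
	}

	close(fd);
	return 0;
}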
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
index c0b0187..80c5dc6 100644
--- a/include/linux/omapfb.h
+++ b/include/linux/omapfb.h
@@ -58,6 +58,7 @@
#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info)
#define OMAPFB_SET_TEARSYNC OMAP_IOW(62, struct omapfb_tearsync_info)
#define OMAPFB_GET_DISPLAY_INFO OMAP_IOR(63, struct omapfb_display_info)
+#define OMAPFB_ENABLEVSYNC OMAP_IOW(64, int)
#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
@@ -258,6 +259,16 @@
extern void omapfb_set_ctrl_platform_data(void *pdata);
extern void omapfb_reserve_sdram_memblock(void);
+/* helper methods that may be used by other modules */
+enum omap_color_mode;
+struct omap_video_timings;
+int omapfb_mode_to_dss_mode(struct fb_var_screeninfo *var,
+ enum omap_color_mode *mode);
+void omapfb_fb2dss_timings(struct fb_videomode *fb_timings,
+ struct omap_video_timings *dss_timings);
+void omapfb_dss2fb_timings(struct omap_video_timings *dss_timings,
+ struct fb_videomode *fb_timings);
+
#endif
#endif /* __OMAPFB_H */
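Small user-space sketch of the new OMAPFB_ENABLEVSYNC ioctl; the framebuffer node name is an assumption:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/omapfb.h>

int omapfb_set_vsync(int enable)
{
	int fd = open("/dev/fb0", O_RDWR);	/* node name is an assumption */
	int ret;

	if (fd < 0)
		return -1;

	ret = ioctl(fd, OMAPFB_ENABLEVSYNC, &enable);
	close(fd);
	return ret;
}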
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 5449945..7020e97 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -94,12 +94,20 @@
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
#else
static inline int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
return -EINVAL;
}
+
+static inline
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* __LINUX_OPP_H__ */
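The new opp_free_cpufreq_table() pairs with opp_init_cpufreq_table(); a hedged sketch of a cpufreq driver using both:

#include <linux/opp.h>
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table *freq_table;

static int my_cpufreq_table_init(struct device *cpu_dev)
{
	/* build a cpufreq table from the OPPs registered for this device */
	return opp_init_cpufreq_table(cpu_dev, &freq_table);
}

static void my_cpufreq_table_exit(struct device *cpu_dev)
{
	/* release the table allocated by opp_init_cpufreq_table() */
	opp_free_cpufreq_table(cpu_dev, &freq_table);
}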
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
new file mode 100644
index 0000000..b7264e1
--- /dev/null
+++ b/include/linux/remoteproc.h
@@ -0,0 +1,311 @@
+/*
+ * Remote Processor Framework
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef REMOTEPROC_H
+#define REMOTEPROC_H
+
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/pm_qos_params.h>
+
+/* Must match the BIOS version embedded in the BIOS firmware image */
+#define RPROC_BIOS_VERSION 2
+
+/* Maximum number of entries that can be added for lookup */
+#define RPROC_MAX_MEM_ENTRIES 20
+
+/**
+ * The following enums and structures define the binary format of the images
+ * we load and run the remote processors with.
+ *
+ * The binary format is as follows:
+ *
+ * struct {
+ * char magic[4] = { 'R', 'P', 'R', 'C' };
+ * u32 version;
+ * u32 header_len;
+ * char header[...] = { header_len bytes of unformatted, textual header };
+ * struct section {
+ * u32 type;
+ * u64 da;
+ * u32 len;
+ * u8 content[...] = { len bytes of binary data };
+ * } [ no limit on number of sections ];
+ * } __packed;
+ */
+struct fw_header {
+ char magic[4];
+ u32 version;
+ u32 header_len;
+ char header[0];
+} __packed;
+
+struct fw_section {
+ u32 type;
+ u64 da;
+ u32 len;
+ char content[0];
+} __packed;
+
+enum fw_section_type {
+ FW_RESOURCE = 0,
+ FW_TEXT = 1,
+ FW_DATA = 2,
+ FW_MMU = 3,
+ FW_SIGNATURE = 4,
+};
+
+struct fw_resource {
+ u32 type;
+ u64 da;
+ u64 pa;
+ u32 len;
+ u32 reserved;
+ u8 name[48];
+} __packed;
+
+enum fw_resource_type {
+ RSC_CARVEOUT = 0,
+ RSC_DEVMEM = 1,
+ RSC_DEVICE = 2,
+ RSC_IRQ = 3,
+ RSC_TRACE = 4,
+ RSC_BOOTADDR = 5,
+ RSC_CRASHDUMP = 6,
+ RSC_END = 7,
+};
+
+/**
+ * struct rproc_mem_pool - descriptor for the rproc's contiguous memory pool data
+ *
+ * @mem_base: starting physical address of the dynamic pool
+ * @mem_size: size of the initial dynamic pool
+ * @cur_base: current available physical address in the pool
+ * @cur_size: remaining available memory in the pool
+ * @st_base: starting physical address of the static pool
+ * @st_size: size of the static pool
+ */
+struct rproc_mem_pool {
+ phys_addr_t mem_base;
+ u32 mem_size;
+ phys_addr_t cur_base;
+ u32 cur_size;
+ phys_addr_t st_base;
+ u32 st_size;
+};
+
+/**
+ * struct rproc_mem_entry - descriptor of a remote memory region
+ *
+ * @da: virtual address as seen by the device (aka device address)
+ * @pa: physical address
+ * @size: size of this memory region
+ */
+struct rproc_mem_entry {
+ u64 da;
+ phys_addr_t pa;
+ u32 size;
+ bool core;
+};
+
+enum rproc_constraint {
+ RPROC_CONSTRAINT_SCALE,
+ RPROC_CONSTRAINT_LATENCY,
+ RPROC_CONSTRAINT_BANDWIDTH,
+};
+
+struct rproc;
+
+struct rproc_ops {
+ int (*start)(struct rproc *rproc, u64 bootaddr);
+ int (*stop)(struct rproc *rproc);
+ int (*suspend)(struct rproc *rproc, bool force);
+ int (*resume)(struct rproc *rproc);
+ int (*iommu_init)(struct rproc *, int (*)(struct rproc *, u64, u32));
+ int (*iommu_exit)(struct rproc *);
+ int (*set_lat)(struct rproc *rproc, long v);
+ int (*set_bw)(struct rproc *rproc, long v);
+ int (*scale)(struct rproc *rproc, long v);
+ int (*watchdog_init)(struct rproc *, int (*)(struct rproc *));
+ int (*watchdog_exit)(struct rproc *);
+ void (*dump_registers)(struct rproc *);
+};
+
+/*
+ * enum rproc_state - remote processor states
+ *
+ * @RPROC_OFFLINE: needs firmware load and init to exit this state.
+ *
+ * @RPROC_SUSPENDED: needs to be woken up to receive a message.
+ *
+ * @RPROC_RUNNING: up and running.
+ *
+ * @RPROC_LOADING: asynchronous firmware loading has started
+ *
+ * @RPROC_CRASHED: needs to be logged, connections torn down, resources
+ * released, and returned to OFFLINE.
+ */
+enum rproc_state {
+ RPROC_OFFLINE,
+ RPROC_SUSPENDED,
+ RPROC_RUNNING,
+ RPROC_LOADING,
+ RPROC_CRASHED,
+};
+
+/*
+ * enum rproc_event - remote processor events
+ *
+ * @RPROC_ERROR: Fatal error has happened on the remote processor.
+ *
+ * @RPROC_PRE_SUSPEND: users can register for that event in order to cancel
+ * autosuspend; they just need to return an error from the
+ * callback function.
+ *
+ * @RPROC_POS_SUSPEND: users can register for that event in order to release
+ * resources not needed when the remote processor is
+ * sleeping or if they need to save some context.
+ *
+ * @RPROC_RESUME: users should use this event to revert what was done in the
+ * POS_SUSPEND event.
+ *
+ * @RPROC_SECURE: remote processor secure mode has changed.
+ */
+enum rproc_event {
+ RPROC_ERROR,
+ RPROC_PRE_SUSPEND,
+ RPROC_POS_SUSPEND,
+ RPROC_RESUME,
+ RPROC_SECURE,
+};
+
+#define RPROC_MAX_NAME 100
+
+/*
+ * struct rproc - a physical remote processor device
+ *
+ * @next: next rproc entry in the list
+ * @name: human readable name of the rproc, cannot exceed RPROC_MAX_NAME bytes
+ * @memory_maps: table of da-to-pa memory maps (relevant if device is behind
+ * an iommu)
+ * @memory_pool: platform-specific contiguous memory pool data (relevant for
+ * allocating memory needed for the remote processor image)
+ * @firmware: name of firmware file to be loaded
+ * @owner: reference to the platform-specific rproc module
+ * @priv: private data which belongs to the platform-specific rproc module
+ * @ops: platform-specific start/stop rproc handlers
+ * @dev: reference to the platform-specific rproc dev
+ * @count: usage refcount
+ * @state: rproc_state enum value representing the state of the device
+ * @lock: lock which protects concurrent manipulations of the rproc
+ * @dbg_dir: debugfs directory of this rproc device
+ * @trace_buf0: main trace buffer of the remote processor
+ * @trace_buf1: second, optional, trace buffer of the remote processor
+ * @trace_len0: length of main trace buffer of the remote processor
+ * @trace_len1: length of the second (and optional) trace buffer
+ * @cdump_buf0: main exception/crash dump buffer of the remote processor
+ * @cdump_buf1: second exception/crash dump buffer of the remote processor
+ * @cdump_len0: length of main crash dump buffer of the remote processor
+ * @cdump_len1: length of the second (and optional) crash dump buffer
+ * @firmware_loading_complete: marks the end of asynchronous firmware loading
+ * @mmufault_work: work in charge of notifying an MMU fault
+ * @nb_error: notify block for fatal errors
+ * @error_comp: completion used when an error happens
+ * @secure_ttb: private data for configuring iommu in secure mode
+ * @secure_restart: completion event notifier for the secure restart process
+ * @secure_mode: flag to dictate whether to enable secure loading
+ * @secure_ok: restart status flag to be looked up upon the event's completion
+ */
+struct rproc {
+ struct list_head next;
+ const char *name;
+ struct rproc_mem_entry memory_maps[RPROC_MAX_MEM_ENTRIES];
+ struct rproc_mem_pool *memory_pool;
+ const char *firmware;
+ struct module *owner;
+ void *priv;
+ const struct rproc_ops *ops;
+ struct device *dev;
+ int count;
+ int state;
+ struct mutex lock;
+ struct dentry *dbg_dir;
+ char *trace_buf0, *trace_buf1;
+ char *last_trace_buf0, *last_trace_buf1;
+ int trace_len0, trace_len1;
+ int last_trace_len0, last_trace_len1;
+ void *cdump_buf0, *cdump_buf1;
+ int cdump_len0, cdump_len1;
+ struct mutex tlock;
+ struct completion firmware_loading_complete;
+ struct work_struct error_work;
+ struct blocking_notifier_head nbh;
+ struct completion error_comp;
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+ unsigned sus_timeout;
+ bool force_suspend;
+ bool need_resume;
+ struct mutex pm_lock;
+#endif
+ struct pm_qos_request_list *qos_request;
+ void *secure_ttb;
+ struct completion secure_restart;
+ struct mutex secure_lock;
+ bool secure_mode;
+ bool secure_ok;
+ bool halt_on_crash;
+ char *header;
+ int header_len;
+};
+
+int rproc_set_secure(const char *, bool);
+struct rproc *rproc_get(const char *);
+void rproc_put(struct rproc *);
+int rproc_event_register(struct rproc *, struct notifier_block *);
+int rproc_event_unregister(struct rproc *, struct notifier_block *);
+int rproc_register(struct device *, const char *, const struct rproc_ops *,
+ const char *, struct rproc_mem_pool *, struct module *,
+ unsigned int timeout);
+int rproc_unregister(const char *);
+void rproc_last_busy(struct rproc *);
+#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
+extern const struct dev_pm_ops rproc_gen_pm_ops;
+#define GENERIC_RPROC_PM_OPS (&rproc_gen_pm_ops)
+#else
+#define GENERIC_RPROC_PM_OPS NULL
+#endif
+int rproc_set_constraints(struct rproc *, enum rproc_constraint type, long v);
+int rproc_error_notify(struct rproc *rproc);
+
+#endif /* REMOTEPROC_H */
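Hedged sketch of a client of the API above: take a reference on a remote processor, register for fatal-error events and drop the reference when done; the processor name is an assumption:

#include <linux/kernel.h>
#include <linux/remoteproc.h>

static int my_rproc_event(struct notifier_block *nb, unsigned long type,
			  void *data)
{
	if (type == RPROC_ERROR)
		pr_err("remote processor reported a fatal error\n");

	return NOTIFY_DONE;
}

static struct notifier_block my_rproc_nb = {
	.notifier_call = my_rproc_event,
};

static struct rproc *my_rproc;

static int my_client_start(void)
{
	my_rproc = rproc_get("ipu");	/* processor name is an assumption */
	if (!my_rproc)
		return -ENODEV;

	return rproc_event_register(my_rproc, &my_rproc_nb);
}

static void my_client_stop(void)
{
	rproc_event_unregister(my_rproc, &my_rproc_nb);
	rproc_put(my_rproc);
}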
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
new file mode 100644
index 0000000..1f7ba09
--- /dev/null
+++ b/include/linux/rpmsg.h
@@ -0,0 +1,191 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_RPMSG_H
+#define _LINUX_RPMSG_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/* The feature bitmap for virtio rpmsg */
+#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+
+/**
+ * struct rpmsg_hdr - common header for all rpmsg messages
+ *
+ * @len: length of the message payload, in bytes
+ * @flags: message flags
+ * @src: source address of the message
+ * @dst: destination address of the message
+ * @unused: reserved for future use
+ * @data: the message payload immediately follows this header
+ */
+struct rpmsg_hdr {
+ u16 len;
+ u16 flags;
+ u32 src;
+ u32 dst;
+ u32 unused;
+ u8 data[0];
+} __packed;
+
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+} __packed;
+
+/* driver requests */
+enum {
+ VPROC_BUF_ADDR,
+ VPROC_BUF_NUM,
+ VPROC_BUF_SZ,
+ VPROC_SIM_BASE,
+ VPROC_STATIC_CHANNELS,
+};
+
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
+struct virtproc_info;
+
+/**
+ * rpmsg_channel - rpmsg channels are the devices of the rpmsg bus
+ *
+ * @vrp: the remote processor this channel connects to
+ * @dev: underlying device
+ * @id: the device type identification (used to match an rpmsg driver)
+ * @src: local address of this channel
+ * @dst: destination address of the remote service
+ * @priv: private pointer for the driver's use.
+ * @ept: local rpmsg endpoint of this channel
+ * @announce: need to tell remoteproc about channel creation/removal
+ */
+struct rpmsg_channel {
+ struct virtproc_info *vrp;
+ struct device dev;
+ struct rpmsg_device_id id;
+ u32 src;
+ u32 dst;
+ void *priv;
+ struct rpmsg_endpoint *ept;
+ bool announce;
+};
+
+struct rpmsg_channel_info {
+ char name[RPMSG_NAME_SIZE];
+ u32 src;
+ u32 dst;
+};
+
+/**
+ * struct rpmsg_endpoint - a local rpmsg endpoint (rx address plus callback)
+ *
+ * @rpdev: the rpmsg channel this endpoint belongs to
+ * @cb: rx callback invoked when a message arrives for this endpoint
+ * @addr: local rpmsg address of this endpoint
+ * @priv: private data for the endpoint's user
+ */
+struct rpmsg_endpoint {
+ struct rpmsg_channel *rpdev;
+ void (*cb)(struct rpmsg_channel *, void *, int, void *, u32);
+ u32 addr;
+ void *priv;
+};
+
+/**
+ * rpmsg_driver - operations for a rpmsg I/O driver
+ * @drv: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function when a device is removed.
+ * @callback: invoked when a message is received on the channel
+ */
+struct rpmsg_driver {
+ struct device_driver drv;
+ const struct rpmsg_device_id *id_table;
+ int (*probe)(struct rpmsg_channel *dev);
+ void (*remove)(struct rpmsg_channel *dev);
+ void (*callback)(struct rpmsg_channel *, void *, int, void *, u32);
+};
+
+int register_rpmsg_device(struct rpmsg_channel *dev);
+void unregister_rpmsg_device(struct rpmsg_channel *dev);
+int register_rpmsg_driver(struct rpmsg_driver *drv);
+void unregister_rpmsg_driver(struct rpmsg_driver *drv);
+void rpmsg_destroy_ept(struct rpmsg_endpoint *);
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
+ void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
+ void *priv, u32 addr);
+
+int
+rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
+
+static inline
+int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len)
+{
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
+{
+ return rpmsg_send_offchannel(rpdev, rpdev->src, rpdev->dst, data, len);
+}
+
+static inline
+int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
+{
+ return rpmsg_send_offchannel(rpdev, rpdev->src, dst, data, len);
+}
+
+static inline
+int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len)
+{
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+static inline
+int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
+{
+ return rpmsg_trysend_offchannel(rpdev, rpdev->src, rpdev->dst,
+ data, len);
+}
+
+static inline
+int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
+{
+ return rpmsg_trysend_offchannel(rpdev, rpdev->src, dst, data, len);
+}
+
+#endif /* _LINUX_RPMSG_H */
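Hedged sketch of an rpmsg client driver built on the declarations above (and on the rpmsg_device_id added to mod_devicetable.h earlier): it matches a named service, greets it from probe and dumps whatever the remote side answers; the service name is an assumption:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rpmsg.h>

static void my_rpmsg_cb(struct rpmsg_channel *rpdev, void *data, int len,
			void *priv, u32 src)
{
	print_hex_dump(KERN_DEBUG, "rpmsg rx: ", DUMP_PREFIX_NONE, 16, 1,
		       data, len, true);
}

static int my_rpmsg_probe(struct rpmsg_channel *rpdev)
{
	/* greet the remote service on its announced address */
	return rpmsg_send(rpdev, "hello", 6);
}

static void my_rpmsg_remove(struct rpmsg_channel *rpdev)
{
}

static struct rpmsg_device_id my_rpmsg_id_table[] = {
	{ .name = "rpmsg-client-sample" },	/* service name is an assumption */
	{ },
};

static struct rpmsg_driver my_rpmsg_client = {
	.drv.name	= KBUILD_MODNAME,
	.drv.owner	= THIS_MODULE,
	.id_table	= my_rpmsg_id_table,
	.probe		= my_rpmsg_probe,
	.callback	= my_rpmsg_cb,
	.remove		= my_rpmsg_remove,
};

static int __init my_rpmsg_client_init(void)
{
	return register_rpmsg_driver(&my_rpmsg_client);
}
module_init(my_rpmsg_client_init);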
diff --git a/include/linux/rpmsg_omx.h b/include/linux/rpmsg_omx.h
new file mode 100644
index 0000000..15503d5
--- /dev/null
+++ b/include/linux/rpmsg_omx.h
@@ -0,0 +1,144 @@
+/*
+ * OMX offloading remote processor driver
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RPMSG_OMX_H
+#define RPMSG_OMX_H
+
+#include <linux/ioctl.h>
+
+#define OMX_IOC_MAGIC 'X'
+
+#define OMX_IOCCONNECT _IOW(OMX_IOC_MAGIC, 1, char *)
+#define OMX_IOCIONREGISTER _IOWR(OMX_IOC_MAGIC, 2, struct ion_fd_data)
+#define OMX_IOCIONUNREGISTER _IOWR(OMX_IOC_MAGIC, 3, struct ion_fd_data)
+
+#define OMX_IOC_MAXNR (3)
+
+#ifdef __KERNEL__
+
+/**
+ * enum omx_msg_types - various message types currently supported
+ *
+ * @OMX_CONN_REQ: a connection request message type. The message should carry
+ * the name of the OMX service we are trying to connect to. An instance of
+ * that service will be created remotely, and its address will be sent as
+ * a reply.
+ *
+ * @OMX_CONN_RSP: a response to a connection request. The message carries
+ * an error code (success/failure); if the connection was established
+ * successfully, the addr field carries the address of the newly created
+ * OMX instance.
+ *
+ * @OMX_DISCONNECT: disconnect a remote OMX instance. This message tells the
+ * remote processor to release the resources coupled with this connection.
+ *
+ * @OMX_RAW_MSG: a message that should be propagated as-is to the user.
+ * This immediately enables user-space development to start. As the interface
+ * matures, this message type will most likely no longer be needed.
+ */
+enum omx_msg_types {
+ OMX_CONN_REQ = 0,
+ OMX_CONN_RSP = 1,
+ OMX_DISCONNECT = 4,
+ OMX_RAW_MSG = 5,
+ /* todo: do we need a disconnect response ? ION refcounts should allow
+ * asynchronous release of relevant buffers */
+};
+
+/**
+ * enum omx_error_codes - various error codes that will be used
+ *
+ * @OMX_SUCCESS: success
+ *
+ * @OMX_NOTSUPP: not supported
+ *
+ * @OMX_NOMEM: remote processor is out of memory
+ */
+enum omx_error_codes {
+ OMX_SUCCESS = 0,
+ OMX_NOTSUPP = 1,
+ OMX_NOMEM = 2,
+};
+
+/* keep documenting... */
+enum omx_state {
+ OMX_UNCONNECTED,
+ OMX_CONNECTED,
+ OMX_FAIL,
+};
+
+/**
+ * struct omx_msg_hdr - common header for all OMX messages
+ * @type: type of message, see enum omx_msg_types
+ * @flags: currently unused, should be zero
+ * @len: length of msg payload (in bytes)
+ * @data: the msg payload (depends on the message type)
+ *
+ * All OMX messages will start with this common header (which will begin
+ * right after the standard rpmsg header ends).
+ */
+struct omx_msg_hdr {
+ u32 type;
+ u32 flags;
+ u32 len;
+ char data[0];
+} __packed;
+
+struct omx_conn_rsp {
+ u32 status;
+ u32 addr;
+} __packed;
+
+struct omx_disc_req {
+ u32 addr;
+} __packed;
+
+
+#endif /* __KERNEL__ */
+
+/* temporarily exposed to user space too */
+struct omx_conn_req {
+ char name[48];
+} __packed;
+
+/* the packet structure (actual message sent to omx service) */
+struct omx_packet {
+ uint16_t desc; /* descriptor, and omx service status */
+ uint16_t msg_id; /* message id */
+ uint32_t flags; /* Set to a fixed value for now. */
+ uint32_t fxn_idx; /* Index into OMX service's function table.*/
+ int32_t result; /* The OMX function status. */
+ uint32_t data_size;/* Size of in/out data to/from the function. */
+	uint32_t data[0];  /* Payload of data_size chars passed to function */
+};
+
+#endif /* RPMSG_OMX_H */
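On the kernel side, an OMX control message is simply a struct omx_msg_hdr followed by a type-specific payload (struct omx_conn_req for OMX_CONN_REQ), sent over the rpmsg channel that backs the OMX service. The helper below is a minimal sketch of that framing only; the function name is made up, and waiting for the matching OMX_CONN_RSP in the channel callback is not shown.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg_omx.h>

/*
 * Build and send an OMX connection request on an existing rpmsg channel.
 * The service name is illustrative; real names come from the OMX stack.
 */
static int demo_omx_connect(struct rpmsg_channel *rpdev, const char *service)
{
	struct omx_msg_hdr *hdr;
	struct omx_conn_req *req;
	int len = sizeof(*hdr) + sizeof(*req);
	int ret;

	hdr = kzalloc(len, GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	hdr->type = OMX_CONN_REQ;
	hdr->flags = 0;			/* currently unused, must be zero */
	hdr->len = sizeof(*req);	/* payload length only */

	req = (struct omx_conn_req *)hdr->data;
	strlcpy(req->name, service, sizeof(req->name));

	/* the OMX_CONN_RSP carrying the new instance address arrives via the
	 * channel callback; waiting for it is not shown here */
	ret = rpmsg_send(rpdev, hdr, len);

	kfree(hdr);
	return ret;
}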
diff --git a/include/linux/rpmsg_resmgr.h b/include/linux/rpmsg_resmgr.h
new file mode 100644
index 0000000..707809e
--- /dev/null
+++ b/include/linux/rpmsg_resmgr.h
@@ -0,0 +1,131 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright(c) 2011 Texas Instruments. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_RPMSG_RESMGR_H
+#define _LINUX_RPMSG_RESMGR_H
+
+#define MAX_NUM_SDMA_CHANNELS 16
+
+enum {
+ RPRM_GPTIMER = 0,
+ RPRM_IVAHD = 1,
+ RPRM_IVASEQ0 = 2,
+ RPRM_IVASEQ1 = 3,
+ RPRM_L3BUS = 4,
+ RPRM_ISS = 5,
+ RPRM_FDIF = 6,
+ RPRM_SL2IF = 7,
+ RPRM_AUXCLK = 8,
+ RPRM_REGULATOR = 9,
+ RPRM_GPIO = 10,
+ RPRM_SDMA = 11,
+ RPRM_IPU = 12,
+ RPRM_DSP = 13,
+ RPRM_I2C = 14,
+ RPRM_MAX
+};
+
+enum {
+ RPRM_CONNECT = 0,
+ RPRM_REQ_ALLOC = 1,
+ RPRM_REQ_FREE = 2,
+ RPRM_DISCONNECT = 3,
+ RPRM_REQ_CONSTRAINTS = 4,
+ RPRM_REL_CONSTRAINTS = 5,
+};
+
+enum {
+ RPRM_SCALE = 0x1,
+ RPRM_LATENCY = 0x2,
+ RPRM_BANDWIDTH = 0x4,
+};
+
+struct rprm_request {
+ u32 res_type;
+ u32 acquire;
+ u32 res_id;
+ char data[];
+} __packed;
+
+struct rprm_ack {
+ u32 ret;
+ u32 res_type;
+ u32 res_id;
+ u32 base;
+ char data[];
+} __packed;
+
+struct rprm_gpt {
+ u32 id;
+ u32 src_clk;
+};
+
+struct rprm_auxclk {
+ u32 id;
+ u32 clk_rate;
+ u32 parent_src_clk;
+ u32 parent_src_clk_rate;
+};
+
+struct rprm_regulator {
+ u32 id;
+ u32 min_uv;
+ u32 max_uv;
+};
+
+struct rprm_gpio {
+ u32 id;
+};
+
+/**
+ * struct rprm_i2c - I2C resource data
+ * @id: I2C id
+ *
+ * Stores the I2C-related information carried in an I2C resource request.
+ */
+struct rprm_i2c {
+ u32 id;
+};
+
+struct rprm_sdma {
+ u32 num_chs;
+ s32 channels[MAX_NUM_SDMA_CHANNELS];
+};
+
+struct rprm_constraints_data {
+ u32 mask;
+ long frequency;
+ long bandwidth;
+ long latency;
+};
+
+#endif /* _LINUX_RPMSG_RESMGR_H */
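The framing implied by these structures is a struct rprm_request (answered by a struct rprm_ack) whose data[] tail carries the resource-specific record, e.g. struct rprm_gpt for RPRM_GPTIMER. The sketch below only shows that packing; treating the acquire field as one of the RPRM_REQ_ALLOC/RPRM_REQ_FREE actions is an assumption, since this header does not document the field, and the transport used to send the buffer is not shown.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/rpmsg_resmgr.h>

/*
 * Pack a GPTimer allocation request as implied by rprm_request/rprm_gpt.
 * Assumption: 'acquire' carries one of the RPRM_REQ_ALLOC or RPRM_REQ_FREE
 * actions. Returns a kzalloc'ed buffer the caller must send and then kfree().
 */
static struct rprm_request *demo_pack_gpt_request(u32 timer_id, u32 src_clk,
						  int *lenp)
{
	struct rprm_request *req;
	struct rprm_gpt gpt = {
		.id		= timer_id,
		.src_clk	= src_clk,
	};
	int len = sizeof(*req) + sizeof(gpt);

	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return NULL;

	req->res_type = RPRM_GPTIMER;
	req->acquire = RPRM_REQ_ALLOC;	/* assumption, see above */
	req->res_id = 0;		/* assigned by the resource manager */
	memcpy(req->data, &gpt, sizeof(gpt));

	*lenp = len;
	return req;
}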
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 85bb0bb..1520c06 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -35,5 +35,6 @@
#define VIRTIO_ID_RNG 4 /* virtio ring */
#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_RPMSG 10 /* virtio remote processor messaging */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 4a32cb6..7894a16 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -177,5 +177,7 @@
void vring_transport_features(struct virtio_device *vdev);
irqreturn_t vring_interrupt(int irq, void *_vq);
+struct vring_virtqueue;
+bool virtqueue_more_used(struct virtqueue *vq);
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_RING_H */
diff --git a/include/sound/omap-abe-dsp.h b/include/sound/omap-abe-dsp.h
new file mode 100644
index 0000000..901a55c
--- /dev/null
+++ b/include/sound/omap-abe-dsp.h
@@ -0,0 +1,21 @@
+/*
+ * omap-aess -- OMAP4 ABE DSP
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OMAP4_ABE_DSP_H
+#define _OMAP4_ABE_DSP_H
+
+struct omap4_abe_dsp_pdata {
+ bool (*was_context_lost)(struct device *dev);
+ int (*device_scale)(struct device *req_dev,
+ struct device *target_dev,
+ unsigned long rate);
+};
+
+#endif
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e1bad11..6a18765 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -413,6 +413,7 @@
#endif
/* misc flags */
unsigned int hw_opened: 1;
+ unsigned int hw_no_buffer: 1; /* substream may not have a buffer */
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 1bafe95..bda171e 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -172,6 +172,8 @@
struct snd_soc_dai *);
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
+ int (*bespoke_trigger)(struct snd_pcm_substream *, int,
+ struct snd_soc_dai *);
/*
* For hardware based FIFO caused delay reporting.
* Optional.
@@ -209,6 +211,10 @@
struct snd_soc_pcm_stream capture;
struct snd_soc_pcm_stream playback;
unsigned int symmetric_rates:1;
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
};
/*
@@ -277,4 +283,98 @@
return dev_get_drvdata(dai->dev);
}
+/* Backend DAI PCM ops */
+static inline int snd_soc_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ mutex_lock(&rtd->pcm_mutex);
+
+ if (dai->driver->ops->startup)
+ ret = dai->driver->ops->startup(substream, dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_active++;
+ else
+ dai->capture_active++;
+
+ dai->active++;
+
+ mutex_unlock(&rtd->pcm_mutex);
+ return ret;
+}
+
+static inline void snd_soc_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ mutex_lock(&rtd->pcm_mutex);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_active--;
+ else
+ dai->capture_active--;
+
+ dai->active--;
+
+ if (dai->driver->ops->shutdown)
+ dai->driver->ops->shutdown(substream, dai);
+ mutex_unlock(&rtd->pcm_mutex);
+}
+
+static inline int snd_soc_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ mutex_lock(&rtd->pcm_mutex);
+
+ if (dai->driver->ops->hw_params)
+ ret = dai->driver->ops->hw_params(substream, hw_params, dai);
+
+ mutex_unlock(&rtd->pcm_mutex);
+ return ret;
+}
+
+static inline int snd_soc_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ mutex_lock(&rtd->pcm_mutex);
+
+ if (dai->driver->ops->hw_free)
+ ret = dai->driver->ops->hw_free(substream, dai);
+
+ mutex_unlock(&rtd->pcm_mutex);
+ return ret;
+}
+
+static inline int snd_soc_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ mutex_lock(&rtd->pcm_mutex);
+
+ if (dai->driver->ops->prepare)
+ ret = dai->driver->ops->prepare(substream, dai);
+
+ mutex_unlock(&rtd->pcm_mutex);
+ return ret;
+}
+
+static inline int snd_soc_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ if (dai->driver->ops->trigger)
+ return dai->driver->ops->trigger(substream, cmd, dai);
+ return 0;
+}
#endif
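These helpers give backend/DSP code a locked, refcounted way to drive a DAI through the usual PCM lifecycle without involving a frontend PCM device. A hypothetical caller would chain them in the normal ALSA order, as in the sketch below; the function name and the unwinding on error are illustrative, not lifted from the patch.

#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

/* Hypothetical: bring up a backend DAI for an already-opened substream */
static int demo_be_dai_start(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	int ret;

	ret = snd_soc_dai_startup(substream, dai);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_hw_params(substream, params, dai);
	if (ret < 0)
		goto err_shutdown;

	ret = snd_soc_dai_prepare(substream, dai);
	if (ret < 0)
		goto err_hw_free;

	ret = snd_soc_dai_trigger(substream, SNDRV_PCM_TRIGGER_START, dai);
	if (ret < 0)
		goto err_hw_free;

	return 0;

err_hw_free:
	snd_soc_dai_hw_free(substream, dai);
err_shutdown:
	snd_soc_dai_shutdown(substream, dai);
	return ret;
}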
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index c46e7d8..083f667 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -266,6 +266,12 @@
.get = snd_soc_dapm_get_enum_virt, \
.put = snd_soc_dapm_put_enum_virt, \
.private_value = (unsigned long)&xenum }
+#define SOC_DAPM_ENUM_EXT(xname, xenum, xget, xput) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_enum_double, \
+ .get = xget, \
+ .put = xput, \
+ .private_value = (unsigned long)&xenum }
#define SOC_DAPM_VALUE_ENUM(xname, xenum) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
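SOC_DAPM_ENUM_EXT mirrors SOC_DAPM_ENUM but lets the driver supply its own get/put handlers, which is what the routing code needs when a mux change must also trigger backend path updates. Below is a sketch of how a driver might declare such a control; the texts, names and empty handler bodies are purely illustrative.

#include <linux/kernel.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static const char *demo_mux_texts[] = { "None", "DL1", "DL2" };

static const struct soc_enum demo_mux_enum =
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(demo_mux_texts), demo_mux_texts);

static int demo_mux_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	/* report the currently routed input, tracked elsewhere by the driver */
	ucontrol->value.enumerated.item[0] = 0;
	return 0;
}

static int demo_mux_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	/* apply the new routing here, then report whether anything changed */
	return 0;
}

static const struct snd_kcontrol_new demo_mux_control =
	SOC_DAPM_ENUM_EXT("Demo Mux", demo_mux_enum, demo_mux_get, demo_mux_put);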
@@ -310,6 +316,7 @@
struct snd_soc_dapm_pin;
struct snd_soc_dapm_route;
struct snd_soc_dapm_context;
+struct snd_soc_dapm_widget_list;
int dapm_reg_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
@@ -348,11 +355,25 @@
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
+int snd_soc_dapm_query_path(struct snd_soc_dapm_context *dapm,
+ const char *source_name, const char *sink_name, int stream);
+const char *snd_soc_dapm_get_aif(struct snd_soc_dapm_context *dapm,
+ const char *stream_name, enum snd_soc_dapm_type type);
/* dapm events */
+void snd_soc_dapm_platform_stream_event(struct snd_soc_platform *platform,
+ const char *stream, int event);
+void snd_soc_dapm_codec_stream_event(struct snd_soc_codec *codec,
+ const char *stream, int event);
int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
const char *stream, int event);
void snd_soc_dapm_shutdown(struct snd_soc_card *card);
+/* external DAPM widget events */
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
+ struct snd_kcontrol *kcontrol, int connect);
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
+ struct snd_kcontrol *kcontrol, int change,
+ int mux, struct soc_enum *e);
/* dapm sys fs - used by the core */
int snd_soc_dapm_sys_add(struct device *dev);
@@ -367,12 +388,21 @@
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_get_pin_power(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
+/* dapm path query */
+int snd_soc_dapm_get_connected_widgets_type(struct snd_soc_dapm_context *dapm,
+ const char *stream_name, struct snd_soc_dapm_widget_list **list,
+ int stream, enum snd_soc_dapm_type type);
+int snd_soc_dapm_get_connected_widgets_name(struct snd_soc_dapm_context *dapm,
+ const char *name, struct snd_soc_dapm_widget_list **list, int stream);
+
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
@@ -429,6 +459,7 @@
/* status */
u32 connect:1; /* source and sink widgets are connected */
u32 walked:1; /* path has been walked */
+ u32 length:6; /* path length - used by route mapper */
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -444,6 +475,7 @@
char *name; /* widget name */
char *sname; /* stream name */
struct snd_soc_codec *codec;
+ struct snd_soc_platform *platform;
struct list_head list;
struct snd_soc_dapm_context *dapm;
@@ -452,6 +484,8 @@
unsigned char shift; /* bits to shift */
unsigned int saved_value; /* widget saved value */
unsigned int value; /* widget current value */
+ unsigned int path_idx;
+ unsigned int hops;
unsigned int mask; /* non-shifted mask */
unsigned int on_val; /* on state value */
unsigned int off_val; /* off state value */
@@ -507,12 +541,17 @@
struct device *dev; /* from parent - for debug */
struct snd_soc_codec *codec; /* parent codec */
+	struct snd_soc_platform *platform; /* parent platform */
struct snd_soc_card *card; /* parent card */
/* used during DAPM updates */
int dev_power;
struct list_head list;
+ int num_valid_paths;
+
+ int (*stream_event)(struct snd_soc_dapm_context *dapm);
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dapm;
#endif
diff --git a/include/sound/soc-dsp.h b/include/sound/soc-dsp.h
new file mode 100644
index 0000000..033d61a
--- /dev/null
+++ b/include/sound/soc-dsp.h
@@ -0,0 +1,128 @@
+/*
+ * linux/sound/soc-dsp.h -- ALSA SoC DSP
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_SOC_DSP_H
+#define __LINUX_SND_SOC_DSP_H
+
+#include <sound/pcm.h>
+
+struct snd_soc_dapm_widget;
+
+/*
+ * Types of runtime_update to perform (e.g. originating from FE PCM ops
+ * or audio route changes triggered by muxes/mixers).
+ */
+#define SND_SOC_DSP_UPDATE_NO 0
+#define SND_SOC_DSP_UPDATE_BE 1
+#define SND_SOC_DSP_UPDATE_FE 2
+
+/*
+ * DSP trigger ordering. Triggering flexibility is required as some DSPs
+ * require triggering before/after their clients/hosts.
+ *
+ * i.e. some clients may want to manually order this call in their PCM
+ * trigger() whilst others will just use the regular core ordering.
+ */
+enum snd_soc_dsp_trigger {
+ SND_SOC_DSP_TRIGGER_PRE = 0,
+ SND_SOC_DSP_TRIGGER_POST,
+ SND_SOC_DSP_TRIGGER_BESPOKE,
+};
+
+/*
+ * The DSP Frontend -> Backend link state.
+ */
+enum snd_soc_dsp_link_state {
+ SND_SOC_DSP_LINK_STATE_NEW = 0, /* newly created path */
+ SND_SOC_DSP_LINK_STATE_FREE, /* path to be dismantled */
+};
+
+struct snd_soc_dsp_params {
+ struct snd_soc_pcm_runtime *be;
+ struct snd_soc_pcm_runtime *fe;
+ enum snd_soc_dsp_link_state state;
+ struct list_head list_be;
+ struct list_head list_fe;
+ struct snd_pcm_hw_params hw_params;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_state;
+#endif
+};
+
+struct snd_soc_dsp_link {
+ bool capture;
+ bool playback;
+ enum snd_soc_dsp_trigger trigger[2];
+};
+
+/* FE DSP PCM ops - called by soc-core */
+int soc_dsp_fe_dai_open(struct snd_pcm_substream *substream);
+int soc_dsp_fe_dai_close(struct snd_pcm_substream *substream);
+int soc_dsp_fe_dai_prepare(struct snd_pcm_substream *substream);
+int soc_dsp_fe_dai_hw_free(struct snd_pcm_substream *substream);
+int soc_dsp_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd);
+int soc_dsp_fe_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+
+/* Backend DSP trigger.
+ * Can be called by core or components depending on trigger config.
+ */
+int soc_dsp_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
+
+/* Is this trigger() call required for this FE and stream */
+static inline int snd_soc_dsp_is_trigger_for_fe(struct snd_soc_pcm_runtime *fe,
+ int stream)
+{
+ return (fe->dsp[stream].runtime_update == SND_SOC_DSP_UPDATE_FE);
+}
+
+static inline int snd_soc_dsp_is_op_for_be(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int stream)
+{
+ if ((fe->dsp[stream].runtime_update == SND_SOC_DSP_UPDATE_FE) ||
+ ((fe->dsp[stream].runtime_update == SND_SOC_DSP_UPDATE_BE) &&
+ be->dsp[stream].runtime_update))
+ return 1;
+ else
+ return 0;
+}
+
+static inline int snd_soc_dsp_platform_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_platform *platform)
+{
+ if (platform->driver->ops->trigger)
+ return platform->driver->ops->trigger(substream, cmd);
+ return 0;
+}
+
+int soc_dsp_fe_state_count(struct snd_soc_pcm_runtime *be, int stream,
+ enum snd_soc_dsp_state state);
+
+/* Runtime update - open/close Backend DSP paths depending on mixer updates */
+int soc_dsp_runtime_update(struct snd_soc_dapm_widget *widget);
+
+/* Backend DSP suspend and resume */
+int soc_dsp_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
+int soc_dsp_fe_suspend(struct snd_soc_pcm_runtime *fe);
+int soc_dsp_be_ac97_cpu_dai_suspend(struct snd_soc_pcm_runtime *fe);
+int soc_dsp_fe_resume(struct snd_soc_pcm_runtime *fe);
+int soc_dsp_be_ac97_cpu_dai_resume(struct snd_soc_pcm_runtime *fe);
+
+/* DAPM stream events for Backend DSP paths */
+int soc_dsp_dapm_stream_event(struct snd_soc_pcm_runtime *fe,
+ int dir, const char *stream, int event);
+
+static inline struct snd_pcm_substream *snd_soc_dsp_get_substream(
+ struct snd_soc_pcm_runtime *be, int stream)
+{
+ return be->pcm->streams[stream].substream;
+}
+
+#endif
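A machine driver marks a DAI link as a dynamic frontend by setting the new dynamic flag and attaching a struct snd_soc_dsp_link, which states which directions exist and how each direction is triggered relative to its backends. The sketch below uses made-up device/DAI names; only the field names come from this patch and the existing snd_soc_dai_link.

#include <sound/soc.h>
#include <sound/soc-dsp.h>

/* Illustrative frontend link: regular core trigger ordering both ways */
static struct snd_soc_dsp_link demo_fe_media = {
	.playback	= true,
	.capture	= true,
	.trigger	= {
		SND_SOC_DSP_TRIGGER_POST,	/* playback */
		SND_SOC_DSP_TRIGGER_POST,	/* capture */
	},
};

static struct snd_soc_dai_link demo_dai_links[] = {
	{
		.name		= "Demo Media",
		.stream_name	= "Demo Media",
		.cpu_dai_name	= "demo-fe-dai",	/* hypothetical */
		.platform_name	= "demo-dsp-platform",	/* hypothetical */
		.dynamic	= 1,	/* FE: backend(s) picked at runtime */
		.dsp_link	= &demo_fe_media,
	},
};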
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 3a4bd3a..fa9f2ef 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -203,6 +203,24 @@
SOC_VALUE_ENUM_DOUBLE_DECL(name, xreg, xshift, xshift, xmask, xtexts, xvalues)
/*
+ * Component probe and remove ordering levels for components with runtime
+ * dependencies.
+ */
+#define SND_SOC_COMP_ORDER_FIRST -2
+#define SND_SOC_COMP_ORDER_EARLY -1
+#define SND_SOC_COMP_ORDER_NORMAL 0
+#define SND_SOC_COMP_ORDER_LATE 1
+#define SND_SOC_COMP_ORDER_LAST 2
+
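These levels are consumed through the probe_order/remove_order fields this patch adds to the codec and platform driver structures, letting a component that depends on another being registered first ask to be probed later (and typically removed earlier). A minimal illustrative use, with hypothetical names:

#include <sound/soc.h>

static int demo_platform_probe(struct snd_soc_platform *platform)
{
	/* runs after FIRST/EARLY/NORMAL components on the same card */
	return 0;
}

static struct snd_soc_platform_driver demo_platform_driver = {
	.probe		= demo_platform_probe,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.remove_order	= SND_SOC_COMP_ORDER_EARLY,
};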
+/* DAI Link Host Mode Support */
+#define SND_SOC_DAI_LINK_NO_HOST 0x1
+#define SND_SOC_DAI_LINK_OPT_HOST 0x2
+
+#define snd_soc_get_enum_text(soc_enum, idx) \
+ (soc_enum->texts ? soc_enum->texts[idx] : soc_enum->dtexts[idx])
+
+
+/*
* Bias levels
*
* @ON: Bias is fully on for audio playback and capture operations.
@@ -220,6 +238,17 @@
SND_SOC_BIAS_ON,
};
+enum snd_soc_dsp_state {
+ SND_SOC_DSP_STATE_NEW = 0,
+ SND_SOC_DSP_STATE_OPEN,
+ SND_SOC_DSP_STATE_HW_PARAMS,
+ SND_SOC_DSP_STATE_PREPARE,
+ SND_SOC_DSP_STATE_START,
+ SND_SOC_DSP_STATE_STOP,
+ SND_SOC_DSP_STATE_HW_FREE,
+ SND_SOC_DSP_STATE_CLOSE,
+};
+
struct snd_jack;
struct snd_soc_card;
struct snd_soc_pcm_stream;
@@ -237,6 +266,7 @@
struct snd_soc_jack_zone;
struct snd_soc_jack_pin;
struct snd_soc_cache_ops;
+struct snd_soc_dsp_link;
#include <sound/soc-dapm.h>
#ifdef CONFIG_GPIOLIB
@@ -258,6 +288,11 @@
SND_SOC_RBTREE_COMPRESSION
};
+enum snd_soc_pcm_subclass {
+ SND_SOC_MUTEX_FE = 0,
+ SND_SOC_MUTEX_BE = 1,
+};
+
int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
unsigned int freq, int dir);
int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
@@ -297,6 +332,18 @@
unsigned int reg);
int snd_soc_default_writable_register(struct snd_soc_codec *codec,
unsigned int reg);
+unsigned int snd_soc_platform_read(struct snd_soc_platform *platform,
+ unsigned int reg);
+unsigned int snd_soc_platform_write(struct snd_soc_platform *platform,
+ unsigned int reg, unsigned int val);
+
+struct snd_soc_codec *snd_soc_card_get_codec(struct snd_soc_card *card,
+ const char *codec_name);
+struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
+ const char *dai_link, int stream);
+struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
+ const char *dai_link);
+int snd_soc_card_active_links(struct snd_soc_card *card);
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -349,6 +396,8 @@
const char *prefix);
int snd_soc_add_controls(struct snd_soc_codec *codec,
const struct snd_kcontrol_new *controls, int num_controls);
+int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
+ const struct snd_kcontrol_new *controls, int num_controls);
int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
@@ -612,6 +661,13 @@
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+
+ /* codec stream completion event */
+ int (*stream_event)(struct snd_soc_dapm_context *dapm);
};
/* SoC platform interface */
@@ -623,8 +679,7 @@
int (*resume)(struct snd_soc_dai *dai);
/* pcm creation and destruction */
- int (*pcm_new)(struct snd_card *, struct snd_soc_dai *,
- struct snd_pcm *);
+ int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
/*
@@ -636,6 +691,17 @@
/* platform stream ops */
struct snd_pcm_ops *ops;
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+
+ int (*stream_event)(struct snd_soc_dapm_context *dapm);
+ int (*bespoke_trigger)(struct snd_pcm_substream *, int);
+
+ /* platform DAPM IO TODO: refactor this */
+ unsigned int (*read)(struct snd_soc_platform *, unsigned int);
+ int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
};
struct snd_soc_platform {
@@ -647,9 +713,19 @@
unsigned int suspended:1; /* platform is suspended */
unsigned int probed:1;
+ struct snd_card *snd_card;
struct snd_soc_card *card;
struct list_head list;
struct list_head card_list;
+ int num_dai;
+
+ /* dapm */
+ struct snd_soc_dapm_context dapm;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_platform_root;
+ struct dentry *debugfs_dapm;
+#endif
};
struct snd_soc_dai_link {
@@ -661,15 +737,32 @@
const char *cpu_dai_name;
const char *codec_dai_name;
+ struct snd_soc_dsp_link *dsp_link;
/* Keep DAI active over suspend */
unsigned int ignore_suspend:1;
/* Symmetry requirements */
unsigned int symmetric_rates:1;
+ /* No PCM created for this DAI link */
+ unsigned int no_pcm:1;
+	/* This DAI link can change CODEC and platform at runtime */
+	unsigned int dynamic:1;
+	/* This DAI link has no codec-side driver */
+	unsigned int no_codec:1;
+	/* This DAI has a Backend ID */
+	unsigned int be_id;
+	/* This DAI can support no host IO (no PCM data is copied to/from the host) */
+ unsigned int no_host_mode:2;
+ /* DAI link active */
+ unsigned int active;
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_pcm_runtime *rtd);
+ /* hw_params re-writing for BE and FE sync */
+ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+
/* machine stream operations */
struct snd_soc_ops *ops;
};
@@ -709,6 +802,9 @@
struct list_head list;
struct mutex mutex;
+ struct mutex dapm_mutex;
+ struct mutex dsp_mutex;
+ struct mutex power_mutex;
bool instantiated;
@@ -774,6 +870,7 @@
/* Generic DAPM context for the card */
struct snd_soc_dapm_context dapm;
+ int (*stream_event)(struct snd_soc_dapm_context *dapm);
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_card_root;
@@ -784,15 +881,32 @@
void *drvdata;
};
+/* DSP runtime data */
+struct snd_soc_dsp_runtime {
+ struct list_head be_clients;
+ struct list_head fe_clients;
+ int users;
+ struct snd_pcm_runtime *runtime;
+ struct snd_pcm_hw_params hw_params;
+ int runtime_update;
+ enum snd_soc_dsp_state state;
+};
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device dev;
struct snd_soc_card *card;
struct snd_soc_dai_link *dai_link;
+ struct mutex pcm_mutex;
+ enum snd_soc_pcm_subclass pcm_subclass;
+ struct snd_pcm_ops ops;
unsigned int complete:1;
unsigned int dev_registered:1;
+ /* DSP runtime data */
+ struct snd_soc_dsp_runtime dsp[2];
+
/* Symmetry data - only valid if symmetry is being enforced */
unsigned int rate;
long pmdown_time;
@@ -805,6 +919,11 @@
struct snd_soc_dai *cpu_dai;
struct delayed_work delayed_work;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_dsp_root;
+ struct dentry *debugfs_dsp_state;
+#endif
};
/* mixer control */
@@ -822,6 +941,7 @@
unsigned int max;
unsigned int mask;
const char * const *texts;
+ char **dtexts;
const unsigned int *values;
void *dapm;
};
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index ae973d2..603f5a0 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -9,6 +9,7 @@
struct snd_soc_jack;
struct snd_soc_codec;
+struct snd_soc_platform;
struct snd_soc_card;
struct snd_soc_dapm_widget;
@@ -59,6 +60,50 @@
);
+DECLARE_EVENT_CLASS(snd_soc_preg,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, platform->name )
+ __field( int, id )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, platform->name);
+ __entry->id = platform->id;
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("platform=%s.%d reg=%x val=%x", __get_str(name),
+ (int)__entry->id, (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_preg, snd_soc_preg_write,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_preg, snd_soc_preg_read,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val)
+
+);
+
DECLARE_EVENT_CLASS(snd_soc_card,
TP_PROTO(struct snd_soc_card *card, int val),
diff --git a/include/video/dsscomp.h b/include/video/dsscomp.h
new file mode 100644
index 0000000..4fdcb0c
--- /dev/null
+++ b/include/video/dsscomp.h
@@ -0,0 +1,644 @@
+#ifndef _LINUX_DSSCOMP_H
+#define _LINUX_DSSCOMP_H
+
+#ifdef __KERNEL__
+#include <video/omapdss.h>
+#else
+
+/* exporting enumerations from arch/arm/plat-omap/include/plat/display.h */
+enum omap_plane {
+ OMAP_DSS_GFX = 0,
+ OMAP_DSS_VIDEO1 = 1,
+ OMAP_DSS_VIDEO2 = 2,
+ OMAP_DSS_VIDEO3 = 3,
+ OMAP_DSS_WB = 4,
+};
+
+enum omap_channel {
+ OMAP_DSS_CHANNEL_LCD = 0,
+ OMAP_DSS_CHANNEL_DIGIT = 1,
+ OMAP_DSS_CHANNEL_LCD2 = 2,
+};
+
+enum omap_color_mode {
+ OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
+ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
+ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
+ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
+
+ /* also referred to as RGB 12-BPP, 16-bit container */
+ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* xRGB12-4444 */
+ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16-4444 */
+ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16-565 */
+
+ /* also referred to as RGB 24-BPP, 32-bit container */
+ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* xRGB24-8888 */
+ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24-888 */
+ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
+ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
+ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32-8888 */
+ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32-8888 */
+
+ /* also referred to as RGBx 32 in TRM */
+ OMAP_DSS_COLOR_RGBX24 = 1 << 13, /* RGBx32-8888 */
+ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32-8888 */
+ OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
+
+ /* also referred to as RGBA12-4444 in TRM */
+ OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16-4444 */
+
+ OMAP_DSS_COLOR_RGBX12 = 1 << 16, /* RGBx16-4444 */
+ OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16-4444 */
+ OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16-1555 */
+
+ /* also referred to as xRGB16-555 in TRM */
+ OMAP_DSS_COLOR_XRGB15 = 1 << 18, /* xRGB16-1555 */
+ OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16-1555 */
+};
+
+enum omap_dss_trans_key_type {
+ OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+ OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_dss_display_state {
+ OMAP_DSS_DISPLAY_DISABLED = 0,
+ OMAP_DSS_DISPLAY_ACTIVE,
+ OMAP_DSS_DISPLAY_SUSPENDED,
+ OMAP_DSS_DISPLAY_TRANSITION,
+};
+
+struct omap_video_timings {
+ /* Unit: pixels */
+ __u16 x_res;
+ /* Unit: pixels */
+ __u16 y_res;
+ /* Unit: KHz */
+ __u32 pixel_clock;
+ /* Unit: pixel clocks */
+ __u16 hsw; /* Horizontal synchronization pulse width */
+ /* Unit: pixel clocks */
+ __u16 hfp; /* Horizontal front porch */
+ /* Unit: pixel clocks */
+ __u16 hbp; /* Horizontal back porch */
+ /* Unit: line clocks */
+ __u16 vsw; /* Vertical synchronization pulse width */
+ /* Unit: line clocks */
+ __u16 vfp; /* Vertical front porch */
+ /* Unit: line clocks */
+ __u16 vbp; /* Vertical back porch */
+};
+
+/* YUV to RGB color conversion info */
+struct omap_dss_cconv_coefs {
+ __s16 ry, rcr, rcb;
+ __s16 gy, gcr, gcb;
+ __s16 by, bcr, bcb;
+
+ /* Y is 16..235, UV is 16..240 if not fullrange. Otherwise 0..255 */
+ __u16 full_range;
+} __attribute__ ((aligned(4)));
+
+struct omap_dss_cpr_coefs {
+ __s16 rr, rg, rb;
+ __s16 gr, gg, gb;
+ __s16 br, bg, bb;
+};
+
+#endif
+
+/* copy of fb_videomode */
+struct dsscomp_videomode {
+ const char *name; /* optional */
+ __u32 refresh; /* optional */
+ __u32 xres;
+ __u32 yres;
+ __u32 pixclock;
+ __u32 left_margin;
+ __u32 right_margin;
+ __u32 upper_margin;
+ __u32 lower_margin;
+ __u32 hsync_len;
+ __u32 vsync_len;
+ __u32 sync;
+ __u32 vmode;
+ __u32 flag;
+};
+
+/*
+ * Stereoscopic panel types.
+ * The row, column, overunder and sidebyside options
+ * are with respect to the native scan order.
+ */
+enum s3d_disp_type {
+ S3D_DISP_NONE = 0,
+ S3D_DISP_FRAME_SEQ,
+ S3D_DISP_ROW_IL,
+ S3D_DISP_COL_IL,
+ S3D_DISP_PIX_IL,
+ S3D_DISP_CHECKB,
+ S3D_DISP_OVERUNDER,
+ S3D_DISP_SIDEBYSIDE,
+};
+
+/* Subsampling direction is based on native panel scan order. */
+enum s3d_disp_sub_sampling {
+ S3D_DISP_SUB_SAMPLE_NONE = 0,
+ S3D_DISP_SUB_SAMPLE_V,
+ S3D_DISP_SUB_SAMPLE_H,
+};
+
+/*
+ * Indicates if the display expects the left view first followed by the right
+ * view, or vice versa
+ * For row interleaved displays, defines first row view
+ * For column interleaved displays, defines first column view
+ * For checkerboard, defines first pixel view
+ * For overunder, defines top view
+ * For sidebyside, defines west view
+ */
+enum s3d_disp_order {
+ S3D_DISP_ORDER_L = 0,
+ S3D_DISP_ORDER_R = 1,
+};
+
+/*
+ * Indicates current view
+ * Used mainly for displays that need to trigger a sync signal
+ */
+enum s3d_disp_view {
+ S3D_DISP_VIEW_L = 0,
+ S3D_DISP_VIEW_R,
+};
+
+struct s3d_disp_info {
+ enum s3d_disp_type type;
+ enum s3d_disp_sub_sampling sub_samp;
+ enum s3d_disp_order order;
+ /*
+ * Gap between left and right views
+ * For over/under units are lines
+ * For sidebyside units are pixels
+ * For other types ignored
+ */
+ unsigned int gap;
+};
+
+enum omap_dss_ilace_mode {
+ OMAP_DSS_ILACE = (1 << 0), /* interlaced vs. progressive */
+ OMAP_DSS_ILACE_SEQ = (1 << 1), /* sequential vs interleaved */
+ OMAP_DSS_ILACE_SWAP = (1 << 2), /* swap fields, e.g. TB=>BT */
+
+ OMAP_DSS_ILACE_NONE = 0,
+ OMAP_DSS_ILACE_IL_TB = OMAP_DSS_ILACE,
+ OMAP_DSS_ILACE_IL_BT = OMAP_DSS_ILACE | OMAP_DSS_ILACE_SWAP,
+ OMAP_DSS_ILACE_SEQ_TB = OMAP_DSS_ILACE_IL_TB | OMAP_DSS_ILACE_SEQ,
+ OMAP_DSS_ILACE_SEQ_BT = OMAP_DSS_ILACE_IL_BT | OMAP_DSS_ILACE_SEQ,
+};
+
+/* YUV VC1 range mapping info */
+struct dss2_vc1_range_map_info {
+ __u8 enable; /* bool */
+
+ __u8 range_y; /* 0..7 */
+ __u8 range_uv; /* 0..7 */
+} __attribute__ ((aligned(4)));
+
+/* standard rectangle */
+struct dss2_rect_t {
+ __s32 x; /* left */
+ __s32 y; /* top */
+ __u32 w; /* width */
+ __u32 h; /* height */
+} __attribute__ ((aligned(4)));
+
+/* decimation constraints */
+struct dss2_decim {
+ __u8 min_x;
+ __u8 max_x; /* 0 is same as 255 */
+ __u8 min_y;
+ __u8 max_y; /* 0 is same as 255 */
+} __attribute__ ((aligned(4)));
+
+/*
+ * A somewhat more user-friendly interface to the DSS2. This is a
+ * direct interface to the DSS2 overlay and overlay_manager modules.
+ * User-space APIs are provided for HW-specific control of DSS, in
+ * contrast with V4L2/FB, which are more generic but in the process
+ * omit HW-specific features.
+ *
+ * For now managers are specified by display index as opposed to manager
+ * type, so that display0 is always the default display (e.g. HDMI on
+ * Panda, and the LCD on Blaze.) For now you would need to query the displays
+ * or use sysfs to find a specific display.
+ *
+ * Userspace operations are as follows:
+ *
+ * 1) check if DSS supports an overlay configuration, use DSSCIOC_CHECK_OVL
+ * ioctl with the manager, overlay, and setup-mode information filled out.
+ * All fields should be filled out as it may influence whether DSS can
+ * display/render the overlay.
+ *
+ * If proper address information is not available, it may be possible to
+ * use a type-of-address enumeration instead for luma/rgb and chroma (if
+ * applicable) frames.
+ *
+ * Do this for each overlay before attempting to configure DSS.
+ *
+ * 2) configure DSS pipelines for display/manager using the DSSCIOC_SETUP_MGR
+ * ioctl. You can delay applying the settings until a dss2_manager_apply()
+ * is called for the internal composition object, if the APPLY bit of setup mode
+ * is not set. However the CAPTURE/DISPLAY bits of the setup mode settings will
+ * determine if at this time a capture will take place (in case of capture
+ * only mode). You may also set up additional pipelines with
+ * dss2_overlay_setup() before this.
+ *
+ * 3) On OMAP4/5 you can use the DSS WB pipeline to copy (and convert) a buffer
+ * using DSS. Use the DSSCIOC_WB_COPY ioctl for this. This is a blocking
+ * call, and it may possibly fail if an ongoing WB capture mode has been
+ * scheduled (which is outside of the current scope of the DSS2 interface.)
+ *
+ * There is also a one-shot configuration API (DSSCIOC_SETUP_DISPC). This
+ * allows you to set up all overlays on all managers in one call. This call
+ * performs additional functionality:
+ *
+ * - it maps userspace 1D buffers into TILER 1D for the duration of the display
+ * - it disables all overlays that were specified before, but are no longer
+ * specified
+ *
+ */
+
+/*
+ * DSS2 overlay information. This structure contains all information
+ * needed to set up the overlay for a particular buffer to be displayed
+ * at a particular orientation.
+ *
+ * The following information is deemed to be set globally, so it is not
+ * included:
+ * - whether to enable zorder (always enabled)
+ * - whether to replicate/truncate color fields (it is decided per the
+ * whole manager/overlay settings, and is enabled unless overlay is
+ * directed to WB.)
+ *
+ * There is also no support for CLUT formats
+ *
+ * Requirements:
+ *
+ * 1) 0 <= crop.x <= crop.x + crop.w <= width
+ * 2) 0 <= crop.y <= crop.y + crop.h <= height
+ * 3) win.x <= win.x + win.w and win.w >= 0
+ * 4) win.y <= win.y + win.h and win.h >= 0
+ *
+ * 5) color_mode is supported by overlay
+ * 6) requested scaling is supported by overlay and functional clocks
+ *
+ * Notes:
+ *
+ * 1) Any portions of X:[pos_x, pos_x + out_width] and
+ * Y:[pos_y, pos_y + out_height] outside of the screen
+ * X:[0, screen.width], Y:[0, screen.height] will be cropped
+ * automatically without changing the scaling ratio.
+ *
+ * 2) Crop region will be adjusted to the pixel granularity:
+ * (2-by-1) for YUV422, (2-by-2) for YUV420. This will
+ * not modify the output region. Crop region is for the
+ * original (unrotated) buffer, so it does not change with
+ * rotation.
+ *
+ * 3) Rotation will not modify the output region, specifically
+ * its height and width. Also the coordinate system of the
+ * display is always (0,0) = top left.
+ *
+ * 4) cconv and vc1 only needs to be filled for YUV color modes.
+ *
+ * 5) vc1.range_y and vc1.range_uv only needs to be filled if
+ * vc1.enable is true.
+ */
+struct dss2_ovl_cfg {
+ __u16 width; /* buffer width */
+ __u16 height; /* buffer height */
+ __u32 stride; /* buffer stride */
+
+ enum omap_color_mode color_mode;
+ __u8 pre_mult_alpha; /* bool */
+ __u8 global_alpha; /* 0..255 */
+ __u8 rotation; /* 0..3 (*90 degrees clockwise) */
+ __u8 mirror; /* left-to-right: mirroring is applied after rotation */
+
+ enum omap_dss_ilace_mode ilace; /* interlace mode */
+
+ struct dss2_rect_t win; /* output window - on display */
+ struct dss2_rect_t crop; /* crop window - in source buffer */
+
+ struct dss2_decim decim; /* predecimation limits */
+
+ struct omap_dss_cconv_coefs cconv;
+ struct dss2_vc1_range_map_info vc1;
+
+ __u8 ix; /* ovl index same as sysfs/overlay# */
+ __u8 zorder; /* 0..3 */
+ __u8 enabled; /* bool */
+ __u8 zonly; /* only set zorder and enabled bit */
+ __u8 mgr_ix; /* mgr index */
+} __attribute__ ((aligned(4)));
+
+enum omapdss_buffer_type {
+ OMAP_DSS_BUFTYPE_SDMA,
+ OMAP_DSS_BUFTYPE_TILER_8BIT,
+ OMAP_DSS_BUFTYPE_TILER_16BIT,
+ OMAP_DSS_BUFTYPE_TILER_32BIT,
+ OMAP_DSS_BUFTYPE_TILER_PAGE,
+};
+
+enum omapdss_buffer_addressing_type {
+ OMAP_DSS_BUFADDR_DIRECT, /* using direct addresses */
+ OMAP_DSS_BUFADDR_BYTYPE, /* using buffer types */
+ OMAP_DSS_BUFADDR_ION, /* using ion handle(s) */
+ OMAP_DSS_BUFADDR_GRALLOC, /* using gralloc handle */
+ OMAP_DSS_BUFADDR_OVL_IX, /* using a prior overlay */
+ OMAP_DSS_BUFADDR_LAYER_IX, /* using a Post2 layer */
+ OMAP_DSS_BUFADDR_FB, /* using framebuffer memory */
+};
+
+struct dss2_ovl_info {
+ struct dss2_ovl_cfg cfg;
+
+ enum omapdss_buffer_addressing_type addressing;
+
+ union {
+ /* user-space interfaces */
+ struct {
+ void *address; /* main buffer address */
+ void *uv_address; /* uv buffer */
+ };
+
+ /*
+ * For DSSCIOC_CHECK_OVL we allow specifying just the
+ * type of each buffer. This is used if we need to
+ * check whether DSS will be able to display a buffer
+ * if using a particular memory type before spending
+ * time to map/copy the buffer into that type of
+ * memory.
+ */
+ struct {
+ enum omapdss_buffer_type ba_type;
+ enum omapdss_buffer_type uv_type;
+ };
+
+ /* kernel-space interfaces */
+
+ /*
+ * for fbmem, highest 4-bits of address is fb index,
+ * rest of the bits are the offset
+ */
+ struct {
+ __u32 ba; /* base address or index */
+ __u32 uv; /* uv address */
+ };
+ };
+};
+
+/*
+ * DSS2 manager information.
+ *
+ * The following information is deemed to be set globally, so it is not
+ * included:
+ * gamma correction
+ * whether to enable zorder (always enabled)
+ * whether to replicate/truncate color fields (it is decided per the
+ * whole manager/overlay settings, and is enabled unless overlay is
+ * directed to WB.)
+ * Notes:
+ *
+ * 1) trans_key_type and trans_key only need to be filled if
+ * trans_enabled is true, and alpha_blending is false.
+ */
+struct dss2_mgr_info {
+ __u32 ix; /* display index same as sysfs/display# */
+
+ __u32 default_color;
+
+ enum omap_dss_trans_key_type trans_key_type;
+ __u32 trans_key;
+ struct omap_dss_cpr_coefs cpr_coefs;
+
+ __u8 trans_enabled; /* bool */
+
+ __u8 interlaced; /* bool */
+ __u8 alpha_blending; /* bool - overrides trans_enabled */
+ __u8 cpr_enabled; /* bool */
+ __u8 swap_rb; /* bool - swap red and blue */
+} __attribute__ ((aligned(4)));
+
+/*
+ * ioctl: DSSCIOC_SETUP_MGR, struct dsscomp_setup_mgr_data
+ *
+ * 1. sets manager of each ovl in composition to the display
+ * 2. calls set_dss_ovl_info() for each ovl to set up the
+ * overlay staging structures (this is a wrapper around ovl->set_info())
+ * 3. calls set_dss_mgr_info() for mgr to set up the manager
+ * staging structures (this is a wrapper around mgr->set_info())
+ * 4. if update is true:
+ * calls manager->apply()
+ * calls driver->update() in a non-blocking fashion
+ * this will program the DSS synchronously
+ *
+ * Notes:
+ *
+ * 1) x, y, w, h only needs to be set if update is true.
+ *
+ * All non-specified pipelines that currently are on the same display
+ * will remain the same as on the previous frame. You may want to
+ * disable unused pipelines to avoid surprises.
+ *
+ * If get_sync_obj is false, it returns 0 on success, <0 error value
+ * on failure.
+ *
+ * If get_sync_obj is true, it returns fd on success, or a negative value
+ * on failure. You can use the fd to wait on (using DSSCIOC_WAIT ioctl()).
+ *
+ * Note: frames do not get eclipsed when the display turns off. Queue a
+ * blank frame to eclipse old frames. Blank frames get eclipsed when
+ * programmed into DSS.
+ *
+ * (A blank frame is queued to the display automatically in Android before
+ * the display is turned off.)
+ *
+ * All overlays to be used on the frame must be listed. There is no way
+ * to add another overlay to a defined frame.
+ */
+enum dsscomp_setup_mode {
+ DSSCOMP_SETUP_MODE_APPLY = (1 << 0), /* applies changes to cache */
+ DSSCOMP_SETUP_MODE_DISPLAY = (1 << 1), /* calls display update */
+ DSSCOMP_SETUP_MODE_CAPTURE = (1 << 2), /* capture to WB */
+
+ /* just apply changes for next vsync/update */
+ DSSCOMP_SETUP_APPLY = DSSCOMP_SETUP_MODE_APPLY,
+ /* trigger an update (wait for vsync) */
+ DSSCOMP_SETUP_DISPLAY =
+ DSSCOMP_SETUP_MODE_APPLY | DSSCOMP_SETUP_MODE_DISPLAY,
+ /* capture to WB - WB must be configured */
+ DSSCOMP_SETUP_CAPTURE =
+ DSSCOMP_SETUP_MODE_APPLY | DSSCOMP_SETUP_MODE_CAPTURE,
+ /* display and capture to WB - WB must be configured */
+ DSSCOMP_SETUP_DISPLAY_CAPTURE =
+ DSSCOMP_SETUP_DISPLAY | DSSCOMP_SETUP_CAPTURE,
+};
+
+struct dsscomp_setup_mgr_data {
+ __u32 sync_id; /* synchronization ID - for debugging */
+
+ struct dss2_rect_t win; /* update region, set w/h to 0 for fullscreen */
+ enum dsscomp_setup_mode mode;
+ __u16 num_ovls; /* # of overlays used in the composition */
+ __u16 get_sync_obj; /* ioctl should return a sync object */
+
+ struct dss2_mgr_info mgr;
+ struct dss2_ovl_info ovls[0]; /* up to 5 overlays to set up */
+};
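Because ovls[] is a zero-length array at the tail of dsscomp_setup_mgr_data, the caller sizes the allocation by the number of overlays it intends to program. The sketch below shows only that packing; opening the dsscomp device and the error handling around the ioctl are assumptions, as the device node is not defined in this header.

#include <stdlib.h>
#include <linux/types.h>
#include <video/dsscomp.h>

/* Allocate and pre-fill a composition for num_ovls overlays. */
static struct dsscomp_setup_mgr_data *demo_pack_comp(__u16 num_ovls)
{
	struct dsscomp_setup_mgr_data *d;
	size_t size = sizeof(*d) + num_ovls * sizeof(d->ovls[0]);

	d = calloc(1, size);
	if (!d)
		return NULL;

	d->mode = DSSCOMP_SETUP_DISPLAY;	/* apply + update on next vsync */
	d->num_ovls = num_ovls;
	d->get_sync_obj = 0;			/* plain 0/-errno style return */
	/* d->mgr, d->win and each d->ovls[i] still need to be filled in,
	 * then the buffer is passed to ioctl(fd, DSSCIOC_SETUP_MGR, d). */
	return d;
}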
+
+/*
+ * ioctl: DSSCIOC_CHECK_OVL, struct dsscomp_check_ovl_data
+ *
+ * DISPLAY and/or CAPTURE bits must be filled for the mode field
+ * correctly to be able to decide correctly if DSS can properly
+ * render the overlay.
+ *
+ * ovl.ix is ignored.
+ *
+ * Returns a positive bitmask regarding which overlay of DSS can
+ * render the overlay as it is configured for the display/display's
+ * manager. NOTE: that overlays that are assigned to other displays
+ * may be returned. If there is an invalid configuration (negative
+ * sizes, etc.), a negative error value is returned.
+ *
+ * ovl->decim's min values will be modified to the smallest decimation that
+ * DSS can use to support the overlay configuration.
+ *
+ * Assumptions:
+ * - zorder will be distinct from other pipelines on that manager
+ * - overlay will be enabled and routed to the display specified
+ */
+struct dsscomp_check_ovl_data {
+ enum dsscomp_setup_mode mode;
+ struct dss2_mgr_info mgr;
+ struct dss2_ovl_info ovl;
+};
+
+/*
+ * This structure is used to set up the entire DISPC (all managers),
+ * and is analogous to dsscomp_setup_mgr_data.
+ *
+ * Additional features:
+ * - all overlays that were specified in a prior use of this
+ * structure, and are no longer specified, will be disabled.
+ * - 1D buffers under 4M will be mapped into TILER1D.
+ *
+ * Limitations:
+ * - only DISPLAY mode is supported (DISPLAY and APPLY bits will
+ * automatically be set)
+ * - getting a sync object is not supported.
+ */
+struct dsscomp_setup_dispc_data {
+ __u32 sync_id; /* synchronization ID - for debugging */
+
+ enum dsscomp_setup_mode mode;
+ __u16 num_ovls; /* # of overlays used in the composition */
+ __u16 num_mgrs; /* # of managers used in the composition */
+ __u16 get_sync_obj; /* ioctl should return a sync object */
+
+ struct dss2_mgr_info mgrs[3];
+ struct dss2_ovl_info ovls[5]; /* up to 5 overlays to set up */
+};
+
+/*
+ * ioctl: DSSCIOC_WB_COPY, struct dsscomp_wb_copy_data
+ *
+ * Requirements:
+ * wb.ix must be OMAP_DSS_WB.
+ *
+ * Returns 0 on success (copy is completed), non-0 on failure.
+ */
+struct dsscomp_wb_copy_data {
+ struct dss2_ovl_info ovl, wb;
+};
+
+/*
+ * ioctl: DSSCIOC_QUERY_DISPLAY, struct dsscomp_display_info
+ *
+ * Gets information about the display. Fill in ix and modedb_len before
+ * calling the ioctl; the rest of the fields are filled in by the ioctl. Up to
+ * modedb_len timings are retrieved in the order of display preference.
+ *
+ * Returns: 0 on success, non-0 error value on failure.
+ */
+struct dsscomp_display_info {
+ __u32 ix; /* display index (sysfs/display#) */
+ __u32 overlays_available; /* bitmask of available overlays */
+ __u32 overlays_owned; /* bitmask of owned overlays */
+ enum omap_channel channel;
+ enum omap_dss_display_state state;
+ __u8 enabled; /* bool: resume-state if suspended */
+ struct omap_video_timings timings;
+ struct s3d_disp_info s3d_info; /* any S3D specific information */
+ struct dss2_mgr_info mgr; /* manager information */
+ __u16 width_in_mm; /* screen dimensions */
+ __u16 height_in_mm;
+
+ __u32 modedb_len; /* number of video timings */
+ struct dsscomp_videomode modedb[]; /* display supported timings */
+};
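modedb[] is likewise a flexible tail, so the caller allocates room for as many timings as it is willing to receive and advertises that via modedb_len. A hedged user-space sketch: only the ioctl number and the structure come from this header, while the open file descriptor on the dsscomp device and the mode count are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <video/dsscomp.h>

#define DEMO_MAX_MODES	16	/* arbitrary upper bound */

/* fd is an already-open handle on the dsscomp device (path not defined here) */
static int demo_query_display(int fd, __u32 ix)
{
	struct dsscomp_display_info *info;
	size_t size = sizeof(*info) + DEMO_MAX_MODES * sizeof(info->modedb[0]);
	unsigned int i;
	int ret;

	info = calloc(1, size);
	if (!info)
		return -1;

	info->ix = ix;				/* which display to query */
	info->modedb_len = DEMO_MAX_MODES;	/* room we allocated */

	ret = ioctl(fd, DSSCIOC_QUERY_DISPLAY, info);
	if (!ret) {
		printf("display %u: %ux%u mm\n", ix,
		       info->width_in_mm, info->height_in_mm);
		for (i = 0; i < info->modedb_len && i < DEMO_MAX_MODES; i++)
			printf("  mode %u: %ux%u@%u\n", i, info->modedb[i].xres,
			       info->modedb[i].yres, info->modedb[i].refresh);
	}

	free(info);
	return ret;
}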
+
+/*
+ * ioctl: DSSCIOC_SETUP_DISPLAY, struct dsscomp_setup_display_data
+ *
+ * Sets up the display with the given video timings. Fill in ix and mode
+ * before calling the ioctl.
+ *
+ * Returns: 0 on success, non-0 error value on failure.
+ */
+struct dsscomp_setup_display_data {
+ __u32 ix; /* display index (sysfs/display#) */
+ struct dsscomp_videomode mode; /* video timings */
+};
+
+/*
+ * ioctl: DSSCIOC_WAIT, struct dsscomp_wait_data
+ *
+ * Use this ioctl to wait for one of the following events:
+ *
+ * A) the moment a composition is programmed into DSS
+ * B) the moment a composition is first displayed (or captured)
+ * C) the moment when a composition is no longer queued or displayed on a
+ * display (it is released). (A composition is assumed to be superseded
+ * when another composition has been programmed into DSS, even if that
+ * subsequent composition does not update/specify all overlays used by
+ * the prior composition; moreover, even if it uses the same buffers.)
+ *
+ * Set timeout to desired timeout value in microseconds.
+ *
+ * This ioctl must be used on the sync object returned by the
+ * DSSCIOC_SETUP_MGR or DSSCIOC_SETUP_DISPC ioctls.
+ *
+ * Returns: >=0 on success, <0 error value on failure (e.g. -ETIME).
+ */
+enum dsscomp_wait_phase {
+ DSSCOMP_WAIT_PROGRAMMED = 1,
+ DSSCOMP_WAIT_DISPLAYED,
+ DSSCOMP_WAIT_RELEASED,
+};
+
+struct dsscomp_wait_data {
+ __u32 timeout_us; /* timeout in microseconds */
+ enum dsscomp_wait_phase phase; /* phase to wait for */
+};
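Tying the pieces together: a composition queued with get_sync_obj set returns a file descriptor, and DSSCIOC_WAIT is then issued on that descriptor with the phase of interest. A short sketch under those assumptions (names are illustrative):

#include <sys/ioctl.h>
#include <linux/types.h>
#include <video/dsscomp.h>

/*
 * Block until a previously queued composition is released by DSS (or the
 * timeout expires). sync_fd is the fd returned by DSSCIOC_SETUP_MGR when
 * get_sync_obj was set in dsscomp_setup_mgr_data.
 */
static int demo_wait_released(int sync_fd, __u32 timeout_us)
{
	struct dsscomp_wait_data wd = {
		.timeout_us	= timeout_us,
		.phase		= DSSCOMP_WAIT_RELEASED,
	};

	return ioctl(sync_fd, DSSCIOC_WAIT, &wd);
}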
+
+/* IOCTLS */
+#define DSSCIOC_SETUP_MGR _IOW('O', 128, struct dsscomp_setup_mgr_data)
+#define DSSCIOC_CHECK_OVL _IOWR('O', 129, struct dsscomp_check_ovl_data)
+#define DSSCIOC_WB_COPY _IOW('O', 130, struct dsscomp_wb_copy_data)
+#define DSSCIOC_QUERY_DISPLAY _IOWR('O', 131, struct dsscomp_display_info)
+#define DSSCIOC_WAIT _IOW('O', 132, struct dsscomp_wait_data)
+
+#define DSSCIOC_SETUP_DISPC _IOW('O', 133, struct dsscomp_setup_dispc_data)
+#define DSSCIOC_SETUP_DISPLAY _IOW('O', 134, struct dsscomp_setup_display_data)
+#endif
diff --git a/include/video/hdmi_ti_4xxx_ip.h b/include/video/hdmi_ti_4xxx_ip.h
new file mode 100644
index 0000000..49e7415
--- /dev/null
+++ b/include/video/hdmi_ti_4xxx_ip.h
@@ -0,0 +1,394 @@
+/*
+ * hdmi_ti_4xxx_ip.h
+ *
+ * HDMI driver definition for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HDMI_TI_4xxx_
+#define _HDMI_TI_4xxx_
+
+#define HDMI_HPD_LOW 0x10
+#define HDMI_HPD_HIGH 0x20
+#define HDMI_BCAP 0x40
+#define HDMI_RI_ERR 0x80
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_deep_color_mode {
+ HDMI_DEEP_COLOR_24BIT = 0,
+ HDMI_DEEP_COLOR_30BIT = 1,
+ HDMI_DEEP_COLOR_36BIT = 2
+};
+
+struct hdmi_ip_data {
+ void __iomem *base_wp; /* HDMI wrapper */
+ unsigned long hdmi_core_sys_offset;
+ unsigned long hdmi_core_av_offset;
+ unsigned long hdmi_pll_offset;
+ unsigned long hdmi_phy_offset;
+};
+
+struct hdmi_video_timings {
+ u16 x_res;
+ u16 y_res;
+ /* Unit: KHz */
+ u32 pixel_clock;
+ u16 hsw;
+ u16 hfp;
+ u16 hbp;
+ u16 vsw;
+ u16 vfp;
+ u16 vbp;
+};
+
+/* HDMI timing structure */
+struct hdmi_timings {
+ struct hdmi_video_timings timings;
+ int vsync_pol;
+ int hsync_pol;
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_config {
+ struct fb_videomode timings;
+ struct hdmi_cm cm;
+ enum hdmi_deep_color_mode deep_color;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
+};
+
+struct hdmi_core_audio_i2s_config {
+ u8 word_max_length;
+ u8 word_length;
+ u8 in_length_bits;
+ u8 justification;
+ u8 en_high_bitrate_aud;
+ u8 sck_edge_mode;
+ u8 cbit_order;
+ u8 vbit;
+ u8 ws_polarity;
+ u8 direction;
+ u8 shift;
+ u8 active_sds;
+};
+
+
+enum hdmi_audio_i2s_config {
+ HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
+ HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
+ HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
+ HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
+ HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
+ HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
+ HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
+ HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
+ HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
+ HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
+ HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
+ HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
+ HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
+ HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
+ HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
+ HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
+ HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
+ HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
+ HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
+ HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
+ HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
+ HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
+ HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
+ HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
+ HDMI_AUDIO_I2S_SD0_EN = 1,
+ HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
+ HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
+ HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
+};
+
+enum hdmi_audio_mclk_mode {
+ HDMI_AUDIO_MCLK_128FS = 0,
+ HDMI_AUDIO_MCLK_256FS = 1,
+ HDMI_AUDIO_MCLK_384FS = 2,
+ HDMI_AUDIO_MCLK_512FS = 3,
+ HDMI_AUDIO_MCLK_768FS = 4,
+ HDMI_AUDIO_MCLK_1024FS = 5,
+ HDMI_AUDIO_MCLK_1152FS = 6,
+ HDMI_AUDIO_MCLK_192FS = 7
+};
+
+
+enum hdmi_core_audio_sample_freq {
+ HDMI_AUDIO_FS_32000 = 0x3,
+ HDMI_AUDIO_FS_44100 = 0x0,
+ HDMI_AUDIO_FS_48000 = 0x2,
+ HDMI_AUDIO_FS_88200 = 0x8,
+ HDMI_AUDIO_FS_96000 = 0xA,
+ HDMI_AUDIO_FS_176400 = 0xC,
+ HDMI_AUDIO_FS_192000 = 0xE,
+ HDMI_AUDIO_FS_NOT_INDICATED = 0x1
+};
+
+enum hdmi_core_audio_layout {
+ HDMI_AUDIO_LAYOUT_2CH = 0,
+ HDMI_AUDIO_LAYOUT_8CH = 1
+};
+
+enum hdmi_core_cts_mode {
+ HDMI_AUDIO_CTS_MODE_HW = 0,
+ HDMI_AUDIO_CTS_MODE_SW = 1
+};
+
+enum hdmi_stereo_channels {
+ HDMI_AUDIO_STEREO_NOCHANNELS = 0,
+ HDMI_AUDIO_STEREO_ONECHANNEL = 1,
+ HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
+ HDMI_AUDIO_STEREO_THREECHANNELS = 3,
+ HDMI_AUDIO_STEREO_FOURCHANNELS = 4
+};
+
+enum hdmi_audio_type {
+ HDMI_AUDIO_TYPE_LPCM = 0,
+ HDMI_AUDIO_TYPE_IEC = 1
+};
+
+enum hdmi_audio_justify {
+ HDMI_AUDIO_JUSTIFY_LEFT = 0,
+ HDMI_AUDIO_JUSTIFY_RIGHT = 1
+};
+
+enum hdmi_audio_sample_order {
+ HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
+ HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
+};
+
+enum hdmi_audio_samples_perword {
+ HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
+ HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_16BITS = 0,
+ HDMI_AUDIO_SAMPLE_24BITS = 1
+};
+
+enum hdmi_audio_transf_mode {
+ HDMI_AUDIO_TRANSF_DMA = 0,
+ HDMI_AUDIO_TRANSF_IRQ = 1
+};
+
+enum hdmi_audio_blk_strt_end_sig {
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
+ HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
+};
+
+
+struct hdmi_core_audio_config {
+ struct hdmi_core_audio_i2s_config i2s_cfg;
+ enum hdmi_core_audio_sample_freq freq_sample;
+ bool fs_override;
+ u32 n;
+ u32 cts;
+ u32 aud_par_busclk;
+ enum hdmi_core_audio_layout layout;
+ enum hdmi_core_cts_mode cts_mode;
+ bool use_mclk;
+ enum hdmi_audio_mclk_mode mclk_mode;
+ bool en_acr_pkt;
+ bool en_dsd_audio;
+ bool en_parallel_aud_input;
+ bool en_spdif;
+};
+
+
+
+struct hdmi_audio_format {
+ enum hdmi_stereo_channels stereo_channels;
+ u8 active_chnnls_msk;
+ enum hdmi_audio_type type;
+ enum hdmi_audio_justify justification;
+ enum hdmi_audio_sample_order sample_order;
+ enum hdmi_audio_samples_perword samples_per_word;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
+};
+
+struct hdmi_audio_dma {
+ u8 transfer_size;
+ u8 block_size;
+ enum hdmi_audio_transf_mode mode;
+ u16 fifo_threshold;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_audio {
+ u8 db1_coding_type;
+ u8 db1_channel_count;
+ u8 db2_sample_freq;
+ u8 db2_sample_size;
+ u8 db4_channel_alloc;
+ bool db5_downmix_inh;
+ u8 db5_lsv; /* Level shift values for downmix */
+};
+
+
+
+/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
+enum hdmi_core_infoframe {
+ HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+ HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+ HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+ HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+ HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+ HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+ HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+ HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+ HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+ HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+ HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+ HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+ HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+ HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+ HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+ HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+ HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+ HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+ HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+ HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+ HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+ HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
+ HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
+ HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
+ HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
+ HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
+ HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
+ HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
+ HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
+ HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
+ HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
+ HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
+ HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
+ HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
+ HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
+ HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
+ HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
+ HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
+ HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
+ HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
+ HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
+ HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
+ HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
+ HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
+ HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
+ HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
+ HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
+ HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
+ HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
+ HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
+ HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
+};
+
+enum hdmi_aksv_err {
+ HDMI_AKSV_ZERO = 0,
+ HDMI_AKSV_ERROR = 1,
+ HDMI_AKSV_VALID = 2
+};
+
+int hdmi_ti_4xxx_phy_init(struct hdmi_ip_data *ip_data);
+void hdmi_ti_4xxx_phy_off(struct hdmi_ip_data *ip_data, bool set_mode);
+int read_ti_4xxx_edid(struct hdmi_ip_data *ip_data, u8 *pedid, u16 max_length);
+void hdmi_ti_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int hdmi_ti_4xxx_pll_program(struct hdmi_ip_data *ip_data,
+ struct hdmi_pll_info *fmt);
+int hdmi_ti_4xxx_set_pll_pwr(struct hdmi_ip_data *ip_data, enum hdmi_pll_pwr val);
+void hdmi_ti_4xxx_basic_configure(struct hdmi_ip_data *ip_data,
+ struct hdmi_config *cfg);
+int hdmi_ti_4xxx_rxdet(struct hdmi_ip_data *ip_data);
+int hdmi_ti_4xxx_wp_get_video_state(struct hdmi_ip_data *ip_data);
+u32 hdmi_ti_4xxx_irq_handler(struct hdmi_ip_data *ip_data);
+void hdmi_ti_4xxx_dump_regs(struct hdmi_ip_data *ip_data, struct seq_file *s);
+int hdmi_ti_4xxx_config_audio_acr(struct hdmi_ip_data *ip_data,
+ u32 sample_freq, u32 *n, u32 *cts, u32 pclk);
+void hdmi_ti_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_dma *aud_dma);
+
+void hdmi_ti_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+ struct hdmi_audio_format *aud_fmt);
+void hdmi_ti_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_audio_config *cfg);
+void hdmi_ti_4xxx_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
+ struct hdmi_core_infoframe_audio *info_aud);
+void hdmi_ti_4xxx_audio_transfer_en(struct hdmi_ip_data *ip_data,
+ bool idle);
+void hdmi_ti_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool idle);
+
+int hdmi_ti_4xxx_set_wait_soft_reset(struct hdmi_ip_data *ip_data);
+int hdmi_ti_4xx_check_aksv_data(struct hdmi_ip_data *ip_data);
+#endif
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index c0d8014..3be0bc36 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -21,8 +21,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <asm/atomic.h>
+#include <linux/fb.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -43,8 +42,11 @@
#define DISPC_IRQ_WAKEUP (1 << 16)
#define DISPC_IRQ_SYNC_LOST2 (1 << 17)
#define DISPC_IRQ_VSYNC2 (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
+#define DISPC_IRQ_FRAMEDONETV (1 << 24)
struct omap_dss_device;
struct omap_overlay_manager;
@@ -62,7 +64,8 @@
enum omap_plane {
OMAP_DSS_GFX = 0,
OMAP_DSS_VIDEO1 = 1,
- OMAP_DSS_VIDEO2 = 2
+ OMAP_DSS_VIDEO2 = 2,
+ OMAP_DSS_VIDEO3 = 3,
};
enum omap_channel {
@@ -126,6 +129,11 @@
OMAP_DSS_LCD_TFT = 1<<20,
};
+enum omap_dss_dsi_type {
+ OMAP_DSS_DSI_TYPE_CMD_MODE = 0,
+ OMAP_DSS_DSI_TYPE_VIDEO_MODE,
+};
+
enum omap_dss_venc_type {
OMAP_DSS_VENC_TYPE_COMPOSITE,
OMAP_DSS_VENC_TYPE_SVIDEO,
@@ -148,6 +156,11 @@
OMAP_DSS_DISPLAY_SUSPENDED,
};
+enum omap_dispc_irq_type {
+ OMAP_DISPC_IRQ_TYPE_FRAMEDONE,
+ OMAP_DISPC_IRQ_TYPE_VSYNC,
+};
+
/* XXX perhaps this should be removed */
enum omap_dss_overlay_managers {
OMAP_DSS_OVL_MGR_LCD,
@@ -158,6 +171,7 @@
enum omap_dss_rotation_type {
OMAP_DSS_ROT_DMA = 0,
OMAP_DSS_ROT_VRFB = 1,
+ OMAP_DSS_ROT_TILER = 2,
};
/* clockwise rotation angle */
@@ -188,6 +202,13 @@
OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
};
+enum omap_overlay_zorder {
+ OMAP_DSS_OVL_ZORDER_0 = 0,
+ OMAP_DSS_OVL_ZORDER_1 = 1,
+ OMAP_DSS_OVL_ZORDER_2 = 2,
+ OMAP_DSS_OVL_ZORDER_3 = 3,
+};
+
/* RFBI */
struct rfbi_timings {
@@ -244,9 +265,11 @@
int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel);
+int dsi_video_mode_enable(struct omap_dss_device *dssdev, u8 data_type);
+
/* Board specific data */
struct omap_dss_board_info {
- int (*get_last_off_on_transaction_id)(struct device *dev);
+ int (*get_context_loss_count)(struct device *dev);
int num_devices;
struct omap_dss_device **devices;
struct omap_dss_device *default_device;
@@ -266,8 +289,6 @@
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;
/* TODO: Additional members to be added when PM is considered */
-
- bool (*opt_clock_available)(const char *clk_role);
};
struct omap_video_timings {
@@ -300,6 +321,41 @@
extern const struct omap_video_timings omap_dss_ntsc_timings;
#endif
+enum omapdss_completion_status {
+ DSS_COMPLETION_PROGRAMMED = (1 << 1),
+ DSS_COMPLETION_DISPLAYED = (1 << 2),
+
+ DSS_COMPLETION_CHANGED_SET = (1 << 3),
+ DSS_COMPLETION_CHANGED_CACHE = (1 << 4),
+ DSS_COMPLETION_CHANGED = (3 << 3),
+
+ DSS_COMPLETION_RELEASED = (15 << 5),
+ DSS_COMPLETION_ECLIPSED_SET = (1 << 5),
+ DSS_COMPLETION_ECLIPSED_CACHE = (1 << 6),
+ DSS_COMPLETION_ECLIPSED_SHADOW = (1 << 7),
+ DSS_COMPLETION_TORN = (1 << 8),
+};
+
+struct omapdss_ovl_cb {
+ /* optional callback method */
+ u32 (*fn)(void *data, int id, int status);
+ void *data;
+ u32 mask;
+};
+
+struct omap_dss_cpr_coefs {
+ s16 rr, rg, rb;
+ s16 gr, gg, gb;
+ s16 br, bg, bb;
+};
+
+struct omap_dss_cconv_coefs {
+ s16 ry, rcr, rcb;
+ s16 gy, gcr, gcb;
+ s16 by, bcr, bcb;
+ u16 full_range;
+} __attribute__ ((aligned(4)));
+
struct omap_overlay_info {
bool enabled;
@@ -320,6 +376,11 @@
u16 out_height; /* if 0, out_height == height */
u8 global_alpha;
u8 pre_mult_alpha;
+ enum omap_overlay_zorder zorder;
+ u16 min_x_decim, max_x_decim, min_y_decim, max_y_decim;
+ struct omap_dss_cconv_coefs cconv;
+
+ struct omapdss_ovl_cb cb;
};
struct omap_overlay {
@@ -359,6 +420,11 @@
bool trans_enabled;
bool alpha_enabled;
+
+ struct omapdss_ovl_cb cb;
+
+ bool cpr_enable;
+ struct omap_dss_cpr_coefs cpr_coefs;
};
struct omap_overlay_manager {
@@ -393,6 +459,8 @@
int (*apply)(struct omap_overlay_manager *mgr);
int (*wait_for_go)(struct omap_overlay_manager *mgr);
int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+ int (*blank)(struct omap_overlay_manager *mgr, bool wait_for_vsync);
+ void (*dump_cb)(struct omap_overlay_manager *mgr, struct seq_file *s);
int (*enable)(struct omap_overlay_manager *mgr);
int (*disable)(struct omap_overlay_manager *mgr);
@@ -405,6 +473,9 @@
enum omap_channel channel;
+ bool first_vsync;
+ bool sync_lost_error;
+
union {
struct {
u8 data_lines;
@@ -420,6 +491,7 @@
} sdi;
struct {
+ enum omap_dss_dsi_type type;
u8 clk_lane;
u8 clk_pol;
u8 data1_lane;
@@ -430,7 +502,6 @@
u8 data3_pol;
u8 data4_lane;
u8 data4_pol;
-
int module;
bool ext_te;
@@ -461,12 +532,15 @@
u16 regm_dsi;
u16 lp_clk_div;
+ unsigned offset_ddr_clk;
enum omap_dss_clk_source dsi_fclk_src;
} dsi;
struct {
u16 regn;
u16 regm2;
+
+ u32 max_pixclk_khz;
} hdmi;
} clocks;
@@ -478,6 +552,10 @@
int acb; /* ac-bias pin frequency */
enum omap_panel_config config;
+ struct fb_monspecs monspecs;
+
+ u32 width_in_um;
+ u32 height_in_um;
} panel;
struct {
@@ -486,6 +564,9 @@
} ctrl;
int reset_gpio;
+ int hpd_gpio;
+
+ bool skip_init;
int max_backlight_level;
@@ -507,6 +588,8 @@
enum omap_dss_display_state state;
+ struct blocking_notifier_head state_notifiers;
+
/* platform specific */
int (*platform_enable)(struct omap_dss_device *dssdev);
void (*platform_disable)(struct omap_dss_device *dssdev);
@@ -568,6 +651,15 @@
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
+ int (*get_modedb)(struct omap_dss_device *dssdev,
+ struct fb_videomode *modedb,
+ int modedb_len);
+ int (*set_mode)(struct omap_dss_device *dssdev,
+ struct fb_videomode *mode);
+
+ /* for wrapping around state changes */
+ void (*disable_orig)(struct omap_dss_device *display);
+ int (*enable_orig)(struct omap_dss_device *display);
};
int omap_dss_register_driver(struct omap_dss_driver *);
@@ -601,9 +693,14 @@
int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
unsigned long timeout);
+void omap_dispc_set_irq_type(int channel, enum omap_dispc_irq_type type);
+
#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
+void omapdss_display_get_dimensions(struct omap_dss_device *dssdev,
+ u32 *width_in_um, u32 *height_in_um);
+
void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable);
int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
@@ -643,4 +740,18 @@
int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
int data_lines);
+int omap_dss_manager_unregister_callback(struct omap_overlay_manager *mgr,
+ struct omapdss_ovl_cb *cb);
+
+/* generic callback handling */
+static inline void dss_ovl_cb(struct omapdss_ovl_cb *cb, int id, int status)
+{
+ if (cb->fn && (cb->mask & status))
+ cb->mask &= cb->fn(cb->data, id, status);
+ if (status & DSS_COMPLETION_RELEASED)
+ cb->mask = 0;
+ if (!cb->mask)
+ cb->fn = NULL;
+}
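+
+/*
+ * Illustrative usage sketch (not part of this patch): a caller that has just
+ * displayed an overlay buffer would typically report it as
+ *
+ *	dss_ovl_cb(&info->cb, ovl->id, DSS_COMPLETION_DISPLAYED);
+ *
+ * where info and ovl are hypothetical names for the overlay info and overlay.
+ * The callback only fires for statuses present in cb->mask, and it is dropped
+ * once DSS_COMPLETION_RELEASED has been signalled or the mask becomes empty.
+ */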
+
#endif
diff --git a/kernel/Makefile b/kernel/Makefile
index 2d64cfc..e4fd98b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 0000000..249152e
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
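+
+/*
+ * Illustrative sketch (not part of this patch): a driver that must save and
+ * restore per-CPU hardware state around low power entry could register a
+ * notifier as below. The callback, notifier_block and save/restore helper
+ * names are hypothetical.
+ *
+ *	static int my_cpu_pm_notify(struct notifier_block *nb,
+ *				    unsigned long action, void *data)
+ *	{
+ *		switch (action) {
+ *		case CPU_PM_ENTER:
+ *			save_my_percpu_context();
+ *			break;
+ *		case CPU_PM_ENTER_FAILED:
+ *		case CPU_PM_EXIT:
+ *			restore_my_percpu_context();
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_cpu_pm_nb = {
+ *		.notifier_call = my_cpu_pm_notify,
+ *	};
+ *
+ *	cpu_pm_register_notifier(&my_cpu_pm_nb);
+ */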
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled. The platform
+ * is responsible for ensuring that cpu_pm_enter is not called twice on the
+ * same CPU before cpu_pm_exit is called. Notified drivers can include the
+ * VFP co-processor, the interrupt controller and its PM extensions, and
+ * local CPU timers whose context save/restore must not be interrupted.
+ * Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were already
+ * notified that CPU PM entry has failed.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include the VFP co-processor, the interrupt controller
+ * and its PM extensions, and local CPU timers whose context save/restore
+ * must not be interrupted. Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted. Hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were already
+ * notified that CPU cluster PM entry has failed.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting from a
+ * low power state that may have caused some blocks in the same power domain
+ * to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted. Hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
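+
+/*
+ * Illustrative call ordering (not part of this patch) for a CPU idle path,
+ * run with interrupts disabled; "last_cpu_in_cluster" is a hypothetical
+ * platform-specific condition used only for illustration:
+ *
+ *	cpu_pm_enter();
+ *	if (last_cpu_in_cluster)
+ *		cpu_cluster_pm_enter();
+ *
+ *	... enter and exit the low power state ...
+ *
+ *	if (last_cpu_in_cluster)
+ *		cpu_cluster_pm_exit();
+ *	cpu_pm_exit();
+ */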
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return ret;
+
+ ret = cpu_cluster_pm_enter();
+ return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+ cpu_cluster_pm_exit();
+ cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+ .suspend = cpu_pm_suspend,
+ .resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+ register_syscore_ops(&cpu_pm_syscore_ops);
+ return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b90fb99..4e701e6 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -295,6 +295,10 @@
def_bool y
depends on PM_RUNTIME && HAVE_CLK
+config CPU_PM
+ bool
+ depends on SUSPEND || CPU_IDLE
+
config SUSPEND_TIME
bool "Log time spent in suspend"
---help---
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b5..d4ffb55 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -188,6 +188,8 @@
source security/integrity/ima/Kconfig
+source security/smc/Kconfig
+
choice
prompt "Default security module"
default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9..968c101 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -26,3 +26,4 @@
# Object integrity file lists
subdir-$(CONFIG_IMA) += integrity/ima
obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += smc/
diff --git a/security/smc/Kconfig b/security/smc/Kconfig
new file mode 100644
index 0000000..9fcd1f6
--- /dev/null
+++ b/security/smc/Kconfig
@@ -0,0 +1,45 @@
+config TF_MSHIELD
+ bool
+
+config SECURITY_MIDDLEWARE_COMPONENT
+ bool "Enable SMC Driver"
+ depends on ARCH_OMAP3 || ARCH_OMAP4
+ default n
+ select TF_MSHIELD
+ help
+ This option adds kernel support for communication with the SMC
+ Protected Application.
+
+ If you are unsure how to answer this question, answer N.
+
+config SMC_KERNEL_CRYPTO
+ bool "Register SMC into kernel crypto subsytem"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default y
+ help
+ This option enables the crypto subsystem to use SMC and OMAP hardware
+ accelerators.
+
+ If you are unsure how to answer this question, answer Y.
+
+config SECURE_TRACE
+ bool "Enable SMC secure traces"
+ depends on SECURITY_MIDDLEWARE_COMPONENT && ARCH_OMAP4
+ default y
+ help
+ This option enables traces from the SMC Protected Application to be
+ displayed in kernel logs.
+
+config TF_DRIVER_DEBUG_SUPPORT
+ bool "Debug support"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default n
+ help
+ This option enables debug traces in the driver.
+
+config SMC_BENCH_SECURE_CYCLE
+ bool "Enable secure cycles benchmarks"
+ depends on TF_DRIVER_DEBUG_SUPPORT && ARCH_OMAP4
+ default n
+ help
+ This option enables benchmarks.
diff --git a/security/smc/Makefile b/security/smc/Makefile
new file mode 100644
index 0000000..80cf430
--- /dev/null
+++ b/security/smc/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(CONFIG_SECURITY_MIDDLEWARE_COMPONENT),y)
+obj-$(CONFIG_ARCH_OMAP4) += omap4/
+endif
diff --git a/security/smc/omap4/Makefile b/security/smc/omap4/Makefile
new file mode 100644
index 0000000..de75cc2
--- /dev/null
+++ b/security/smc/omap4/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+EXTRA_CFLAGS += -Iarch/arm/mach-omap2
+
+tf_driver-objs += scxlnx_util.o
+tf_driver-objs += scxlnx_device.o
+tf_driver-objs += scx_public_crypto.o
+tf_driver-objs += scx_public_crypto_Digest.o
+tf_driver-objs += scx_public_crypto_AES.o
+tf_driver-objs += scx_public_dma.o
+tf_driver-objs += scxlnx_comm_mshield.o
+
+obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o
diff --git a/security/smc/omap4/scx_protocol.h b/security/smc/omap4/scx_protocol.h
new file mode 100644
index 0000000..80653eb
--- /dev/null
+++ b/security/smc/omap4/scx_protocol.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PROTOCOL_H__
+#define __SCX_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structures used in the SChannel Protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define SCX_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define SCX_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
+
+/*
+ * The size, in bytes, of the L1 Shared Buffer.
+ */
+#define SCX_COMM_BUFFER_SIZE (0x1000) /* 4kB*/
+
+/*
+ * The S flag of the nConfigFlags_S register.
+ */
+#define SCX_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the nSyncSerial_N register.
+ */
+#define SCX_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * nStatus_S related defines.
+ */
+#define SCX_STATUS_P_MASK (0X00000001)
+#define SCX_STATUS_POWER_STATE_SHIFT (3)
+#define SCX_STATUS_POWER_STATE_MASK (0x1F << SCX_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the nStatus_S register
+ */
+#define SCX_POWER_MODE_COLD_BOOT (0)
+#define SCX_POWER_MODE_WARM_BOOT (1)
+#define SCX_POWER_MODE_ACTIVE (3)
+#define SCX_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define SCX_POWER_MODE_READY_TO_HIBERNATE (7)
+#define SCX_POWER_MODE_WAKEUP (8)
+#define SCX_POWER_MODE_PANIC (15)
+
+/*
+ * Possible nCommand values for MANAGEMENT commands
+ */
+#define SCX_MANAGEMENT_HIBERNATE (1)
+#define SCX_MANAGEMENT_SHUTDOWN (2)
+#define SCX_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define SCX_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal World message queue, in number of slots.
+ */
+#define SCX_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define SCX_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define SCX_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define SCX_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define SCX_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define SCX_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define SCX_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define SCX_SMC_INIT (0XFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define SCX_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define SCX_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define SCX_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define SCX_SMC_N_YIELD (0X00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct SCX_UUID {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct SCX_COMMAND_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_COMMAND_PARAM_TEMP_MEMREF {
+ u32 nDescriptor; /* data pointer for exchange message.*/
+ u32 nSize;
+ u32 nOffset;
+};
+
+struct SCX_COMMAND_PARAM_MEMREF {
+ u32 hBlock;
+ u32 nSize;
+ u32 nOffset;
+};
+
+union SCX_COMMAND_PARAM {
+ struct SCX_COMMAND_PARAM_VALUE sValue;
+ struct SCX_COMMAND_PARAM_TEMP_MEMREF sTempMemref;
+ struct SCX_COMMAND_PARAM_MEMREF sMemref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct SCX_ANSWER_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_ANSWER_PARAM_SIZE {
+ u32 _ignored;
+ u32 nSize;
+};
+
+union SCX_ANSWER_PARAM {
+ struct SCX_ANSWER_PARAM_SIZE sSize;
+ struct SCX_ANSWER_PARAM_VALUE sValue;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define SCX_MAX_W3B_COARSE_PAGES (2)
+#define SCX_MAX_COARSE_PAGES (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (SCX_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define SCX_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * SCX_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define SCX_MAX_SHMEM_SIZE \
+ (SCX_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
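+
+/*
+ * Worked example (assuming 4 KiB pages): SCX_DESCRIPTOR_TABLE_CAPACITY is
+ * 1 << 8 = 256 descriptors, so one coarse page maps 256 * 4 KiB = 1 MiB and
+ * SCX_MAX_SHMEM_SIZE is 1 MiB << 3 = 8 MiB, matching the comments above.
+ */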
+
+/*
+ * Buffer size for version description fields
+ */
+#define SCX_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define SCX_SHMEM_TYPE_READ (0x00000001)
+#define SCX_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define SCX_SHARED_MEM_FLAG_INPUT 1
+#define SCX_SHARED_MEM_FLAG_OUTPUT 2
+#define SCX_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define SCX_PARAM_TYPE_NONE 0x0
+#define SCX_PARAM_TYPE_VALUE_INPUT 0x1
+#define SCX_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define SCX_PARAM_TYPE_VALUE_INOUT 0x3
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define SCX_PARAM_TYPE_MEMREF_INPUT 0xD
+#define SCX_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define SCX_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define SCX_PARAM_TYPE_MEMREF_FLAG 0x4
+#define SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define SCX_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define SCX_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
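+
+/*
+ * Example (illustrative only): a command carrying one input value in slot 0
+ * and one temporary input memref in slot 1, with the other slots unused,
+ * would use
+ *
+ *	nParamTypes = SCX_MAKE_PARAM_TYPES(SCX_PARAM_TYPE_VALUE_INPUT,
+ *			SCX_PARAM_TYPE_MEMREF_TEMP_INPUT,
+ *			SCX_PARAM_TYPE_NONE, SCX_PARAM_TYPE_NONE);
+ *
+ * and SCX_GET_PARAM_TYPE(nParamTypes, 1) would then return
+ * SCX_PARAM_TYPE_MEMREF_TEMP_INPUT.
+ */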
+
+/*
+ * Login types.
+ */
+#define SCX_LOGIN_PUBLIC 0x00000000
+#define SCX_LOGIN_USER 0x00000001
+#define SCX_LOGIN_GROUP 0x00000002
+#define SCX_LOGIN_APPLICATION 0x00000004
+#define SCX_LOGIN_APPLICATION_USER 0x00000005
+#define SCX_LOGIN_APPLICATION_GROUP 0x00000006
+#define SCX_LOGIN_AUTHENTICATION 0x80000000
+#define SCX_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define SCX_LOGIN_VARIANT(mainType, os, variant) \
+ ((mainType) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define SCX_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~SCX_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define SCX_LOGIN_OS_ANY 0x00
+#define SCX_LOGIN_OS_LINUX 0x01
+#define SCX_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define SCX_LOGIN_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_GROUP_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_APPLICATION_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_AUTHENTICATION, SCX_LOGIN_OS_ANY, 0x01)
+#define SCX_LOGIN_PRIVILEGED_KERNEL \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_PRIVILEGED, SCX_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define SCX_LOGIN_USER_LINUX_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_GROUP_LINUX_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
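+
+/*
+ * Example (illustrative): SCX_LOGIN_USER_LINUX_EUID expands to
+ * 0x00000001 | (1 << 27) | (0x01 << 16) | (0x01 << 8) == 0x08010101,
+ * and SCX_LOGIN_GET_MAIN_TYPE() of that value gives back SCX_LOGIN_USER.
+ */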
+
+/* Android variants */
+#define SCX_LOGIN_USER_ANDROID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_GROUP_ANDROID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_ANDROID_UID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
+
+/*
+ * return origins
+ */
+#define SCX_ORIGIN_COMMS 2
+#define SCX_ORIGIN_TEE 3
+#define SCX_ORIGIN_TRUSTED_APP 4
+/*
+ * The SCX message types.
+ */
+#define SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define SCX_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The error codes
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_NO_DATA 0xFFFF000B
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct SCX_COMMAND_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+};
+
+struct SCX_ANSWER_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nDeviceContextID;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ /* an opaque Normal World identifier for the device context */
+ u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 nDeviceContextID;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_OPEN_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nParamTypes;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 nCancellationID;
+ u64 sTimeout;
+ struct SCX_UUID sDestinationUUID;
+ union SCX_COMMAND_PARAM sParams[4];
+ u32 nLoginType;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 sLoginData[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_OPEN_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u8 nReturnOrigin;
+ u8 __nReserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 hClientSession;
+ union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_CLOSE_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_CLOSE_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct SCX_COMMAND_REGISTER_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMemoryFlags;
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 nBlockID;
+ u32 nSharedMemSize;
+ u32 nSharedMemStartOffset;
+ u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_REGISTER_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct SCX_COMMAND_RELEASE_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_RELEASE_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 nBlockID;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct SCX_COMMAND_INVOKE_CLIENT_COMMAND {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nParamTypes;
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+ u64 sTimeout;
+ u32 nCancellationID;
+ u32 nClientCommandIdentifier;
+ union SCX_COMMAND_PARAM sParams[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct SCX_ANSWER_INVOKE_CLIENT_COMMAND {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u8 nReturnOrigin;
+ u8 __nReserved;
+ u32 nOperationID;
+ u32 nErrorCode;
+ union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct SCX_COMMAND_CANCEL_CLIENT_OPERATION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+ u32 nCancellationID;
+};
+
+struct SCX_ANSWER_CANCEL_CLIENT_OPERATION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct SCX_COMMAND_MANAGEMENT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nCommand;
+ u32 nOperationID;
+ u32 nW3BSize;
+ u32 nW3BStartOffset;
+ u32 nSharedMemDescriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not provide message specific parameters.
+ * Therefore no need to define a specific answer structure
+ */
+
+/*
+ * Structure for L2 messages
+ */
+union SCX_COMMAND_MESSAGE {
+ struct SCX_COMMAND_HEADER sHeader;
+ struct SCX_COMMAND_CREATE_DEVICE_CONTEXT sCreateDeviceContextMessage;
+ struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextMessage;
+ struct SCX_COMMAND_OPEN_CLIENT_SESSION sOpenClientSessionMessage;
+ struct SCX_COMMAND_CLOSE_CLIENT_SESSION sCloseClientSessionMessage;
+ struct SCX_COMMAND_REGISTER_SHARED_MEMORY sRegisterSharedMemoryMessage;
+ struct SCX_COMMAND_RELEASE_SHARED_MEMORY sReleaseSharedMemoryMessage;
+ struct SCX_COMMAND_INVOKE_CLIENT_COMMAND sInvokeClientCommandMessage;
+ struct SCX_COMMAND_CANCEL_CLIENT_OPERATION
+ sCancelClientOperationMessage;
+ struct SCX_COMMAND_MANAGEMENT sManagementMessage;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+
+union SCX_ANSWER_MESSAGE {
+ struct SCX_ANSWER_HEADER sHeader;
+ struct SCX_ANSWER_CREATE_DEVICE_CONTEXT sCreateDeviceContextAnswer;
+ struct SCX_ANSWER_OPEN_CLIENT_SESSION sOpenClientSessionAnswer;
+ struct SCX_ANSWER_CLOSE_CLIENT_SESSION sCloseClientSessionAnswer;
+ struct SCX_ANSWER_REGISTER_SHARED_MEMORY sRegisterSharedMemoryAnswer;
+ struct SCX_ANSWER_RELEASE_SHARED_MEMORY sReleaseSharedMemoryAnswer;
+ struct SCX_ANSWER_INVOKE_CLIENT_COMMAND sInvokeClientCommandAnswer;
+ struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextAnswer;
+ struct SCX_ANSWER_CANCEL_CLIENT_OPERATION sCancelClientOperationAnswer;
+};
+
+/* Structure of the Communication Buffer */
+struct SCHANNEL_C1S_BUFFER {
+ u32 nConfigFlags_S;
+ u32 nW3BSizeMax_S;
+ u32 nReserved0;
+ u32 nW3BSizeCurrent_S;
+ u8 sReserved1[48];
+ u8 sVersionDescription[SCX_DESCRIPTION_BUFFER_LENGTH];
+ u32 nStatus_S;
+ u32 sReserved2;
+ u32 nSyncSerial_N;
+ u32 nSyncSerial_S;
+ u64 sTime_N[2];
+ u64 sTimeout_S[2];
+ u32 nFirstCommand;
+ u32 nFirstFreeCommand;
+ u32 nFirstAnswer;
+ u32 nFirstFreeAnswer;
+ u32 nW3BDescriptors[128];
+ #ifdef CONFIG_TF_MSHIELD
+ u8 sRPCTraceBuffer[140];
+ u8 sRPCShortcutBuffer[180];
+ #else
+ u8 sReserved3[320];
+ #endif
+ u32 sCommandQueue[SCX_N_MESSAGE_QUEUE_CAPACITY];
+ u32 sAnswerQueue[SCX_S_ANSWER_QUEUE_CAPACITY];
+};
+
+
+/*
+ * SCX_VERSION_INFORMATION_BUFFER structure description
+ * Description of the sVersionBuffer handed over from user space to kernel space
+ * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
+ * and handed back to user space
+ */
+struct SCX_VERSION_INFORMATION_BUFFER {
+ u8 sDriverDescription[65];
+ u8 sSecureWorldDescription[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_SCX_GET_VERSION _IO('z', 0)
+#define IOCTL_SCX_EXCHANGE _IOWR('z', 1, union SCX_COMMAND_MESSAGE)
+#define IOCTL_SCX_GET_DESCRIPTION _IOR('z', 2, \
+ struct SCX_VERSION_INFORMATION_BUFFER)
+
+#endif /* !defined(__SCX_PROTOCOL_H__) */
diff --git a/security/smc/omap4/scx_public_crypto.c b/security/smc/omap4/scx_public_crypto.c
new file mode 100644
index 0000000..d6b751c
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scxlnx_mshield.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+
+#define IO_ADDRESS OMAP2_L4_IO_ADDRESS
+
+#define S_SUCCESS 0x00000000
+#define S_ERROR_GENERIC 0xFFFF0000
+#define S_ERROR_ACCESS_DENIED 0xFFFF0001
+#define S_ERROR_BAD_FORMAT 0xFFFF0005
+#define S_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define S_ERROR_SHORT_BUFFER 0xFFFF0010
+#define S_ERROR_UNREACHABLE 0xFFFF3013
+#define S_ERROR_SERVICE 0xFFFF1000
+
+#define CKR_OK 0x00000000
+
+#define PUBLIC_CRYPTO_TIMEOUT_CONST 0x000FFFFF
+
+#define RPC_AES1_CODE PUBLIC_CRYPTO_HWA_AES1
+#define RPC_AES2_CODE PUBLIC_CRYPTO_HWA_AES2
+#define RPC_DES_CODE PUBLIC_CRYPTO_HWA_DES
+#define RPC_SHA_CODE PUBLIC_CRYPTO_HWA_SHA
+
+#define RPC_CRYPTO_COMMAND_MASK 0x000003c0
+
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR 0x200
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_UNLOCK 0x000
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK 0x001
+
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT 0x240
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES1 RPC_AES1_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES2 RPC_AES2_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_DES RPC_DES_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_SHA RPC_SHA_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND 0x010
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL 0x020
+
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS 0x280
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1 RPC_AES1_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES2 RPC_AES2_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES RPC_DES_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_SHA RPC_SHA_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME 0x010
+
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT 0x2c0
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_AES 0x001
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_DES 0x002
+
+#define ENABLE_CLOCK true
+#define DISABLE_CLOCK false
+
+/*---------------------------------------------------------------------------*/
+/*RPC IN/OUT structures for CUS implementation */
+/*---------------------------------------------------------------------------*/
+
+struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_OUT {
+ u32 nShortcutID;
+ u32 nError;
+};
+
+struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_IN {
+ u32 nDeviceContextID;
+ u32 hClientSession;
+ u32 nCommandID;
+ u32 hKeyContext;
+ /**
+ *The identifier of the HWA that this shortcut uses.
+ *Possible values are:
+ *- 1 (RPC_AES1_CODE)
+ *- 2 (RPC_AES2_CODE)
+ *- 4 (RPC_DES_CODE)
+ *- 8 (RPC_SHA_CODE)
+ **/
+ u32 nHWAID;
+ /**
+ *This field defines the algorithm, direction, mode, key size.
+ *It contains some of the bits of the corresponding "CTRL" register
+ *of the accelerator.
+ *
+ *More precisely:
+ *For AES1 accelerator, nHWA_CTRL contains the following bits:
+ *- CTR (bit 6):
+ * when 1, selects CTR mode.
+ * when 0, selects CBC or ECB mode (according to CBC bit)
+ *- CBC (bit 5)
+ * when 1, selects CBC mode (but only if CTR=0)
+ * when 0, selects ECB mode (but only if CTR=0)
+ *- DIRECTION (bit 2)
+ * 0: decryption
+ * 1: encryption
+ *
+ *For the DES2 accelerator, nHWA_CTRL contains the following bits:
+ *- CBC (bit 4): 1 for CBC, 0 for ECB
+ *- DIRECTION (bit 2): 0 for decryption, 1 for encryption
+ *
+ *For the SHA accelerator, nHWA_CTRL contains the following bits:
+ *- ALGO (bit 2:1):
+ * 0x0: MD5
+ * 0x1: SHA1
+ * 0x2: SHA-224
+ * 0x3: SHA-256
+ **/
+ u32 nHWA_CTRL;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
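+
+/*
+ * Worked example (illustrative only, not part of the protocol definition):
+ * a shortcut for AES1 CBC encryption would set nHWAID to RPC_AES1_CODE and
+ * build nHWA_CTRL from the bits described above, i.e. CBC (bit 5) plus
+ * DIRECTION = encrypt (bit 2), with CTR (bit 6) left clear:
+ *
+ *	nHWA_CTRL = (1 << 5) | (1 << 2);	so nHWA_CTRL == 0x24
+ */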
+
+struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_OUT {
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
+
+struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_IN {
+ u32 nShortcutID;
+};
+
+struct RPC_RESUME_SHORTCUT_UNLOCK_HWA_IN {
+ u32 nShortcutID;
+ u32 hAES1KeyContext;
+ u32 hAES2KeyContext;
+ u32 hDESKeyContext;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
+
+/*------------------------------------------------------------------------- */
+/*
+ * Lock or unlock the public HWA selected by nHWAID.
+ */
+void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock)
+{
+ int is_sem = 0;
+ struct semaphore *s = NULL;
+ struct mutex *m = NULL;
+ struct SCXLNX_DEVICE *dev = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA:nHWAID=0x%04X bDoLock=%d\n",
+ nHWAID, bDoLock);
+
+ switch (nHWAID) {
+ case RPC_AES1_CODE:
+ s = &dev->sAES1CriticalSection;
+ is_sem = 1;
+ break;
+ case RPC_AES2_CODE:
+ s = &dev->sAES2CriticalSection;
+ is_sem = 1;
+ break;
+ case RPC_DES_CODE:
+ m = &dev->sDESCriticalSection;
+ break;
+ default:
+ case RPC_SHA_CODE:
+ m = &dev->sSHACriticalSection;
+ break;
+ }
+
+ if (bDoLock == LOCK_HWA) {
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Wait for HWAID=0x%04X\n", nHWAID);
+ if (is_sem) {
+ while (down_trylock(s))
+ cpu_relax();
+ } else {
+ while (!mutex_trylock(m))
+ cpu_relax();
+ }
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Locked on HWAID=0x%04X\n", nHWAID);
+ } else {
+ if (is_sem)
+ up(s);
+ else
+ mutex_unlock(m);
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Released for HWAID=0x%04X\n", nHWAID);
+ }
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Initialize the public crypto DMA channels, global HWA semaphores and handles
+ */
+u32 SCXPublicCryptoInit(void)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+ u32 nError = PUBLIC_CRYPTO_OPERATION_SUCCESS;
+
+ /* Initialize HWAs */
+ PDrvCryptoAESInit();
+ PDrvCryptoDigestInit();
+
+ /*initialize the HWA semaphores */
+ sema_init(&pDevice->sAES1CriticalSection, 1);
+ sema_init(&pDevice->sAES2CriticalSection, 1);
+ mutex_init(&pDevice->sSHACriticalSection);
+
+ /*initialize the current key handle loaded in the AESn/DES HWA */
+ pDevice->hAES1SecureKeyContext = 0;
+ pDevice->hAES2SecureKeyContext = 0;
+ pDevice->bSHAM1IsPublic = false;
+
+ /*initialize the DMA semaphores */
+ mutex_init(&pDevice->sm.sDMALock);
+
+ /*allocate DMA buffer */
+ pDevice->nDMABufferLength = PAGE_SIZE * 16;
+ pDevice->pDMABuffer = dma_alloc_coherent(NULL,
+ pDevice->nDMABufferLength,
+ &(pDevice->pDMABufferPhys),
+ GFP_KERNEL);
+ if (pDevice->pDMABuffer == NULL) {
+ printk(KERN_ERR
+ "SCXPublicCryptoInit: Out of memory for DMA buffer\n");
+ nError = S_ERROR_OUT_OF_MEMORY;
+ }
+
+ return nError;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Initialize the device context CUS fields (shortcut semaphore and public CUS
+ *list)
+ */
+void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext)
+{
+ /*initialize the CUS list in the given device context */
+ spin_lock_init(&(pDeviceContext->shortcutListCriticalSectionLock));
+ INIT_LIST_HEAD(&(pDeviceContext->ShortcutList));
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void SCXPublicCryptoTerminate()
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ if (pDevice->pDMABuffer != NULL) {
+ dma_free_coherent(NULL, pDevice->nDMABufferLength,
+ pDevice->pDMABuffer,
+ pDevice->pDMABufferPhys);
+ pDevice->pDMABuffer = NULL;
+ }
+
+ PDrvCryptoDigestExit();
+ PDrvCryptoAESExit();
+}
+
+/*------------------------------------------------------------------------- */
+
+void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit)
+{
+ while (!(INREG32(pRegister) & vBit))
+ ;
+}
+
+/*------------------------------------------------------------------------- */
+
+u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit)
+{
+ u32 timeoutCounter = PUBLIC_CRYPTO_TIMEOUT_CONST;
+
+ while ((!(INREG32(pRegister) & vBit)) && ((--timeoutCounter) != 0))
+ ;
+
+ if (timeoutCounter == 0)
+ return PUBLIC_CRYPTO_ERR_TIMEOUT;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------- */
+
+static DEFINE_SPINLOCK(clk_lock);
+
+void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr)
+{
+ u32 *pClockReg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "SCXPublicCryptoDisableClock: " \
+ "vClockPhysAddr=0x%08X\n",
+ vClockPhysAddr);
+
+ /* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr);
+
+ val = __raw_readl(pClockReg);
+ val &= ~(0x3);
+ __raw_writel(val, pClockReg);
+
+ /* Wait for clock to be fully disabled */
+ while ((__raw_readl(pClockReg) & 0x30000) == 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+
+ tf_l4sec_clkdm_allow_idle(false, true);
+}
+
+/*------------------------------------------------------------------------- */
+
+void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr)
+{
+ u32 *pClockReg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "SCXPublicCryptoEnableClock: " \
+ "vClockPhysAddr=0x%08X\n",
+ vClockPhysAddr);
+
+ tf_l4sec_clkdm_wakeup(false, true);
+
+ /* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr);
+
+ val = __raw_readl(pClockReg);
+ val |= 0x2;
+ __raw_writel(val, pClockReg);
+
+ /* Wait for clock to be fully enabled */
+ while ((__raw_readl(pClockReg) & 0x30000) != 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+}
+
diff --git a/security/smc/omap4/scx_public_crypto.h b/security/smc/omap4/scx_public_crypto.h
new file mode 100644
index 0000000..984cb18
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c)2006-2008 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PUBLIC_CRYPTO_H
+#define __SCX_PUBLIC_CRYPTO_H
+
+#include "scxlnx_defs.h"
+#include <linux/io.h>
+#include <mach/io.h>
+
+#include <clockdomain.h>
+
+/*-------------------------------------------------------------------------- */
+
+#define PUBLIC_CRYPTO_HWA_AES1 0x1
+#define PUBLIC_CRYPTO_HWA_AES2 0x2
+#define PUBLIC_CRYPTO_HWA_DES 0x4
+#define PUBLIC_CRYPTO_HWA_SHA 0x8
+
+#define OUTREG32(a, b) __raw_writel(b, a)
+#define INREG32(a) __raw_readl(a)
+#define SETREG32(x, y) OUTREG32(x, INREG32(x) | (y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x) & ~(y))
+
+#define PUBLIC_CRYPTO_CLKSTCTRL_CLOCK_REG 0x4A009580
+#define PUBLIC_CRYPTO_AES1_CLOCK_REG 0x4A0095A0
+#define PUBLIC_CRYPTO_AES2_CLOCK_REG 0x4A0095A8
+#define PUBLIC_CRYPTO_DES3DES_CLOCK_REG 0x4A0095B0
+#define PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG 0x4A0095C8
+
+#define BYTES_TO_LONG(a)(u32)(a[0] | (a[1]<<8) | (a[2]<<16) | (a[3]<<24))
+#define LONG_TO_BYTE(a, b) { a[0] = (u8)((b) & 0xFF); \
+ a[1] = (u8)(((b) >> 8) & 0xFF); \
+ a[2] = (u8)(((b) >> 16) & 0xFF); \
+ a[3] = (u8)(((b) >> 24) & 0xFF); }
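+
+/*
+ * Example (illustrative): for u8 a[4] = { 0x44, 0x33, 0x22, 0x11 },
+ * BYTES_TO_LONG(a) yields 0x11223344 (little-endian interpretation of the
+ * four bytes), and LONG_TO_BYTE(a, x) performs the inverse store.
+ */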
+
+#define IS_4_BYTES_ALIGNED(x)((!((x) & 0x3)) ? true : false)
+
+#define TF_SMC_OMAP4_PUBLIC_DMA
+
+/*
+ *The size limit to trigger DMA for AES, DES and Digest.
+ *0xFFFFFFFF means "never"
+ */
+#ifdef TF_SMC_OMAP4_PUBLIC_DMA
+#define DMA_TRIGGER_IRQ_AES 128
+#define DMA_TRIGGER_IRQ_DES 128
+#define DMA_TRIGGER_IRQ_DIGEST 1024
+#else
+#define DMA_TRIGGER_IRQ_AES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DIGEST 0xFFFFFFFF
+#endif
+
+/*Error code constants */
+#define PUBLIC_CRYPTO_OPERATION_SUCCESS 0x00000000
+#define PUBLIC_CRYPTO_ERR_ACCESS_DENIED 0x00000001
+#define PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY 0x00000002
+#define PUBLIC_CRYPTO_ERR_BAD_PARAMETERS 0x00000003
+#define PUBLIC_CRYPTO_ERR_TIMEOUT 0x00000004
+
+/*DMA mode constants */
+#define PUBLIC_CRYPTO_DMA_USE_NONE 0x00000000 /*No DMA used*/
+/*DMA with active polling used */
+#define PUBLIC_CRYPTO_DMA_USE_POLLING 0x00000001
+#define PUBLIC_CRYPTO_DMA_USE_IRQ 0x00000002 /*DMA with IRQ used*/
+
+#define PUBLIC_CRYPTO_REG_SET_BIT(x, y) OUTREG32(x, INREG32(x) | y);
+#define PUBLIC_CRYPTO_REG_UNSET_BIT(x, y) OUTREG32(x, INREG32(x) & (~y));
+
+#define AES_BLOCK_SIZE 16
+#define DES_BLOCK_SIZE 8
+#define HASH_BLOCK_SIZE 64
+
+#define HASH_MD5_LENGTH 16
+#define HASH_SHA1_LENGTH 20
+#define HASH_SHA224_LENGTH 28
+#define HASH_SHA256_LENGTH 32
+
+#define PUBLIC_CRYPTO_DIGEST_MAX_SIZE 32
+#define PUBLIC_CRYPTO_IV_MAX_SIZE 16
+
+#define PUBLIC_CRYPTO_HW_CLOCK_ADDR (0x48004A14)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE_ADDR (0x48004A34)
+
+#define PUBLIC_CRYPTO_HW_CLOCK1_ADDR (0x48004A10)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE1_ADDR (0x48004A30)
+
+#define DIGEST_CTRL_ALGO_MD5 0
+#define DIGEST_CTRL_ALGO_SHA1 1
+#define DIGEST_CTRL_ALGO_SHA224 2
+#define DIGEST_CTRL_ALGO_SHA256 3
+
+/*-------------------------------------------------------------------------- */
+/*
+ *The magic word.
+ */
+#define CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC 0x45EF683C
+
+/*-------------------------------------------------------------------------- */
+/* CUS context structure */
+/*-------------------------------------------------------------------------- */
+
+/* State of an AES operation */
+struct PUBLIC_CRYPTO_AES_OPERATION_STATE {
+ u32 AES_IV_0;
+ u32 AES_IV_1;
+ u32 AES_IV_2;
+ u32 AES_IV_3;
+
+ u32 CTRL;
+
+ /* Only used by Linux crypto API interface */
+ u32 KEY1_0;
+ u32 KEY1_1;
+ u32 KEY1_2;
+ u32 KEY1_3;
+ u32 KEY1_4;
+ u32 KEY1_5;
+ u32 KEY1_6;
+ u32 KEY1_7;
+
+ u32 key_is_public;
+};
+
+struct PUBLIC_CRYPTO_DES_OPERATION_STATE {
+ u32 DES_IV_L;
+ u32 DES_IV_H;
+};
+
+#define HASH_BLOCK_BYTES_LENGTH 64
+
+struct PUBLIC_CRYPTO_SHA_OPERATION_STATE {
+ /* Current digest */
+ u32 SHA_DIGEST_A;
+ u32 SHA_DIGEST_B;
+ u32 SHA_DIGEST_C;
+ u32 SHA_DIGEST_D;
+ u32 SHA_DIGEST_E;
+ u32 SHA_DIGEST_F;
+ u32 SHA_DIGEST_G;
+ u32 SHA_DIGEST_H;
+
+ /* This buffer contains a partial chunk */
+ u8 pChunkBuffer[HASH_BLOCK_BYTES_LENGTH];
+
+ /* Number of bytes stored in pChunkBuffer (0..64) */
+ u32 nChunkLength;
+
+ /*
+ * Total number of bytes processed so far
+ * (not including the partial chunk)
+ */
+ u32 nBytesProcessed;
+
+ u32 CTRL;
+};
+
+union PUBLIC_CRYPTO_OPERATION_STATE {
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE aes;
+ struct PUBLIC_CRYPTO_DES_OPERATION_STATE des;
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE sha;
+};
+
+/*
+ *Fully describes a public crypto operation
+ *(i.e., an operation that has a shortcut attached).
+ */
+struct CRYPTOKI_UPDATE_SHORTCUT_CONTEXT {
+ /*
+ *Identifies the public crypto operation in the list of all public
+ *operations.
+ */
+ struct list_head list;
+
+ u32 nMagicNumber; /*Must be set to
+ *{CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC} */
+
+ /*basic fields */
+ u32 hClientSession;
+ u32 nCommandID;
+ u32 nHWAID;
+ u32 nHWA_CTRL;
+ u32 hKeyContext;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+ u32 nUseCount;
+ bool bSuspended;
+};
+
+struct CRYPTOKI_UPDATE_PARAMS {
+ /*fields for data processing of an update command */
+ u32 nInputDataLength;
+ u8 *pInputData;
+ struct SCXLNX_SHMEM_DESC *pInputShmem;
+
+ u32 nResultDataLength;
+ u8 *pResultData;
+ struct SCXLNX_SHMEM_DESC *pOutputShmem;
+
+ u8 *pS2CDataBuffer;
+ u32 nS2CDataBufferMaxLength;
+};
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Public crypto API (Top level)
+ */
+
+/*
+ *Initialize the public crypto DMA channels and global HWA semaphores
+ */
+u32 SCXPublicCryptoInit(void);
+
+/*
+ *Initialize the device context CUS fields
+ *(shortcut semaphore and public CUS list)
+ */
+void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext);
+
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void SCXPublicCryptoTerminate(void);
+
+int SCXPublicCryptoTryShortcutedUpdate(struct SCXLNX_CONNECTION *pConn,
+ struct SCX_COMMAND_INVOKE_CLIENT_COMMAND *pMessage,
+ struct SCX_ANSWER_INVOKE_CLIENT_COMMAND *pAnswer);
+
+int SCXPublicCryptoExecuteRPCCommand(u32 nRPCCommand, void *pRPCSharedBuffer);
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Helper methods
+ */
+u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit);
+void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit);
+
+void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr);
+void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr);
+
+#define LOCK_HWA true
+#define UNLOCK_HWA false
+
+void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock);
+
+/*---------------------------------------------------------------------------*/
+/* AES operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoAESInit(void);
+void PDrvCryptoAESExit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_aes(void);
+void unregister_smc_public_crypto_aes(void);
+#else
+static inline int register_smc_public_crypto_aes(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_aes(void) {}
+#endif
+
+/**
+ *This function performs an AES update operation.
+ *
+ *The AES1 accelerator is assumed loaded with the correct key
+ *
+ *pAESState: operation state; its CTRL field defines the mode and direction,
+ *           and it carries the operation IV
+ *pSrc: Input buffer to process.
+ *pDest: Output buffer containing the processed data.
+ *nbBlocks: Number of blocks to process.
+ */
+bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks);
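+
+/*
+ * Typical call sequence (a sketch mirroring aes_single_encrypt() in
+ * scx_public_crypto_AES.c), with 'state' pointing to a prepared
+ * PUBLIC_CRYPTO_AES_OPERATION_STATE:
+ *
+ *	PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+ *	SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ *	PDrvCryptoUpdateAES(state, in, out, 1);
+ *	SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ *	PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+ */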
+
+/*---------------------------------------------------------------------------*/
+/* DES/DES3 operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoDESInit(void);
+void PDrvCryptoDESExit(void);
+
+/**
+ *This function performs a DES update operation.
+ *
+ *The DES accelerator is assumed loaded with the correct key
+ *
+ *DES_CTRL: defines the mode and direction
+ *pDESState: defines the operation IV
+ *pSrc: Input buffer to process.
+ *pDest: Output buffer containing the processed data.
+ *nbBlocks: Number of blocks to process.
+ */
+bool PDrvCryptoUpdateDES(u32 DES_CTRL,
+ struct PUBLIC_CRYPTO_DES_OPERATION_STATE *pDESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks);
+
+/*---------------------------------------------------------------------------*/
+/* Digest operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoDigestInit(void);
+void PDrvCryptoDigestExit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_digest(void);
+void unregister_smc_public_crypto_digest(void);
+#else
+static inline int register_smc_public_crypto_digest(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_digest(void) {}
+#endif
+
+/**
+ *This function performs a HASH update operation.
+ *
+ *pSHAState: State of the operation; its CTRL field selects the algorithm
+ *pData: Input buffer to process
+ *dataLength: Length in bytes of the input buffer.
+ */
+void PDrvCryptoUpdateHash(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength);
+
+#endif /*__SCX_PUBLIC_CRYPTO_H */
diff --git a/security/smc/omap4/scx_public_crypto_AES.c b/security/smc/omap4/scx_public_crypto_AES.c
new file mode 100644
index 0000000..96b065f
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto_AES.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+#include "scxlnx_mshield.h"
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <mach/io.h>
+
+/*
+ *AES Hardware Accelerator: Base address
+ */
+#define AES1_REGS_HW_ADDR 0x4B501000
+#define AES2_REGS_HW_ADDR 0x4B701000
+
+/*
+ *CTRL register Masks
+ */
+#define AES_CTRL_OUTPUT_READY_BIT (1<<0)
+#define AES_CTRL_INPUT_READY_BIT (1<<1)
+
+#define AES_CTRL_GET_DIRECTION(x) (x&4)
+#define AES_CTRL_DIRECTION_DECRYPT 0
+#define AES_CTRL_DIRECTION_ENCRYPT (1<<2)
+
+#define AES_CTRL_GET_KEY_SIZE(x) (x & 0x18)
+#define AES_CTRL_KEY_SIZE_128 0x08
+#define AES_CTRL_KEY_SIZE_192 0x10
+#define AES_CTRL_KEY_SIZE_256 0x18
+
+#define AES_CTRL_GET_MODE(x) ((x & 0x60) >> 5)
+#define AES_CTRL_IS_MODE_CBC(x) (AES_CTRL_GET_MODE(x) == 1)
+#define AES_CTRL_IS_MODE_ECB(x) (AES_CTRL_GET_MODE(x) == 0)
+#define AES_CTRL_IS_MODE_CTR(x) ((AES_CTRL_GET_MODE(x) == 2) || \
+ (AES_CTRL_GET_MODE(x) == 3))
+#define AES_CTRL_MODE_CBC_BIT 0x20
+#define AES_CTRL_MODE_ECB_BIT 0
+#define AES_CTRL_MODE_CTR_BIT 0x40
+
+#define AES_CTRL_GET_CTR_WIDTH(x) (x&0x180)
+#define AES_CTRL_CTR_WIDTH_32 0
+#define AES_CTRL_CTR_WIDTH_64 0x80
+#define AES_CTRL_CTR_WIDTH_96 0x100
+#define AES_CTRL_CTR_WIDTH_128 0x180
+
+/*
+ * SYSCONFIG register masks
+ */
+#define AES_SYSCONFIG_DMA_REQ_IN_EN_BIT (1 << 5)
+#define AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT (1 << 6)
+
+
+/*----------------------------------------------------------------------*/
+/* AES Context */
+/*----------------------------------------------------------------------*/
+/**
+ *This structure contains the registers of the AES HW accelerator.
+ */
+struct AESReg_t {
+ u32 AES_KEY2_6; /* 0x00 */
+	u32 AES_KEY2_7;		/* 0x04 */
+ u32 AES_KEY2_4; /* 0x08 */
+ u32 AES_KEY2_5; /* 0x0C */
+ u32 AES_KEY2_2; /* 0x10 */
+ u32 AES_KEY2_3; /* 0x14 */
+ u32 AES_KEY2_0; /* 0x18 */
+ u32 AES_KEY2_1; /* 0x1C */
+
+ u32 AES_KEY1_6; /* 0x20 */
+ u32 AES_KEY1_7; /* 0x24 */
+ u32 AES_KEY1_4; /* 0x28 */
+ u32 AES_KEY1_5; /* 0x2C */
+ u32 AES_KEY1_2; /* 0x30 */
+ u32 AES_KEY1_3; /* 0x34 */
+ u32 AES_KEY1_0; /* 0x38 */
+ u32 AES_KEY1_1; /* 0x3C */
+
+ u32 AES_IV_IN_0; /* 0x40 */
+ u32 AES_IV_IN_1; /* 0x44 */
+ u32 AES_IV_IN_2; /* 0x48 */
+ u32 AES_IV_IN_3; /* 0x4C */
+
+ u32 AES_CTRL; /* 0x50 */
+
+ u32 AES_C_LENGTH_0; /* 0x54 */
+ u32 AES_C_LENGTH_1; /* 0x58 */
+ u32 AES_AUTH_LENGTH; /* 0x5C */
+
+ u32 AES_DATA_IN_0; /* 0x60 */
+ u32 AES_DATA_IN_1; /* 0x64 */
+ u32 AES_DATA_IN_2; /* 0x68 */
+ u32 AES_DATA_IN_3; /* 0x6C */
+
+ u32 AES_TAG_OUT_0; /* 0x70 */
+ u32 AES_TAG_OUT_1; /* 0x74 */
+ u32 AES_TAG_OUT_2; /* 0x78 */
+ u32 AES_TAG_OUT_3; /* 0x7C */
+
+ u32 AES_REVISION; /* 0x80 */
+ u32 AES_SYSCONFIG; /* 0x84 */
+
+ u32 AES_SYSSTATUS; /* 0x88 */
+
+};
+static struct AESReg_t *pAESReg_t;
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+#define FLAGS_FAST BIT(7)
+#define FLAGS_BUSY 8
+
+struct aes_hwa_ctx {
+ unsigned long flags;
+
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct task;
+
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+
+ size_t buflen;
+ void *buf_in;
+ size_t dma_size;
+ int dma_in;
+ int dma_lch_in;
+ dma_addr_t dma_addr_in;
+ void *buf_out;
+ int dma_out;
+ int dma_lch_out;
+ dma_addr_t dma_addr_out;
+
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *ctx;
+};
+static struct aes_hwa_ctx *aes_ctx;
+#endif
+
+/*---------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest,
+ u32 nbBlocks);
+
+/*----------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *--------------------------------------------------------------------------*/
+static void PDrvCryptoSaveAESRegisters(
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState)
+{
+ dprintk(KERN_INFO "PDrvCryptoSaveAESRegisters: \
+ pAESState(%p) <- pAESReg_t(%p): CTRL=0x%08x\n",
+ pAESState, pAESReg_t, pAESState->CTRL);
+
+ /*Save the IV if we are in CBC or CTR mode (not required for ECB) */
+ if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) {
+ pAESState->AES_IV_0 = INREG32(&pAESReg_t->AES_IV_IN_0);
+ pAESState->AES_IV_1 = INREG32(&pAESReg_t->AES_IV_IN_1);
+ pAESState->AES_IV_2 = INREG32(&pAESReg_t->AES_IV_IN_2);
+ pAESState->AES_IV_3 = INREG32(&pAESReg_t->AES_IV_IN_3);
+ }
+}
+
+/*----------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *---------------------------------------------------------------------------*/
+static void PDrvCryptoRestoreAESRegisters(
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO "PDrvCryptoRestoreAESRegisters: \
+ pAESReg_t(%p) <- pAESState(%p): CTRL=0x%08x\n",
+ pAESReg_t, pAESState, pAESState->CTRL);
+
+ if (pAESState->key_is_public) {
+ OUTREG32(&pAESReg_t->AES_KEY1_0, pAESState->KEY1_0);
+ OUTREG32(&pAESReg_t->AES_KEY1_1, pAESState->KEY1_1);
+ OUTREG32(&pAESReg_t->AES_KEY1_2, pAESState->KEY1_2);
+ OUTREG32(&pAESReg_t->AES_KEY1_3, pAESState->KEY1_3);
+ OUTREG32(&pAESReg_t->AES_KEY1_4, pAESState->KEY1_4);
+ OUTREG32(&pAESReg_t->AES_KEY1_5, pAESState->KEY1_5);
+ OUTREG32(&pAESReg_t->AES_KEY1_6, pAESState->KEY1_6);
+ OUTREG32(&pAESReg_t->AES_KEY1_7, pAESState->KEY1_7);
+
+ /*
+ * Make sure a potential secure key that has been overwritten by
+ * the previous code is reinstalled before performing other
+ * public crypto operations.
+ */
+ pDevice->hAES1SecureKeyContext = 0;
+ } else {
+ pAESState->CTRL |= INREG32(&pAESReg_t->AES_CTRL);
+ }
+
+ /*
+ * Restore the IV first if we are in CBC or CTR mode
+ * (not required for ECB)
+ */
+ if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) {
+ OUTREG32(&pAESReg_t->AES_IV_IN_0, pAESState->AES_IV_0);
+ OUTREG32(&pAESReg_t->AES_IV_IN_1, pAESState->AES_IV_1);
+ OUTREG32(&pAESReg_t->AES_IV_IN_2, pAESState->AES_IV_2);
+ OUTREG32(&pAESReg_t->AES_IV_IN_3, pAESState->AES_IV_3);
+ }
+
+	/* Then set the CTRL register:
+	 * overwrite CTRL only when needed, because doing it unconditionally
+	 * breaks the HWA processing (observed experimentally)
+	 */
+
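+	/*
+	 * Per the AES_CTRL_* masks above: bit 2 is the direction, bits 3-4
+	 * the key size, bit 5 CBC mode, bit 6 CTR mode and bits 7-8 the CTR
+	 * width, so mask 0x1FC covers every configuration bit touched here.
+	 */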
+ pAESState->CTRL = (pAESState->CTRL & (3 << 3)) /* key size */
+ | (pAESState->CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
+ | (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */;
+
+ if ((pAESState->CTRL & 0x1FC) !=
+ (INREG32(&pAESReg_t->AES_CTRL) & 0x1FC))
+ OUTREG32(&pAESReg_t->AES_CTRL, pAESState->CTRL & 0x1FC);
+
+ /* Set the SYSCONFIG register to 0 */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0);
+}
+
+/*-------------------------------------------------------------------------- */
+
+void PDrvCryptoAESInit(void)
+{
+ pAESReg_t = omap_ioremap(AES1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (pAESReg_t == NULL)
+ panic("Unable to remap AES1 module");
+}
+
+void PDrvCryptoAESExit(void)
+{
+ omap_iounmap(pAESReg_t);
+}
+
+bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks)
+{
+ u32 nbr_of_blocks;
+ u32 vTemp;
+ u8 *pProcessSrc = pSrc;
+ u8 *pProcessDest = pDest;
+ u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (nbBlocks * AES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_AES)
+ dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: \
+ pSrc=0x%08x, pDest=0x%08x, nbBlocks=0x%08x, dmaUse=0x%08x\n",
+ (unsigned int)pSrc,
+ (unsigned int)pDest,
+ (unsigned int)nbBlocks,
+ (unsigned int)dmaUse);
+
+ if (nbBlocks == 0) {
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: Nothing to process\n");
+ return true;
+ }
+
+ if ((AES_CTRL_GET_DIRECTION(INREG32(&pAESReg_t->AES_CTRL)) !=
+ AES_CTRL_GET_DIRECTION(pAESState->CTRL)) &&
+ !pAESState->key_is_public) {
+ dprintk(KERN_WARNING "HWA configured for another direction\n");
+ return false;
+ }
+
+ /*Restore the registers of the accelerator from the operation state */
+ PDrvCryptoRestoreAESRegisters(pAESState);
+
+ if (dmaUse == PUBLIC_CRYPTO_DMA_USE_IRQ) {
+ /* Perform the update with DMA */
+ PDrvCryptoUpdateAESWithDMA(pProcessSrc,
+ pProcessDest, nbBlocks);
+ } else {
+ for (nbr_of_blocks = 0;
+ nbr_of_blocks < nbBlocks; nbr_of_blocks++) {
+
+ /*We wait for the input ready */
+
+ /*Crash the system as this should never occur */
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *)&pAESReg_t->AES_CTRL,
+ AES_CTRL_INPUT_READY_BIT) !=
+ PUBLIC_CRYPTO_OPERATION_SUCCESS)
+				panic("Wait too long for AES hardware "
+					"accelerator input data to be ready\n");
+
+ /* We copy the 16 bytes of data src->reg */
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_0, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_1, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_2, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_3, vTemp);
+ pProcessSrc += 4;
+
+ /* We wait for the output ready */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pAESReg_t->AES_CTRL,
+ AES_CTRL_OUTPUT_READY_BIT);
+
+ /* We copy the 16 bytes of data reg->dest */
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_0);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_1);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_2);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_3);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ }
+ }
+
+ /* Save the accelerator registers into the operation state */
+ PDrvCryptoSaveAESRegisters(pAESState);
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: Done\n");
+
+ return true;
+}
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Static function, perform AES encryption/decryption using the DMA for data
+ *transfer.
+ *
+ *inputs: pSrc : pointer of the input data to process
+ * nbBlocks : number of block to process
+ * dmaUse : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA)
+ * | PUBLIC_CRYPTO_DMA_USE_POLLING (poll the end of DMA)
+ *output: pDest : pointer of the output data (can be eq to pSrc)
+ */
+static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks)
+{
+ /*
+	 *Note: The DMA only sees physical addresses!
+ */
+
+ int dma_ch0;
+ int dma_ch1;
+ struct omap_dma_channel_params ch0_parameters;
+ struct omap_dma_channel_params ch1_parameters;
+ u32 nLength = nbBlocks * AES_BLOCK_SIZE;
+ u32 nLengthLoop = 0;
+ u32 nbBlocksLoop = 0;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: In=0x%08x, Out=0x%08x, Len=%u\n",
+ (unsigned int)pSrc,
+ (unsigned int)pDest,
+ (unsigned int)nLength);
+
+ /*lock the DMA */
+ mutex_lock(&pDevice->sm.sDMALock);
+
+ if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+ if (scxPublicDMARequest(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ scxPublicDMARelease(dma_ch0);
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+
+ while (nLength > 0) {
+
+ /*
+		 * At this point, we are sure that the DMA channels
+		 * are available and not used by another public crypto operation
+ */
+
+ /*DMA used for Input and Output */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG,
+ INREG32(&pAESReg_t->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ /*check length */
+ if (nLength <= pDevice->nDMABufferLength)
+ nLengthLoop = nLength;
+ else
+ nLengthLoop = pDevice->nDMABufferLength;
+
+ /*The length is always a multiple of the block size */
+ nbBlocksLoop = nLengthLoop / AES_BLOCK_SIZE;
+
+ /*
+ *Copy the data from the input buffer into a preallocated
+ *buffer which is aligned on the beginning of a page.
+ *This may prevent potential issues when flushing/invalidating
+ *the buffer as the cache lines are 64 bytes long.
+ */
+ memcpy(pDevice->pDMABuffer, pSrc, nLengthLoop);
+
+ /*DMA1: Mem -> AES */
+ scxPublicSetDMAChannelCommonParams(&ch0_parameters,
+ nbBlocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ AES1_REGS_HW_ADDR + 0x60,
+ (u32)pDevice->pDMABufferPhys,
+ OMAP44XX_DMA_AES1_P_DATA_IN_REQ);
+
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: \
+ scxPublicDMASetParams(ch0)\n");
+ scxPublicDMASetParams(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+
+ /*DMA2: AES -> Mem */
+ scxPublicSetDMAChannelCommonParams(&ch1_parameters,
+ nbBlocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ (u32)pDevice->pDMABufferPhys,
+ AES1_REGS_HW_ADDR + 0x60,
+ OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);
+
+ ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: \
+ scxPublicDMASetParams(ch1)\n");
+ scxPublicDMASetParams(dma_ch1, &ch1_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16);
+
+ wmb();
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n",
+ (unsigned int)dma_ch1);
+ scxPublicDMAStart(dma_ch1, OMAP_DMA_BLOCK_IRQ);
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n",
+ (unsigned int)dma_ch0);
+ scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Waiting for IRQ\n");
+ scxPublicDMAWait(2);
+
+ /*Unset DMA synchronisation requests */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG,
+ INREG32(&pAESReg_t->AES_SYSCONFIG)
+ & (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
+ & (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));
+
+ scxPublicDMAClearChannel(dma_ch0);
+ scxPublicDMAClearChannel(dma_ch1);
+
+ /*
+ *The dma transfer is complete
+ */
+
+ /*The DMA output is in the preallocated aligned buffer
+ *and needs to be copied to the output buffer.*/
+ memcpy(pDest, pDevice->pDMABuffer, nLengthLoop);
+
+ pSrc += nLengthLoop;
+ pDest += nLengthLoop;
+ nLength -= nLengthLoop;
+ }
+
+ /*For safety reasons, let's clean the working buffer */
+ memset(pDevice->pDMABuffer, 0, nLengthLoop);
+
+ /*release the DMA */
+ scxPublicDMARelease(dma_ch0);
+ scxPublicDMARelease(dma_ch1);
+
+ mutex_unlock(&pDevice->sm.sDMALock);
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: Success\n");
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+/*
+ * AES HWA registration into kernel crypto framework
+ */
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
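+/*
+ * Copy up to buflen bytes between a linear buffer and the scatterlist
+ * (direction selected by 'out'), advancing *sg and *offset across entries.
+ * Returns the number of bytes actually copied.
+ */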
+static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
+ size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ sg_copy_buf(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
+static int aes_dma_start(struct aes_hwa_ctx *ctx)
+{
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+ struct omap_dma_channel_params dma_params;
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+
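+	/*
+	 * Fast path: if both scatterlists are single, word-aligned entries,
+	 * DMA directly to/from them; otherwise bounce the data through the
+	 * coherent buffers allocated in aes_dma_init().
+	 */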
+ if (sg_is_last(ctx->in_sg) && sg_is_last(ctx->out_sg)) {
+ in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
+ out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));
+
+ fast = in && out;
+ }
+
+ if (fast) {
+ count = min(ctx->total, sg_dma_len(ctx->in_sg));
+ count = min(count, sg_dma_len(ctx->out_sg));
+
+ if (count != ctx->total)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ if (!err)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ if (!err) {
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(ctx->in_sg);
+ addr_out = sg_dma_address(ctx->out_sg);
+
+ ctx->flags |= FLAGS_FAST;
+ } else {
+ count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
+ ctx->buflen, ctx->total, 0);
+
+ addr_in = ctx->dma_addr_in;
+ addr_out = ctx->dma_addr_out;
+
+ ctx->flags &= ~FLAGS_FAST;
+ }
+
+ ctx->total -= count;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ /* Configure HWA */
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoRestoreAESRegisters(state);
+
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, INREG32(&pAESReg_t->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ ctx->dma_size = count;
+ if (!fast)
+ dma_sync_single_for_device(NULL, addr_in, count,
+ DMA_TO_DEVICE);
+
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.frame_count = count / AES_BLOCK_SIZE;
+ dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+
+ /* IN */
+ dma_params.trigger = ctx->dma_in;
+ dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+ dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.src_start = addr_in;
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_in, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16);
+
+ /* OUT */
+ dma_params.trigger = ctx->dma_out;
+ dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+ dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.dst_start = addr_out;
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_out, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16);
+
+ /* Is this really needed? */
+ omap_disable_dma_irq(ctx->dma_lch_in, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
+ omap_disable_dma_irq(ctx->dma_lch_out, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);
+
+ wmb();
+
+ omap_start_dma(ctx->dma_lch_in);
+ omap_start_dma(ctx->dma_lch_out);
+
+ return 0;
+}
+
+static int aes_dma_stop(struct aes_hwa_ctx *ctx)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+ int err = 0;
+ size_t count;
+
+ dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx);
+
+ PDrvCryptoSaveAESRegisters(state);
+
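+	/* Copy the updated IV back into the request (CBC/CTR modes) */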
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) ctx->req->info;
+
+ ptr[0] = state->AES_IV_0;
+ ptr[1] = state->AES_IV_1;
+ ptr[2] = state->AES_IV_2;
+ ptr[3] = state->AES_IV_3;
+ }
+
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+
+ omap_stop_dma(ctx->dma_lch_in);
+ omap_stop_dma(ctx->dma_lch_out);
+
+ if (ctx->flags & FLAGS_FAST) {
+ dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(NULL, ctx->dma_addr_out,
+ ctx->dma_size, DMA_FROM_DEVICE);
+
+ /* Copy data */
+ count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
+ ctx->buflen, ctx->dma_size, 1);
+ if (count != ctx->dma_size)
+ err = -EINVAL;
+ }
+
+ if (err || !ctx->total)
+ ctx->req->base.complete(&ctx->req->base, err);
+
+ return err;
+}
+
+static void aes_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct aes_hwa_ctx *ctx = data;
+
+ if (lch == ctx->dma_lch_out)
+ tasklet_schedule(&ctx->task);
+}
+
+static int aes_dma_init(struct aes_hwa_ctx *ctx)
+{
+ int err = -ENOMEM;
+
+ ctx->dma_lch_out = -1;
+ ctx->dma_lch_in = -1;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx);
+
+ /* Allocate and map cache buffers */
+ ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in,
+ GFP_KERNEL);
+ if (!ctx->buf_in) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out,
+ GFP_KERNEL);
+ if (!ctx->buf_out) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n");
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in,
+ ctx->dma_addr_in);
+ return -ENOMEM;
+ }
+
+ /* Request DMA channels */
+ err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx,
+ &ctx->dma_lch_in);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n");
+ goto err_dma_in;
+ }
+
+	err = omap_request_dma(0, "smc-aes-tx", aes_dma_callback,
+ ctx, &ctx->dma_lch_out);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n");
+ goto err_dma_out;
+ }
+
+	dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels "
+ "(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);
+
+ return 0;
+
+err_dma_out:
+ omap_free_dma(ctx->dma_lch_in);
+err_dma_in:
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+
+ return err;
+}
+
+static void aes_dma_cleanup(struct aes_hwa_ctx *ctx)
+{
+ omap_free_dma(ctx->dma_lch_out);
+ omap_free_dma(ctx->dma_lch_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+}
+
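+/*
+ * Dequeue the next queued request (unless one is already in progress) and
+ * start its DMA transfer. Completion is signalled from aes_dma_callback()
+ * through the tasklet, which calls aes_dma_stop() and re-enters this
+ * function for the remaining data or the next request.
+ */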
+static int aes_handle_req(struct aes_hwa_ctx *ctx)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state;
+ struct crypto_async_request *async_req, *backlog;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+
+ if (ctx->total)
+ goto start;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ backlog = crypto_get_backlog(&ctx->queue);
+ async_req = crypto_dequeue_request(&ctx->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &ctx->flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ ctx->req = req;
+ ctx->total = req->nbytes;
+ ctx->in_offset = 0;
+ ctx->in_sg = req->src;
+ ctx->out_offset = 0;
+ ctx->out_sg = req->dst;
+
+ state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) req->info;
+
+ state->AES_IV_0 = ptr[0];
+ state->AES_IV_1 = ptr[1];
+ state->AES_IV_2 = ptr[2];
+ state->AES_IV_3 = ptr[3];
+ }
+
+start:
+ return aes_dma_start(ctx);
+}
+
+static void aes_tasklet(unsigned long data)
+{
+ struct aes_hwa_ctx *ctx = (struct aes_hwa_ctx *) data;
+
+ aes_dma_stop(ctx);
+ aes_handle_req(ctx);
+}
+
+/* Generic */
+static int aes_setkey(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state,
+ const u8 *key, unsigned int keylen)
+{
+ u32 *ptr = (u32 *)key;
+
+ switch (keylen) {
+ case 16:
+ state->CTRL |= AES_CTRL_KEY_SIZE_128;
+ break;
+ case 24:
+ state->CTRL |= AES_CTRL_KEY_SIZE_192;
+ break;
+ case 32:
+ state->CTRL |= AES_CTRL_KEY_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ state->KEY1_0 = ptr[0];
+ state->KEY1_1 = ptr[1];
+ state->KEY1_2 = ptr[2];
+ state->KEY1_3 = ptr[3];
+ if (keylen >= 24) {
+ state->KEY1_4 = ptr[4];
+ state->KEY1_5 = ptr[5];
+ }
+ if (keylen == 32) {
+ state->KEY1_6 = ptr[6];
+ state->KEY1_7 = ptr[7];
+ }
+
+ state->key_is_public = 1;
+
+ return 0;
+}
+
+static int aes_operate(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&aes_ctx->lock, flags);
+ err = ablkcipher_enqueue_request(&aes_ctx->queue, req);
+ spin_unlock_irqrestore(&aes_ctx->lock, flags);
+
+ if (!test_and_set_bit(FLAGS_BUSY, &aes_ctx->flags))
+ aes_handle_req(aes_ctx);
+
+ return err;
+}
+
+static int aes_encrypt(struct ablkcipher_request *req)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_decrypt(struct ablkcipher_request *req)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_single_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+static void aes_single_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ PDrvCryptoUpdateAES(state, (u8 *) in, out, 1);
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+}
+
+static void aes_single_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_tfm_ctx(tfm);
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ PDrvCryptoUpdateAES(state, (u8 *) in, out, 1);
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+}
+
+/* AES ECB */
+static int aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CBC */
+static int aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_CBC_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CTR */
+static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ /* Always defaults to 128-bit counter */
+ state->CTRL = AES_CTRL_MODE_CTR_BIT | AES_CTRL_CTR_WIDTH_128;
+
+ return aes_setkey(state, key, keylen);
+}
+
+static struct crypto_alg smc_aes_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_priority = 999,
+ .cra_name = "aes",
+ .cra_driver_name = "aes-smc",
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_single_setkey,
+ .cia_encrypt = aes_single_encrypt,
+ .cia_decrypt = aes_single_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ecb_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aes-ecb-smc",
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ecb_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_cbc_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aes-cbc-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_cbc_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ctr_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aes-ctr-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_ctr_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+
+int register_smc_public_crypto_aes(void)
+{
+ int ret;
+
+ aes_ctx = kzalloc(sizeof(struct aes_hwa_ctx), GFP_KERNEL);
+ if (aes_ctx == NULL)
+ return -ENOMEM;
+
+ crypto_init_queue(&aes_ctx->queue, 1);
+ tasklet_init(&aes_ctx->task, aes_tasklet, (unsigned long)aes_ctx);
+ spin_lock_init(&aes_ctx->lock);
+
+ aes_ctx->dma_in = OMAP44XX_DMA_AES1_P_DATA_IN_REQ;
+ aes_ctx->dma_out = OMAP44XX_DMA_AES1_P_DATA_OUT_REQ;
+
+ ret = aes_dma_init(aes_ctx);
+ if (ret)
+ goto err_dma;
+
+ ret = crypto_register_alg(&smc_aes_alg);
+ if (ret)
+ goto err_dma;
+
+ ret = crypto_register_alg(&smc_aes_ecb_alg);
+ if (ret)
+ goto err_ecb;
+
+ ret = crypto_register_alg(&smc_aes_cbc_alg);
+ if (ret)
+ goto err_cbc;
+
+ ret = crypto_register_alg(&smc_aes_ctr_alg);
+ if (ret)
+ goto err_ctr;
+
+ return 0;
+
+err_ctr:
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+err_cbc:
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+err_ecb:
+ crypto_unregister_alg(&smc_aes_alg);
+err_dma:
+ tasklet_kill(&aes_ctx->task);
+ kfree(aes_ctx);
+ return ret;
+}
+
+void unregister_smc_public_crypto_aes(void)
+{
+ if (aes_ctx == NULL)
+ return;
+
+ crypto_unregister_alg(&smc_aes_alg);
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+ crypto_unregister_alg(&smc_aes_ctr_alg);
+
+ tasklet_kill(&aes_ctx->task);
+
+ aes_dma_cleanup(aes_ctx);
+
+ kfree(aes_ctx);
+}
+#endif
diff --git a/security/smc/omap4/scx_public_crypto_Digest.c b/security/smc/omap4/scx_public_crypto_Digest.c
new file mode 100644
index 0000000..7a40089
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto_Digest.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+#include "scxlnx_mshield.h"
+
+#include <linux/io.h>
+#include <mach/io.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * SHA2/MD5 Hardware Accelerator: Base address for SHA2/MD5 HIB2
+ * This is referenced as the SHA2MD5 module in the Crypto TRM
+ */
+#define DIGEST1_REGS_HW_ADDR 0x4B101000
+
+/*
+ * IRQSTATUS register Masks
+ */
+#define DIGEST_IRQSTATUS_OUTPUT_READY_BIT (1 << 0)
+#define DIGEST_IRQSTATUS_INPUT_READY_BIT (1 << 1)
+#define DIGEST_IRQSTATUS_PARTHASH_READY_BIT (1 << 2)
+#define DIGEST_IRQSTATUS_CONTEXT_READY_BIT (1 << 3)
+
+/*
+ * MODE register Masks
+ */
+#define DIGEST_MODE_GET_ALGO(x) ((x & 0x6) >> 1)
+#define DIGEST_MODE_SET_ALGO(x, a) ((a << 1) | (x & 0xFFFFFFF9))
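+/* Bits 1-2 of MODE select the algorithm; values match DIGEST_CTRL_ALGO_* */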
+
+#define DIGEST_MODE_ALGO_CONST_BIT (1 << 3)
+#define DIGEST_MODE_CLOSE_HASH_BIT (1 << 4)
+
+/*
+ * SYSCONFIG register masks
+ */
+#define DIGEST_SYSCONFIG_PIT_EN_BIT (1 << 2)
+#define DIGEST_SYSCONFIG_PDMA_EN_BIT (1 << 3)
+#define DIGEST_SYSCONFIG_PCONT_SWT_BIT (1 << 6)
+#define DIGEST_SYSCONFIG_PADVANCED_BIT (1 << 7)
+
+/*-------------------------------------------------------------------------*/
+/* Digest Context */
+/*-------------------------------------------------------------------------*/
+/**
+ * This structure contains the registers of the SHA1/MD5 HW accelerator.
+ */
+struct Sha1Md5Reg_t {
+ u32 ODIGEST_A; /* 0x00 Outer Digest A */
+ u32 ODIGEST_B; /* 0x04 Outer Digest B */
+ u32 ODIGEST_C; /* 0x08 Outer Digest C */
+ u32 ODIGEST_D; /* 0x0C Outer Digest D */
+ u32 ODIGEST_E; /* 0x10 Outer Digest E */
+ u32 ODIGEST_F; /* 0x14 Outer Digest F */
+ u32 ODIGEST_G; /* 0x18 Outer Digest G */
+ u32 ODIGEST_H; /* 0x1C Outer Digest H */
+ u32 IDIGEST_A; /* 0x20 Inner Digest A */
+ u32 IDIGEST_B; /* 0x24 Inner Digest B */
+ u32 IDIGEST_C; /* 0x28 Inner Digest C */
+ u32 IDIGEST_D; /* 0x2C Inner Digest D */
+ u32 IDIGEST_E; /* 0x30 Inner Digest E */
+ u32 IDIGEST_F; /* 0x34 Inner Digest F */
+ u32 IDIGEST_G; /* 0x38 Inner Digest G */
+ u32 IDIGEST_H; /* 0x3C Inner Digest H */
+ u32 DIGEST_COUNT; /* 0x40 Digest count */
+ u32 MODE; /* 0x44 Digest mode */
+ u32 LENGTH; /* 0x48 Data length */
+
+ u32 reserved0[13];
+
+ u32 DIN_0; /* 0x80 Data 0 */
+ u32 DIN_1; /* 0x84 Data 1 */
+ u32 DIN_2; /* 0x88 Data 2 */
+ u32 DIN_3; /* 0x8C Data 3 */
+ u32 DIN_4; /* 0x90 Data 4 */
+ u32 DIN_5; /* 0x94 Data 5 */
+ u32 DIN_6; /* 0x98 Data 6 */
+ u32 DIN_7; /* 0x9C Data 7 */
+ u32 DIN_8; /* 0xA0 Data 8 */
+ u32 DIN_9; /* 0xA4 Data 9 */
+ u32 DIN_10; /* 0xA8 Data 10 */
+ u32 DIN_11; /* 0xAC Data 11 */
+ u32 DIN_12; /* 0xB0 Data 12 */
+ u32 DIN_13; /* 0xB4 Data 13 */
+ u32 DIN_14; /* 0xB8 Data 14 */
+ u32 DIN_15; /* 0xBC Data 15 */
+
+ u32 reserved1[16];
+
+ u32 REVISION; /* 0x100 Revision */
+
+ u32 reserved2[3];
+
+ u32 SYSCONFIG; /* 0x110 Config */
+ u32 SYSSTATUS; /* 0x114 Status */
+ u32 IRQSTATUS; /* 0x118 IRQ Status */
+ u32 IRQENABLE; /* 0x11C IRQ Enable */
+};
+
+static struct Sha1Md5Reg_t *pSha1Md5Reg_t;
+
+static const u8 md5OverEmptyString[] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e
+};
+
+static const u8 sha1OverEmptyString[] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static const u8 sha224OverEmptyString[] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
+ 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
+ 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
+ 0xc5, 0xb3, 0xe4, 0x2f
+};
+
+static const u8 sha256OverEmptyString[] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/*------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerform64bDigest(u32 *pData,
+ u32 nAlgo, u32 nBytesProcessed);
+static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength,
+ u32 nAlgo, u32 nBytesProcessed);
+
+static void PDrvCryptoUpdateHashWithDMA(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength);
+
+
+/*-------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *------------------------------------------------------------------------*/
+static void PDrvCryptoSaveHashRegisters(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState)
+{
+ dprintk(KERN_INFO "PDrvCryptoSaveHashRegisters: State=%p\n",
+ pSHAState);
+
+ pSHAState->SHA_DIGEST_A = INREG32(&pSha1Md5Reg_t->IDIGEST_A);
+ pSHAState->SHA_DIGEST_B = INREG32(&pSha1Md5Reg_t->IDIGEST_B);
+ pSHAState->SHA_DIGEST_C = INREG32(&pSha1Md5Reg_t->IDIGEST_C);
+ pSHAState->SHA_DIGEST_D = INREG32(&pSha1Md5Reg_t->IDIGEST_D);
+ pSHAState->SHA_DIGEST_E = INREG32(&pSha1Md5Reg_t->IDIGEST_E);
+ pSHAState->SHA_DIGEST_F = INREG32(&pSha1Md5Reg_t->IDIGEST_F);
+ pSHAState->SHA_DIGEST_G = INREG32(&pSha1Md5Reg_t->IDIGEST_G);
+ pSHAState->SHA_DIGEST_H = INREG32(&pSha1Md5Reg_t->IDIGEST_H);
+}
+
+/*-------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *-------------------------------------------------------------------------*/
+static void PDrvCryptoRestoreHashRegisters(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState)
+{
+ dprintk(KERN_INFO "PDrvCryptoRestoreHashRegisters: State=%p\n",
+ pSHAState);
+
+ if (pSHAState->nBytesProcessed != 0) {
+ /*
+ * Some bytes were already processed. Initialize
+ * previous digest
+ */
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_A, pSHAState->SHA_DIGEST_A);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_B, pSHAState->SHA_DIGEST_B);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_C, pSHAState->SHA_DIGEST_C);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_D, pSHAState->SHA_DIGEST_D);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_E, pSHAState->SHA_DIGEST_E);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_F, pSHAState->SHA_DIGEST_F);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_G, pSHAState->SHA_DIGEST_G);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_H, pSHAState->SHA_DIGEST_H);
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0);
+}
+
+/*------------------------------------------------------------------------- */
+
+void PDrvCryptoDigestInit(void)
+{
+ pSha1Md5Reg_t = omap_ioremap(DIGEST1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (pSha1Md5Reg_t == NULL)
+ panic("Unable to remap SHA2/MD5 module");
+}
+
+void PDrvCryptoDigestExit(void)
+{
+ omap_iounmap(pSha1Md5Reg_t);
+}
+
+void PDrvCryptoUpdateHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength)
+{
+ u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (dataLength >= DMA_TRIGGER_IRQ_DIGEST)
+ dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash : "\
+ "Data=0x%08x/%u, Chunk=%u, Processed=%u, dmaUse=0x%08x\n",
+ (u32)pData, (u32)dataLength,
+ pSHAState->nChunkLength, pSHAState->nBytesProcessed,
+ dmaUse);
+
+ if (dataLength == 0) {
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\
+ "Nothing to process\n");
+ return;
+ }
+
+ if (dmaUse != PUBLIC_CRYPTO_DMA_USE_NONE) {
+ /*
+ * Restore the registers of the accelerator from the operation
+ * state
+ */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+
+ /*perform the updates with DMA */
+ PDrvCryptoUpdateHashWithDMA(pSHAState, pData, dataLength);
+
+ /* Save the accelerator registers into the operation state */
+ PDrvCryptoSaveHashRegisters(pSHAState);
+ } else {
+ /*Non-DMA transfer */
+
+		/*(1)We take the chunk buffer which contains the last saved
+		 *data that could not yet be processed because there was not
+		 *enough data to make a 64B buffer. Then we try to make a
+		 *64B buffer by concatenating it with the newly passed data
+		 */
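+		/*
+		 * Worked example (illustrative): with 40 bytes already in
+		 * the chunk and a 100-byte update, 24 bytes complete the 64B
+		 * block, that block is hashed below, and the remaining 76
+		 * bytes are handled by steps (2) and (3).
+		 */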
+
+		/*Is there any data in the chunk? If so, is it possible to
+		 *make a 64B buffer with the newly passed data? */
+ if ((pSHAState->nChunkLength != 0)
+ && (pSHAState->nChunkLength + dataLength >=
+ HASH_BLOCK_BYTES_LENGTH)) {
+
+ u8 vLengthToComplete =
+ HASH_BLOCK_BYTES_LENGTH - pSHAState->nChunkLength;
+
+ /*So we fill the chunk buffer with the new data to
+ *complete to 64B */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, vLengthToComplete);
+
+ if (pSHAState->nChunkLength + dataLength ==
+ HASH_BLOCK_BYTES_LENGTH) {
+ /*We'll keep some data for the final */
+ pSHAState->nChunkLength =
+ HASH_BLOCK_BYTES_LENGTH;
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\
+ "Done: Chunk=%u; Processed=%u\n",
+ pSHAState->nChunkLength,
+ pSHAState->nBytesProcessed);
+ return;
+ }
+
+ /*
+ * Restore the registers of the accelerator from the
+ * operation state
+ */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+
+ /*Then we send this buffer to the HWA */
+ static_Hash_HwPerform64bDigest(
+ (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+
+ /*
+ * Save the accelerator registers into the operation
+ * state
+ */
+ PDrvCryptoSaveHashRegisters(pSHAState);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ pSHAState->nChunkLength = 0;
+
+ /*Then we have less data to process */
+ pData += vLengthToComplete;
+ dataLength -= vLengthToComplete;
+ }
+
+ /*(2)We process all the 64B buffer that we can */
+ if (pSHAState->nChunkLength + dataLength >=
+ HASH_BLOCK_BYTES_LENGTH) {
+
+ while (dataLength > HASH_BLOCK_BYTES_LENGTH) {
+ u8 pTempAlignedBuffer[HASH_BLOCK_BYTES_LENGTH];
+
+ /*
+ *We process a 64B buffer
+ */
+ /*We copy the data to process to an aligned
+ *buffer */
+ memcpy(pTempAlignedBuffer, pData,
+ HASH_BLOCK_BYTES_LENGTH);
+
+ /*Then we send this buffer to the hash
+ *hardware */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+ static_Hash_HwPerform64bDigest(
+ (u32 *) pTempAlignedBuffer,
+ pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+ PDrvCryptoSaveHashRegisters(pSHAState);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+				/*Then we decrease the remaining data by 64B */
+ pData += HASH_BLOCK_BYTES_LENGTH;
+ dataLength -= HASH_BLOCK_BYTES_LENGTH;
+ }
+ }
+
+		/*(3)We check whether some data is left that could not be
+		 *processed yet because it is not enough to fill a 64B buffer */
+ if (dataLength > 0) {
+ if (pSHAState->nChunkLength + dataLength >
+ HASH_BLOCK_BYTES_LENGTH) {
+ /*Should never be in this case !!! */
+				panic("PDrvCryptoUpdateHash: nChunkLength + "
+					"dataLength > HASH_BLOCK_BYTES_LENGTH\n");
+ }
+
+ /*So we fill the chunk buffer with the new data to
+ *complete to 64B */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, dataLength);
+ pSHAState->nChunkLength += dataLength;
+ }
+ }
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: Done: "\
+ "Chunk=%u; Processed=%u\n",
+ pSHAState->nChunkLength, pSHAState->nBytesProcessed);
+}
+
+/*------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerform64bDigest(u32 *pData,
+ u32 nAlgo, u32 nBytesProcessed)
+{
+ u32 nAlgoConstant = 0;
+
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed);
+
+ if (nBytesProcessed == 0) {
+ /* No bytes processed so far. Will use the algo constant instead
+ of previous digest */
+ nAlgoConstant = 1 << 3;
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ nAlgoConstant | (nAlgo & 0x6));
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, HASH_BLOCK_BYTES_LENGTH);
+
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Wait too long for DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ /*
+ *The pData buffer is a buffer of 64 bytes.
+ */
+ OUTREG32(&pSha1Md5Reg_t->DIN_0, pData[0]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_1, pData[1]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_2, pData[2]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_3, pData[3]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_4, pData[4]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_5, pData[5]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_6, pData[6]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_7, pData[7]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_8, pData[8]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_9, pData[9]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_10, pData[10]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_11, pData[11]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_12, pData[12]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_13, pData[13]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_14, pData[14]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_15, pData[15]);
+
+ /*
+ *Wait until the hash operation is finished.
+ */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+}
+
+/*------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength,
+ u32 nAlgo, u32 nBytesProcessed)
+{
+ /*
+	 *Note: The DMA only sees physical addresses!
+ */
+
+ int dma_ch0;
+ struct omap_dma_channel_params ch0_parameters;
+ u32 nLengthLoop = 0;
+ u32 nAlgoConstant;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO
+ "static_Hash_HwPerformDmaDigest: Buffer=0x%08x/%u\n",
+ (u32)pData, (u32)nDataLength);
+
+ /*lock the DMA */
+ mutex_lock(&pDevice->sm.sDMALock);
+ if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+
+ while (nDataLength > 0) {
+
+ nAlgoConstant = 0;
+ if (nBytesProcessed == 0) {
+ /*No bytes processed so far. Will use the algo
+ *constant instead of previous digest */
+ nAlgoConstant = 1 << 3;
+ }
+
+ /*check length */
+ if (nDataLength <= pDevice->nDMABufferLength)
+ nLengthLoop = nDataLength;
+ else
+ nLengthLoop = pDevice->nDMABufferLength;
+
+ /*
+ *Copy the data from the input buffer into a preallocated
+ *buffer which is aligned on the beginning of a page.
+ *This may prevent potential issues when flushing/invalidating
+ *the buffer as the cache lines are 64 bytes long.
+ */
+ memcpy(pDevice->pDMABuffer, pData, nLengthLoop);
+
+ /*DMA1: Mem -> HASH */
+ scxPublicSetDMAChannelCommonParams(&ch0_parameters,
+ nLengthLoop / HASH_BLOCK_BYTES_LENGTH,
+ DMA_CEN_Elts_per_Frame_SHA,
+ DIGEST1_REGS_HW_ADDR + 0x80,
+ pDevice->pDMABufferPhys,
+ OMAP44XX_DMA_SHA2_DIN_P);
+
+ /*specific for Mem -> HWA */
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ scxPublicDMASetParams(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed);
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ nAlgoConstant | (nAlgo & 0x6));
+
+ /*
+ * Triggers operation
+ * Interrupt, Free Running + GO (DMA on)
+ */
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG,
+ INREG32(&pSha1Md5Reg_t->SYSCONFIG) |
+ DIGEST_SYSCONFIG_PDMA_EN_BIT);
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, nLengthLoop);
+
+ wmb();
+
+ scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ scxPublicDMAWait(1);
+
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0);
+
+ scxPublicDMAClearChannel(dma_ch0);
+
+ pData += nLengthLoop;
+ nDataLength -= nLengthLoop;
+ nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+ }
+
+ /*For safety reasons, let's clean the working buffer */
+ memset(pDevice->pDMABuffer, 0, nLengthLoop);
+
+ /*release the DMA */
+ scxPublicDMARelease(dma_ch0);
+
+ mutex_unlock(&pDevice->sm.sDMALock);
+
+ /*
+	 * The DMA transfer is finished; now wait until the hash
+	 * operation is finished.
+ */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_CONTEXT_READY_BIT);
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Static function, perform data digest using the DMA for data transfer.
+ *
+ *inputs:
+ * pData : pointer of the input data to process
+ * dataLength : number of byte to process
+ */
+static void PDrvCryptoUpdateHashWithDMA(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength)
+{
+ dprintk(KERN_INFO "PDrvCryptoUpdateHashWithDMA\n");
+
+ if (pSHAState->nChunkLength != 0) {
+
+ u32 vLengthToComplete;
+
+ /*Fill the chunk first */
+ if (pSHAState->
+ nChunkLength + dataLength <= HASH_BLOCK_BYTES_LENGTH) {
+
+ /*So we fill the chunk buffer with the new data */
+ memcpy(pSHAState->
+ pChunkBuffer + pSHAState->nChunkLength,
+ pData, dataLength);
+ pSHAState->nChunkLength += dataLength;
+
+ /*We'll keep some data for the final */
+ return;
+ }
+
+ vLengthToComplete = HASH_BLOCK_BYTES_LENGTH - pSHAState->
+ nChunkLength;
+
+ if (vLengthToComplete != 0) {
+ /*So we fill the chunk buffer with the new data to
+ *complete to 64B */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, vLengthToComplete);
+ }
+
+ /*Then we send this buffer to the HWA (no DMA) */
+ static_Hash_HwPerform64bDigest(
+ (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ pSHAState->nChunkLength = 0;
+
+		/*Advance the data buffer past the data already
+		 *processed */
+ pData += vLengthToComplete;
+ dataLength -= vLengthToComplete;
+ }
+
+ if (dataLength > HASH_BLOCK_BYTES_LENGTH) {
+
+		/*DMA only manages a data length that is a multiple of 64 bytes */
+ u32 vDmaProcessSize = dataLength & 0xFFFFFFC0;
+
+ if (vDmaProcessSize == dataLength) {
+ /*We keep one block for the final */
+ vDmaProcessSize -= HASH_BLOCK_BYTES_LENGTH;
+ }
+
+ static_Hash_HwPerformDmaDigest(pData, vDmaProcessSize,
+ pSHAState->CTRL, pSHAState->nBytesProcessed);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+ pData += vDmaProcessSize;
+ dataLength -= vDmaProcessSize;
+ }
+
+	/*At that point, at most 64 bytes (one block) are left to process */
+	if ((dataLength == 0) || (dataLength > HASH_BLOCK_BYTES_LENGTH)) {
+		/*Should never happen! */
+		panic("PDrvCryptoUpdateHashWithDMA: "
+			"remaining dataLength=%u\n", dataLength);
+ }
+
+ /*We now fill the chunk buffer with the remaining data */
+ memcpy(pSHAState->pChunkBuffer, pData, dataLength);
+ pSHAState->nChunkLength = dataLength;
+}
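
PDrvCryptoUpdateHashWithDMA() above is a classic block-buffering update: partial
input accumulates in the 64-byte chunk buffer, whole 64-byte multiples are handed
to the hardware (always holding at least one block back for the final), and the
tail is stashed for the next call. A minimal, hardware-free sketch of that
bookkeeping, assuming a hypothetical hw_blocks() stands in for the DMA/HWA step:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_LEN 64u

struct chunk_state {
	uint8_t buf[BLOCK_LEN];	/* pending partial / held-back block */
	uint32_t len;
};

static void hw_blocks(const uint8_t *p, uint32_t n)
{
	/* Stands in for static_Hash_HwPerform64bDigest/HwPerformDmaDigest. */
	printf("digest %u bytes (%u blocks)\n", n, n / BLOCK_LEN);
}

static void update(struct chunk_state *s, const uint8_t *data, uint32_t n)
{
	/* 1. Top up the pending chunk; keep it whole if it does not overflow. */
	if (s->len != 0) {
		if (s->len + n <= BLOCK_LEN) {
			memcpy(s->buf + s->len, data, n);
			s->len += n;
			return;	/* held back for the final step */
		}
		uint32_t fill = BLOCK_LEN - s->len;
		memcpy(s->buf + s->len, data, fill);
		hw_blocks(s->buf, BLOCK_LEN);
		s->len = 0;
		data += fill;
		n -= fill;
	}

	/* 2. Push whole 64-byte multiples, always holding one block back. */
	if (n > BLOCK_LEN) {
		uint32_t dma_len = n & ~(BLOCK_LEN - 1);
		if (dma_len == n)
			dma_len -= BLOCK_LEN;
		hw_blocks(data, dma_len);
		data += dma_len;
		n -= dma_len;
	}

	/* 3. Stash the 1..64-byte tail (the driver panics if this is 0 or >64). */
	memcpy(s->buf, data, n);
	s->len = n;
}

int main(void)
{
	struct chunk_state s = { .len = 0 };
	uint8_t msg[300] = { 0 };

	update(&s, msg, 10);	/* only buffered */
	update(&s, msg, 290);	/* flushes 10+54, digests 192 more, keeps 44 */
	return 0;
}
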
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+static void PDrvCryptoInitHash(u32 alg,
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state)
+{
+ memset(state, 0, sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE));
+
+ state->CTRL = alg << 1;
+}
+
+static int static_Hash_HwReadDigest(u32 algo, u8 *out)
+{
+ u32 regs, tmp;
+ u32 idx = 0, i;
+
+ switch (algo) {
+ case DIGEST_CTRL_ALGO_MD5:
+ regs = 4;
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ regs = 5;
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ regs = 7;
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ regs = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < regs; i++) {
+ tmp = INREG32(&pSha1Md5Reg_t->IDIGEST_A + i);
+
+ out[idx++] = (u8) ((tmp >> 0) & 0xff);
+ out[idx++] = (u8) ((tmp >> 8) & 0xff);
+ out[idx++] = (u8) ((tmp >> 16) & 0xff);
+ out[idx++] = (u8) ((tmp >> 24) & 0xff);
+ }
+
+ return 0;
+}
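
static_Hash_HwReadDigest() above serializes each 32-bit digest register into the
output buffer least-significant byte first. A small stand-alone illustration of
that unpacking; words_to_bytes_le() is a hypothetical helper, and the sample
input is simply the well-known SHA-1 initial state words:

#include <stdint.h>
#include <stdio.h>

static void words_to_bytes_le(const uint32_t *regs, unsigned int nregs,
			      uint8_t *out)
{
	unsigned int i, idx = 0;

	for (i = 0; i < nregs; i++) {
		out[idx++] = (uint8_t)(regs[i] >> 0);
		out[idx++] = (uint8_t)(regs[i] >> 8);
		out[idx++] = (uint8_t)(regs[i] >> 16);
		out[idx++] = (uint8_t)(regs[i] >> 24);
	}
}

int main(void)
{
	/* SHA-1 uses 5 state words; MD5 4, SHA-224 7, SHA-256 8. */
	uint32_t regs[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			     0x10325476, 0xc3d2e1f0 };
	uint8_t out[20];
	unsigned int i;

	words_to_bytes_le(regs, 5, out);
	for (i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}
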
+
+static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state,
+ u8 *out)
+{
+ u32 *data = (u32 *) state->pChunkBuffer;
+
+ /* Hashing an empty string? */
+ if (state->nBytesProcessed + state->nChunkLength == 0) {
+ switch (DIGEST_MODE_GET_ALGO(state->CTRL)) {
+ case DIGEST_CTRL_ALGO_MD5:
+ memcpy(out, md5OverEmptyString, HASH_MD5_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ memcpy(out, sha1OverEmptyString, HASH_SHA1_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ memcpy(out, sha224OverEmptyString, HASH_SHA224_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ memcpy(out, sha256OverEmptyString, HASH_SHA256_LENGTH);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ PDrvCryptoRestoreHashRegisters(state);
+
+ /*
+ * At this point, the chunk buffer should contain the last block of data
+ * needed for the final.
+ */
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, state->nBytesProcessed);
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ (state->CTRL & 0x6) | 0x10 |
+ (state->nBytesProcessed == 0) << 3);
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, state->nChunkLength);
+
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *) &pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Wait too long for DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->DIN_0, data[0]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_1, data[1]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_2, data[2]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_3, data[3]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_4, data[4]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_5, data[5]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_6, data[6]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_7, data[7]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_8, data[8]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_9, data[9]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_10, data[10]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_11, data[11]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_12, data[12]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_13, data[13]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_14, data[14]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_15, data[15]);
+
+ /* Wait till the hash operation is finished */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *) &pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+
+ return static_Hash_HwReadDigest(DIGEST_MODE_GET_ALGO(state->CTRL), out);
+}
+
+/*
+ * Digest HWA registration into kernel crypto framework
+ */
+
+static int digest_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoUpdateHash(state, (u8 *) data, len);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return 0;
+}
+
+static int digest_final(struct shash_desc *desc, u8 *out)
+{
+ int ret;
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ ret = PDrvCryptoFinalHash(state, out);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return ret;
+}
+
+static int digest_import(struct shash_desc *desc, const void *in)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ memcpy(state, in, sizeof(*state));
+ return 0;
+}
+
+static int digest_export(struct shash_desc *desc, void *out)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ memcpy(out, state, sizeof(*state));
+ return 0;
+}
+
+/* MD5 */
+static int md5_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_MD5, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_md5_alg = {
+ .digestsize = HASH_MD5_LENGTH,
+ .init = md5_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA1 */
+static int sha1_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA1, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha1_alg = {
+ .digestsize = HASH_SHA1_LENGTH,
+ .init = sha1_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA224 */
+static int sha224_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA224, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha224_alg = {
+ .digestsize = HASH_SHA224_LENGTH,
+ .init = sha224_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA256 */
+static int sha256_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA256, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha256_alg = {
+ .digestsize = HASH_SHA256_LENGTH,
+ .init = sha256_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
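
Once registered below, these transforms are reachable by any in-kernel user
through the generic shash API under the names "md5", "sha1", "sha224" and
"sha256"; the cra_priority of 999 makes them preferred over the plain software
implementations. A hedged sketch of such a caller on this kernel generation
(not part of the driver; error handling abbreviated):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int smc_sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);	/* picks sha1-smc if loaded */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (desc == NULL) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret == 0)
		ret = crypto_shash_update(desc, data, len);
	if (ret == 0)
		ret = crypto_shash_final(desc, out);	/* 20-byte digest */

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}
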
+
+int register_smc_public_crypto_digest(void)
+{
+ int ret;
+
+ dprintk(KERN_INFO "SMC: Registering digest algorithms\n");
+
+ ret = crypto_register_shash(&smc_md5_alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_shash(&smc_sha1_alg);
+ if (ret)
+ goto sha1_err;
+
+ ret = crypto_register_shash(&smc_sha224_alg);
+ if (ret)
+ goto sha224_err;
+
+ ret = crypto_register_shash(&smc_sha256_alg);
+ if (ret)
+ goto sha256_err;
+
+ return 0;
+
+sha256_err:
+ crypto_unregister_shash(&smc_sha224_alg);
+sha224_err:
+ crypto_unregister_shash(&smc_sha1_alg);
+sha1_err:
+ crypto_unregister_shash(&smc_md5_alg);
+ return ret;
+}
+
+void unregister_smc_public_crypto_digest(void)
+{
+ dprintk(KERN_INFO "SMC: Unregistering digest algorithms\n");
+
+ crypto_unregister_shash(&smc_md5_alg);
+ crypto_unregister_shash(&smc_sha1_alg);
+ crypto_unregister_shash(&smc_sha224_alg);
+ crypto_unregister_shash(&smc_sha256_alg);
+}
+#endif
diff --git a/security/smc/omap4/scx_public_dma.c b/security/smc/omap4/scx_public_dma.c
new file mode 100644
index 0000000..743c333
--- /dev/null
+++ b/security/smc/omap4/scx_public_dma.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_dma.h"
+
+#include <asm/atomic.h>
+
+static atomic_t g_dmaEventFlag = ATOMIC_INIT(0);
+
+/*------------------------------------------------------------------------ */
+/*
+ * Internal functions
+ */
+
+static void scxPublicDMACallback(int lch, u16 ch_status, void *data)
+{
+ atomic_inc(&g_dmaEventFlag);
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Public DMA API
+ */
+
+u32 scxPublicDMARequest(int *lch)
+{
+ int dma_ch_out = 0;
+
+ if (lch == NULL)
+ return PUBLIC_CRYPTO_ERR_BAD_PARAMETERS;
+
+ if (omap_request_dma(0, "SMC Public Crypto",
+ scxPublicDMACallback, NULL, &dma_ch_out) != 0)
+ return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY;
+
+ omap_disable_dma_irq(dma_ch_out, OMAP_DMA_DROP_IRQ |
+ OMAP_DMA_BLOCK_IRQ);
+
+ *lch = dma_ch_out;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Release a DMA channel
+ */
+u32 scxPublicDMARelease(int lch)
+{
+ omap_free_dma(lch);
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams)
+{
+ omap_set_dma_params(lch, pParams);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAStart(int lch, int interruptMask)
+{
+ atomic_set(&g_dmaEventFlag, 0);
+ omap_enable_dma_irq(lch, interruptMask);
+ omap_start_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMADisableChannel(int lch)
+{
+ omap_stop_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAClearChannel(int lch)
+{
+ omap_clear_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAWait(int nr_of_cb)
+{
+ while (atomic_read(&g_dmaEventFlag) < nr_of_cb)
+ cpu_relax();
+}
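
scxPublicDMAWait() pairs with scxPublicDMACallback(): the callback increments the
global atomic counter and the waiter spins, via cpu_relax(), until the expected
number of block-IRQ callbacks has been observed. A user-space sketch of the same
handshake using C11 atomics, with a thread standing in for the DMA interrupt
(illustrative only; the driver relies on the kernel's atomic_t):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dma_event_flag;

static void *fake_dma_irq(void *arg)
{
	(void)arg;
	atomic_fetch_add(&dma_event_flag, 1);	/* scxPublicDMACallback() */
	return NULL;
}

static void wait_for_callbacks(int nr_of_cb)	/* scxPublicDMAWait() */
{
	while (atomic_load(&dma_event_flag) < nr_of_cb)
		sched_yield();	/* the driver uses cpu_relax() here */
}

int main(void)
{
	pthread_t irq;

	atomic_store(&dma_event_flag, 0);	/* done by scxPublicDMAStart() */
	pthread_create(&irq, NULL, fake_dma_irq, NULL);
	wait_for_callbacks(1);
	pthread_join(irq, NULL);
	printf("transfer complete\n");
	return 0;
}
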
+
+/*------------------------------------------------------------------------ */
+/*
+ * Perform common DMA channel setup, used to factor out common code
+ *
+ * Output: struct omap_dma_channel_params *pDMAChannel
+ * Inputs: u32 nbBlocks   Number of blocks (frames) of the transfer
+ *         u32 nbElements Number of elements per frame
+ *         u32 nDstStart  Destination address
+ *         u32 nSrcStart  Source address
+ *         u32 nTriggerID Trigger ID
+ */
+void scxPublicSetDMAChannelCommonParams(
+ struct omap_dma_channel_params *pDMAChannel,
+ u32 nbBlocks, u32 nbElements,
+ u32 nDstStart, u32 nSrcStart, u32 nTriggerID)
+{
+ pDMAChannel->data_type = OMAP_DMA_DATA_TYPE_S32;
+ pDMAChannel->elem_count = nbElements;
+ pDMAChannel->frame_count = nbBlocks;
+ pDMAChannel->src_ei = 0;
+ pDMAChannel->src_fi = 0;
+ pDMAChannel->dst_ei = 0;
+ pDMAChannel->dst_fi = 0;
+ pDMAChannel->sync_mode = OMAP_DMA_SYNC_FRAME;
+ pDMAChannel->src_start = nSrcStart;
+ pDMAChannel->dst_start = nDstStart;
+ pDMAChannel->trigger = nTriggerID;
+}
diff --git a/security/smc/omap4/scx_public_dma.h b/security/smc/omap4/scx_public_dma.h
new file mode 100644
index 0000000..ddd19b2
--- /dev/null
+++ b/security/smc/omap4/scx_public_dma.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PUBLIC_DMA_H
+#define __SCX_PUBLIC_DMA_H
+
+#include <linux/dma-mapping.h>
+#include <plat/dma.h>
+#include <plat/dma-44xx.h>
+
+#include "scx_public_crypto.h"
+
+/*---------------------------------------------------------------------------
+ * Cache management (implemented in the assembler file)
+ *-------------------------------------------------------------------------- */
+
+u32 v7_dma_flush_range(u32 nVAStart, u32 nVAEnd);
+u32 v7_dma_inv_range(u32 nVAStart, u32 nVAEnd);
+
+/*-------------------------------------------------------------------------- */
+/*
+ * Public DMA API
+ */
+
+/*
+ * CEN Masks
+ */
+#define DMA_CEN_Elts_per_Frame_AES 4
+#define DMA_CEN_Elts_per_Frame_DES 2
+#define DMA_CEN_Elts_per_Frame_SHA 16
+
+/*
+ * Request a DMA channel
+ */
+u32 scxPublicDMARequest(int *lch);
+
+/*
+ * Release a DMA channel
+ */
+u32 scxPublicDMARelease(int lch);
+
+/**
+ * This function waits until the DMA callback has been invoked nr_of_cb times.
+ */
+void scxPublicDMAWait(int nr_of_cb);
+
+/*
+ * This function starts a DMA operation.
+ *
+ * lch DMA channel ID.
+ * interruptMask Configures the Channel Interrupt Control Register.
+ */
+void scxPublicDMAStart(int lch, int interruptMask);
+
+void scxPublicSetDMAChannelCommonParams(
+ struct omap_dma_channel_params *pDMAChannel,
+ u32 nbBlocks, u32 nbElements, u32 nDstStart,
+ u32 nSrcStart, u32 nTriggerID);
+void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams);
+void scxPublicDMADisableChannel(int lch);
+void scxPublicDMAClearChannel(int lch);
+
+#endif /*__SCX_PUBLIC_DMA_H */
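
Taken together with the SHA path earlier in this patch, the API above is driven
in a fixed sequence: request a channel, fill in the common parameters, adjust the
direction-specific fields, start, wait for the block IRQ, then clear and release.
A hedged sketch of that call order for one memory-to-peripheral transfer (the
destination address and trigger ID below are placeholders, not real register
values):

static void example_dma_transfer(u32 src_phys, u32 nr_blocks)
{
	struct omap_dma_channel_params params;
	int lch;

	if (scxPublicDMARequest(&lch) != PUBLIC_CRYPTO_OPERATION_SUCCESS)
		return;

	/* One frame = one 64-byte SHA block = 16 x 32-bit elements. */
	scxPublicSetDMAChannelCommonParams(&params, nr_blocks,
		DMA_CEN_Elts_per_Frame_SHA,
		0x0 /* placeholder: peripheral FIFO physical address */,
		src_phys,
		0 /* placeholder: peripheral DMA request line */);
	params.src_amode = OMAP_DMA_AMODE_POST_INC;
	params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	scxPublicDMASetParams(lch, &params);

	scxPublicDMAStart(lch, OMAP_DMA_BLOCK_IRQ);	/* resets the event flag */
	scxPublicDMAWait(1);				/* one block IRQ expected */

	scxPublicDMAClearChannel(lch);
	scxPublicDMARelease(lch);
}
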
diff --git a/security/smc/omap4/scxlnx_comm_mshield.c b/security/smc/omap4/scxlnx_comm_mshield.c
new file mode 100644
index 0000000..ccd2098
--- /dev/null
+++ b/security/smc/omap4/scxlnx_comm_mshield.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <asm/cputype.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+
+#include <clockdomain.h>
+
+#include "scxlnx_defs.h"
+
+#ifdef CONFIG_HAS_WAKELOCK
+static struct wake_lock g_tf_wake_lock;
+static atomic_t tf_wake_lock_count = ATOMIC_INIT(0);
+#endif
+
+static struct clockdomain *smc_l4_sec_clkdm;
+static atomic_t smc_l4_sec_clkdm_use_count = ATOMIC_INIT(0);
+
+static int __init tf_early_init(void)
+{
+ smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
+ if (smc_l4_sec_clkdm == NULL)
+ return -EFAULT;
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND,
+ SCXLNX_DEVICE_BASE_NAME);
+#endif
+
+ return 0;
+}
+early_initcall(tf_early_init);
+
+/*--------------------------------------------------------------------------
+ * L4 SEC Clock domain handling
+ *-------------------------------------------------------------------------- */
+
+void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock)
+{
+ if (use_spin_lock)
+ spin_lock(&SCXLNXGetDevice()->sm.lock);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wakelock) {
+ atomic_inc(&tf_wake_lock_count);
+ wake_lock(&g_tf_wake_lock);
+ }
+#endif
+ atomic_inc(&smc_l4_sec_clkdm_use_count);
+ clkdm_wakeup(smc_l4_sec_clkdm);
+ if (use_spin_lock)
+ spin_unlock(&SCXLNXGetDevice()->sm.lock);
+}
+
+void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock)
+{
+ if (use_spin_lock)
+ spin_lock(&SCXLNXGetDevice()->sm.lock);
+ if (atomic_dec_return(&smc_l4_sec_clkdm_use_count) == 0)
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wakeunlock)
+ if (atomic_dec_return(&tf_wake_lock_count) == 0)
+ wake_unlock(&g_tf_wake_lock);
+#endif
+ if (use_spin_lock)
+ spin_unlock(&SCXLNXGetDevice()->sm.lock);
+}
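
tf_l4sec_clkdm_wakeup() and tf_l4sec_clkdm_allow_idle() above form a
reference-counted bracket: every user forces the L4_SEC clock domain active
(optionally holding a suspend wakelock), and only the last user lets it idle
again. A hardware-free C11 sketch of that counting discipline (function names
are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_count;

static void clkdm_force_active(void) { printf("clkdm: forced active\n"); }
static void clkdm_may_idle(void)     { printf("clkdm: may idle again\n"); }

static void bracket_enter(void)
{
	atomic_fetch_add(&use_count, 1);
	clkdm_force_active();	/* idempotent from the hardware's point of view */
}

static void bracket_exit(void)
{
	/* atomic_dec_return(...) == 0 in the driver; previous value == 1 here. */
	if (atomic_fetch_sub(&use_count, 1) == 1)
		clkdm_may_idle();
}

int main(void)
{
	bracket_enter();	/* e.g. start of a secure-world call */
	bracket_enter();	/* nested user: a crypto DMA burst */
	bracket_exit();		/* domain stays active */
	bracket_exit();		/* last user: idling allowed again */
	return 0;
}
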
+
diff --git a/security/smc/omap4/scxlnx_defs.h b/security/smc/omap4/scxlnx_defs.h
new file mode 100644
index 0000000..a6dcb9c
--- /dev/null
+++ b/security/smc/omap4/scxlnx_defs.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_DEFS_H__
+#define __SCXLNX_DEFS_H__
+
+#include <asm/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "scx_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define SCXLNX_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * SCXLNX_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum SCXLNX_SHMEM_TYPE {
+ SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM,
+ SCXLNX_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct SCXLNX_COARSE_PAGE_TABLE {
+ /*
+ * Identifies the coarse page table descriptor in
+ * sFreeCoarsePageTables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *pDescriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pParent;
+};
+
+
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ARRAY {
+ /*
+ * identifies the element in the sCoarsePageTableArrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of SCXLNX_PAGE_DESCRIPTOR_TYPE_XXX value
+ */
+ u32 nType;
+
+ struct SCXLNX_COARSE_PAGE_TABLE sCoarsePageTables[4];
+
+ /*
+	 * A count of the coarse page tables currently in use;
+	 * the max value is 4 (one coarse page table is 1KB while one
+	 * page is 4KB)
+ */
+ u8 nReferenceCount;
+};
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head sCoarsePageTableArrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head sFreeCoarsePageTables;
+};
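
Per the comments above, a coarse page table is a 1KB second-level table, so four
of them are carved out of each 4KB page and recycled through a free list. A
hedged user-space sketch of that sub-allocation scheme (the real driver also
keeps a spinlock, a type tag and a parent back-pointer):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096
#define COARSE_TABLE_SIZE	1024
#define TABLES_PER_PAGE		(PAGE_SIZE / COARSE_TABLE_SIZE)	/* 4 */

struct coarse_table {
	unsigned int *descriptors;	/* 1KB worth of second-level descriptors */
	struct coarse_table *next_free;	/* free-list link */
};

static struct coarse_table *free_list;

static struct coarse_table *alloc_coarse_table(void)
{
	struct coarse_table *t;
	int i;

	if (free_list == NULL) {
		/* Nothing left: grab a whole page and split it into four. */
		char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		struct coarse_table *array =
			calloc(TABLES_PER_PAGE, sizeof(*array));

		if (page == NULL || array == NULL)
			return NULL;
		for (i = 0; i < TABLES_PER_PAGE; i++) {
			array[i].descriptors =
				(unsigned int *)(page + i * COARSE_TABLE_SIZE);
			array[i].next_free = free_list;
			free_list = &array[i];
		}
	}

	t = free_list;
	free_list = t->next_free;
	return t;
}

static void free_coarse_table(struct coarse_table *t)
{
	t->next_free = free_list;	/* back on the free list for reuse */
	free_list = t;
}

int main(void)
{
	struct coarse_table *a = alloc_coarse_table();
	struct coarse_table *b = alloc_coarse_table();

	printf("tables at %p and %p\n",
	       (void *)a->descriptors, (void *)b->descriptors);
	free_coarse_table(b);
	free_coarse_table(a);
	return 0;
}
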
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct SCXLNX_SHMEM_DESC {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum SCXLNX_SHMEM_TYPE nType;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is hBlock field of a REGISTER_SHARED_MEMORY answer
+ */
+ u32 hIdentifier;
+
+ /* Client buffer */
+ u8 *pBuffer;
+
+	/* Up to eight coarse page table contexts */
+ struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable[SCX_MAX_COARSE_PAGES];
+
+ u32 nNumberOfCoarsePageTables;
+
+ /* Reference counter */
+ atomic_t nRefCnt;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct SCXLNX_COMM {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - SCXLNX_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+	 *   the IRQ has been successfully requested.
+ * - SCXLNX_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - SCXLNX_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long nFlags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct SCHANNEL_C1S_BUFFER *pBuffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t waitQueue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int nSoftIntIrq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct SCXLNX_SHMEM_DESC sW3BShmemDesc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 nW3BShmemVAddr;
+
+ /* offset of data in shared memory coarse pages */
+ u32 nW3BShmemOffset;
+
+ u32 nW3BShmemSize;
+
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT
+ sW3BAllocationContext;
+#endif
+#ifdef CONFIG_TF_MSHIELD
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int bSEInitialized;
+
+ /* Virtual address of the L0 communication buffer */
+ void *pInitSharedBuffer;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex sRPCLock;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex sDMALock;
+#endif
+};
+
+
+#define SCXLNX_COMM_FLAG_IRQ_REQUESTED (0)
+#define SCXLNX_COMM_FLAG_PA_AVAILABLE (1)
+#define SCXLNX_COMM_FLAG_TERMINATING (2)
+#define SCXLNX_COMM_FLAG_W3B_ALLOCATED (3)
+#define SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+
+/*----------------------------------------------------------------------------*/
+
+struct SCXLNX_DEVICE_STATS {
+ struct kobject kobj;
+
+ struct kobj_type kobj_type;
+
+ struct attribute kobj_stat_attribute;
+
+ struct attribute *kobj_attribute_list[2];
+
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device. see the global
+ * variable g_SCXLNXDevice
+ */
+struct SCXLNX_DEVICE {
+ /*
+ * The device number for the device.
+ */
+ dev_t nDevNum;
+
+ /*
+ * Interfaces the system device with the kernel.
+ */
+ struct sys_device sysdev;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_MSHIELD
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 hAES1SecureKeyContext;
+ u32 hAES2SecureKeyContext;
+ u32 hDESSecureKeyContext;
+ bool bSHAM1IsPublic;
+
+ /* Semaphores used to serialize HWA accesses */
+ struct semaphore sAES1CriticalSection;
+ struct semaphore sAES2CriticalSection;
+ struct mutex sDESCriticalSection;
+ struct mutex sSHACriticalSection;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 nDMABufferLength;
+ u8 *pDMABuffer;
+ dma_addr_t pDMABufferPhys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 nWorkspaceAddr;
+ u32 nWorkspaceSize;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct SCXLNX_COMM sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head conns;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connsLock;
+
+ struct SCXLNX_DEVICE_STATS sDeviceStats;
+
+ /*
+	 * A mutex to provide exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
+};
+
+/* the bits of the nFlags field of the SCXLNX_DEVICE structure */
+#define SCXLNX_DEVICE_FLAG_CDEV_INITIALIZED (0)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_CLASS_REGISTERED (1)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_REGISTERED (2)
+#define SCXLNX_DEVICE_FLAG_CDEV_REGISTERED (3)
+#define SCXLNX_DEVICE_FLAG_CDEV_ADDED (4)
+#define SCXLNX_DEVICE_SYSFS_REGISTERED (5)
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum SCXLNX_CONN_STATE {
+ SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT,
+ SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum SCXLNX_COMMAND_STATE {
+ SCXLNX_COMMAND_STATE_PENDING = 0,
+ SCXLNX_COMMAND_STATE_SENT,
+ SCXLNX_COMMAND_STATE_ABORTED
+};
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct SCXLNX_CONNECTION {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum SCXLNX_CONN_STATE nState;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct SCXLNX_DEVICE *pDevice;
+
+ /*
+ * A spinlock to use to access nState
+ */
+ spinlock_t stateLock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t nPendingOpCounter;
+
+ /*
+ * A handle for the device context
+ */
+ u32 hDeviceContext;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head sUsedSharedMemoryList;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head sFreeSharedMemoryList;
+
+ /*
+ * A mutex to use to access this structure
+ */
+ struct mutex sharedMemoriesMutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t nShmemAllocated;
+
+ /*
+	 * Page-sized array of VMA pointers used to retrieve memory properties when
+ * registering shared memory through REGISTER_SHARED_MEMORY
+ * messages
+ */
+ struct vm_area_struct **ppVmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT sAllocationContext;
+
+#ifdef CONFIG_TF_MSHIELD
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head ShortcutList;
+
+ /* Lock to protect concurrent accesses to ShortcutList */
+ spinlock_t shortcutListCriticalSectionLock;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The nOperationID field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission
+ * Whoever reads an answer can wake up that thread using the completion event
+ */
+struct SCXLNX_ANSWER_STRUCT {
+ bool bAnswerCopied;
+ union SCX_ANSWER_MESSAGE *pAnswer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII-C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define SCXLNX_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The major and minor numbers of the registered character device driver.
+ * Only 1 instance of the driver is supported.
+ */
+#define SCXLNX_DEVICE_MINOR_NUMBER (0)
+
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__SCXLNX_DEFS_H__) */
diff --git a/security/smc/omap4/scxlnx_device.c b/security/smc/omap4/scxlnx_device.c
new file mode 100644
index 0000000..cd9d56b
--- /dev/null
+++ b/security/smc/omap4/scxlnx_device.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#ifdef CONFIG_TF_MSHIELD
+#include <plat/cpu.h>
+#include "scx_public_crypto.h"
+#endif
+
+/* The single device supported by this driver */
+static struct SCXLNX_DEVICE g_SCXLNXDevice = {0, };
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void)
+{
+ return &g_SCXLNXDevice;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int __init register_dmcrypt_engines(void)
+{
+ int ret;
+
+	printk(KERN_INFO "Entered register_dmcrypt_engines\n");
+
+ ret = SCXPublicCryptoInit();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+			" SCXPublicCryptoInit failed (error %d)!\n", ret);
+ goto out;
+ }
+
+ ret = register_smc_public_crypto_aes();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+			" register_smc_public_crypto_aes failed (error %d)!\n", ret);
+ goto out;
+ }
+
+ ret = register_smc_public_crypto_digest();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+			" register_smc_public_crypto_digest failed (error %d)!\n", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+module_init(register_dmcrypt_engines);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/smc/omap4/scxlnx_mshield.h b/security/smc/omap4/scxlnx_mshield.h
new file mode 100644
index 0000000..9457ca9
--- /dev/null
+++ b/security/smc/omap4/scxlnx_mshield.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_MSHIELD_H__
+#define __SCXLNX_MSHIELD_H__
+
+#include "scxlnx_defs.h"
+
+int SCXLNXCtrlDeviceRegister(void);
+
+int SCXLNXCommStart(struct SCXLNX_COMM *pComm,
+ u32 nWorkspaceAddr, u32 nWorkspaceSize,
+ u8 *pPABufferVAddr, u32 nPABufferSize,
+ u8 *pPropertiesBuffer, u32 nPropertiesBufferLength);
+
+/* Assembler entry points to/from secure */
+u32 schedule_secure_world(u32 app_id, u32 proc_id, u32 flags, u32 args);
+u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4);
+u32 read_mpidr(void);
+
+/* L4 SEC clockdomain enabling/disabling */
+void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock);
+void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock);
+
+/* Delayed secure resume */
+int tf_delayed_secure_resume(void);
+
+#endif
diff --git a/security/smc/omap4/scxlnx_util.c b/security/smc/omap4/scxlnx_util.c
new file mode 100644
index 0000000..90cd831
--- /dev/null
+++ b/security/smc/omap4/scxlnx_util.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/mman.h>
+#include "scxlnx_util.h"
+
+void *internal_kmalloc(size_t nSize, int nPriority)
+{
+ void *pResult;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ pResult = kmalloc(nSize, nPriority);
+
+ if (pResult != NULL)
+ atomic_inc(
+ &pDevice->sDeviceStats.stat_memories_allocated);
+
+ return pResult;
+}
+
+void internal_kfree(void *pMemory)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ if (pMemory != NULL)
+ atomic_dec(
+ &pDevice->sDeviceStats.stat_memories_allocated);
+ return kfree(pMemory);
+}
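
internal_kmalloc() and internal_kfree() are thin wrappers whose only extra duty
is keeping the sysfs statistics honest: count successful allocations up, count
frees down. A user-space sketch of the same counting-wrapper idea (names are
illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_long memories_allocated;

static void *counted_malloc(size_t size)
{
	void *p = malloc(size);

	if (p != NULL)	/* only successful allocations are counted */
		atomic_fetch_add(&memories_allocated, 1);
	return p;
}

static void counted_free(void *p)
{
	if (p != NULL)
		atomic_fetch_sub(&memories_allocated, 1);
	free(p);
}

int main(void)
{
	void *a = counted_malloc(32);
	void *b = counted_malloc(64);

	counted_free(a);
	printf("outstanding allocations: %ld\n",
	       atomic_load(&memories_allocated));	/* prints 1 */
	counted_free(b);
	return 0;
}
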
+
diff --git a/security/smc/omap4/scxlnx_util.h b/security/smc/omap4/scxlnx_util.h
new file mode 100644
index 0000000..4569ec2
--- /dev/null
+++ b/security/smc/omap4/scxlnx_util.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef __SCXLNX_UTIL_H__
+#define __SCXLNX_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void addressCacheProperty(unsigned long va);
+
+#define dprintk printk
+
+void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf);
+
+void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage);
+
+void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer);
+
+#ifdef CONFIG_SMC_BENCH_SECURE_CYCLE
+void setupCounters(void);
+void runBogoMIPS(void);
+int runCodeSpeed(unsigned int nLoop);
+int runDataSpeed(unsigned int nLoop, unsigned long nVA);
+#endif /* CONFIG_SMC_BENCH_SECURE_CYCLE */
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define dprintk(args...) do { ; } while (0)
+#define SCXLNXDumpL1SharedBuffer(pBuf) ((void) 0)
+#define SCXLNXDumpMessage(pMessage) ((void) 0)
+#define SCXLNXDumpAnswer(pAnswer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int SCXLNXConnGetCurrentProcessHash(void *pHash);
+
+int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
+ u32 nDataLen);
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+void *internal_kmalloc(size_t nSize, int nPriority);
+void internal_kfree(void *pMemory);
+void internal_vunmap(void *pMemory);
+void *internal_vmalloc(size_t nSize);
+void internal_vfree(void *pMemory);
+unsigned long internal_get_zeroed_page(int nPriority);
+void internal_free_page(unsigned long pPage);
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __SCXLNX_UTIL_H__ */
+
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 3388442..cd69b38 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1961,6 +1961,9 @@
struct snd_pcm_runtime *runtime;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
+	/* TODO: consider returning -EINVAL here */
+ if (substream->hw_no_buffer)
+		snd_printd("%s: warning: this PCM is hostless\n", __func__);
runtime = substream->runtime;
if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 7393551..4e9ebae 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -850,6 +850,7 @@
if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
return -EBADFD;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ !substream->hw_no_buffer &&
!snd_pcm_playback_data(substream))
return -EPIPE;
runtime->trigger_master = substream;
@@ -2048,6 +2049,12 @@
goto error;
}
+ if (substream->ops == NULL) {
+ snd_printd("cannot open back end PCMs directly\n");
+ err = -ENODEV;
+ goto error;
+ }
+
if ((err = substream->ops->open(substream)) < 0)
goto error;
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 1ed61c5..0af7016 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,4 +1,4 @@
-snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
+snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o soc-dsp.o
obj-$(CONFIG_SND_SOC) += snd-soc-core.o
obj-$(CONFIG_SND_SOC) += codecs/
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index d0e7532..42f699c 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -364,9 +364,11 @@
\*--------------------------------------------------------------------------*/
static u64 atmel_pcm_dmamask = 0xffffffff;
-static int atmel_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 10fdd28..20bb53a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -319,10 +319,11 @@
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static int au1xpsc_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, AU1XPSC_BUFFER_MIN_BYTES, (4096 * 1024) - 1);
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 98b44b3..9e59f68 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -418,9 +418,11 @@
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-int bf5xx_pcm_ac97_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
pr_debug("%s enter\n", __func__);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index f1fd95b..c42fb73 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -257,9 +257,11 @@
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-int bf5xx_pcm_i2s_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
pr_debug("%s enter\n", __func__);
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.c b/sound/soc/blackfin/bf5xx-tdm-pcm.c
index 07cfc7a..c95cc03 100644
--- a/sound/soc/blackfin/bf5xx-tdm-pcm.c
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.c
@@ -283,9 +283,11 @@
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-static int bf5xx_pcm_tdm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int bf5xx_pcm_tdm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 98175a0..40e3a82 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -240,6 +240,10 @@
tristate
config SND_SOC_TWL6040
+ select TWL6040_CODEC
+ tristate
+
+config SND_SOC_OMAP_HDMI_CODEC
tristate
config SND_SOC_UDA134X
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index fd85584..04e7b26 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -36,6 +36,7 @@
snd-soc-tlv320dac33-objs := tlv320dac33.o
snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
+snd-soc-omap-hdmi-codec-objs := omap-hdmi-codec.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
snd-soc-wl1273-objs := wl1273.o
@@ -128,6 +129,7 @@
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
+obj-$(CONFIG_SND_SOC_OMAP_HDMI_CODEC) += snd-soc-omap-hdmi-codec.o
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
diff --git a/sound/soc/codecs/omap-hdmi-codec.c b/sound/soc/codecs/omap-hdmi-codec.c
new file mode 100644
index 0000000..b1f3c72
--- /dev/null
+++ b/sound/soc/codecs/omap-hdmi-codec.c
@@ -0,0 +1,492 @@
+/*
+ * ALSA SoC HDMI codec driver
+ *
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <plat/omap_hwmod.h>
+#include <video/omapdss.h>
+#include <video/hdmi_ti_4xxx_ip.h>
+
+#include "../../../drivers/video/omap2/dss/dss_features.h"
+#include "../../../drivers/video/omap2/dss/dss.h"
+
+#define HDMI_WP 0x0
+#define HDMI_CORE_SYS 0x400
+#define HDMI_CORE_AV 0x900
+#define HDMI_PLLCTRL 0x200
+#define HDMI_PHY 0x300
+
+/* hdmi configuration params */
+struct hdmi_params {
+ int format;
+ int sample_freq;
+ int channels_nr;
+};
+
+
+/* codec private data */
+struct hdmi_codec_data {
+ struct hdmi_audio_format audio_fmt;
+ struct hdmi_audio_dma audio_dma;
+ struct hdmi_core_audio_config audio_core_cfg;
+ struct hdmi_core_infoframe_audio aud_if_cfg;
+ struct hdmi_ip_data ip_data;
+ struct omap_hwmod *oh;
+ struct omap_dss_device *dssdev;
+ struct notifier_block notifier;
+ struct hdmi_params params;
+ struct delayed_work delayed_work;
+ struct workqueue_struct *workqueue;
+ int active;
+} hdmi_data;
+
+
+static int hdmi_audio_set_configuration(struct hdmi_codec_data *priv)
+{
+ struct hdmi_audio_format *audio_format = &priv->audio_fmt;
+ struct hdmi_audio_dma *audio_dma = &priv->audio_dma;
+ struct hdmi_core_audio_config *core_cfg = &priv->audio_core_cfg;
+ struct hdmi_core_infoframe_audio *aud_if_cfg = &priv->aud_if_cfg;
+ int err, n, cts, channel_alloc;
+ enum hdmi_core_audio_sample_freq sample_freq;
+ u32 pclk = omapdss_hdmi_get_pixel_clock();
+
+ switch (priv->params.format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ core_cfg->i2s_cfg.word_max_length =
+ HDMI_AUDIO_I2S_MAX_WORD_20BITS;
+ core_cfg->i2s_cfg.word_length =
+ HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
+ core_cfg->i2s_cfg.in_length_bits =
+ HDMI_AUDIO_I2S_INPUT_LENGTH_16;
+ core_cfg->i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ audio_format->samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+ audio_format->sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+ audio_format->justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ audio_dma->transfer_size = 0x10;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ core_cfg->i2s_cfg.word_max_length =
+ HDMI_AUDIO_I2S_MAX_WORD_24BITS;
+ core_cfg->i2s_cfg.word_length =
+ HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
+ core_cfg->i2s_cfg.in_length_bits =
+ HDMI_AUDIO_I2S_INPUT_LENGTH_24;
+ audio_format->samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+ audio_format->sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+ audio_format->justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ core_cfg->i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ audio_dma->transfer_size = 0x20;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ switch (priv->params.sample_freq) {
+ case 32000:
+ sample_freq = HDMI_AUDIO_FS_32000;
+ break;
+ case 44100:
+ sample_freq = HDMI_AUDIO_FS_44100;
+ break;
+ case 48000:
+ sample_freq = HDMI_AUDIO_FS_48000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = hdmi_ti_4xxx_config_audio_acr(&priv->ip_data,
+ priv->params.sample_freq, &n, &cts, pclk);
+ if (err < 0)
+ return err;
+
+ /* Audio wrapper config */
+ audio_format->type = HDMI_AUDIO_TYPE_LPCM;
+ audio_format->sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+ /* Disable start/stop signals of IEC 60958 blocks */
+ audio_format->en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
+
+ audio_dma->block_size = 0xC0;
+ audio_dma->mode = HDMI_AUDIO_TRANSF_DMA;
+ audio_dma->fifo_threshold = 0x20; /* in number of samples */
+
+ hdmi_ti_4xxx_wp_audio_config_dma(&priv->ip_data, audio_dma);
+ hdmi_ti_4xxx_wp_audio_config_format(&priv->ip_data, audio_format);
+
+ /*
+ * I2S config
+ */
+ core_cfg->i2s_cfg.en_high_bitrate_aud = false;
+ /* Only used with high bitrate audio */
+ core_cfg->i2s_cfg.cbit_order = false;
+ /* Serial data and word select should change on sck rising edge */
+ core_cfg->i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+ core_cfg->i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+ /* Set I2S word select polarity */
+ core_cfg->i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
+ core_cfg->i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+	/* Set serial data to word select shift. See Philips I2S spec. */
+ core_cfg->i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+ /* Core audio config */
+ core_cfg->freq_sample = sample_freq;
+ core_cfg->n = n;
+ core_cfg->cts = cts;
+ if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+ core_cfg->aud_par_busclk = 0;
+ core_cfg->cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+ core_cfg->use_mclk = cpu_is_omap446x();
+ } else {
+ core_cfg->aud_par_busclk = (((128 * 31) - 1) << 8);
+ core_cfg->cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+ core_cfg->use_mclk = true;
+ core_cfg->mclk_mode = HDMI_AUDIO_MCLK_128FS;
+ }
+ core_cfg->en_spdif = false;
+ /* Use sample frequency from channel status word */
+ core_cfg->fs_override = true;
+ /* Enable ACR packets */
+ core_cfg->en_acr_pkt = true;
+ /* Disable direct streaming digital audio */
+ core_cfg->en_dsd_audio = false;
+ /* Use parallel audio interface */
+ core_cfg->en_parallel_aud_input = true;
+
+ /* Number of channels */
+
+ switch (priv->params.channels_nr) {
+ case 2:
+ core_cfg->layout = HDMI_AUDIO_LAYOUT_2CH;
+ channel_alloc = 0x0;
+ audio_format->stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+ audio_format->active_chnnls_msk = 0x03;
+ /* Enable one of the four available serial data channels */
+ core_cfg->i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+ break;
+ case 6:
+ core_cfg->layout = HDMI_AUDIO_LAYOUT_8CH;
+ channel_alloc = 0xB;
+ audio_format->stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+ audio_format->active_chnnls_msk = 0x3f;
+ /* Enable all of the four available serial data channels */
+ core_cfg->i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+ HDMI_AUDIO_I2S_SD3_EN;
+ break;
+ case 8:
+ core_cfg->layout = HDMI_AUDIO_LAYOUT_8CH;
+ channel_alloc = 0x13;
+ audio_format->stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+ audio_format->active_chnnls_msk = 0xff;
+ /* Enable all of the four available serial data channels */
+ core_cfg->i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+ HDMI_AUDIO_I2S_SD3_EN;
+ break;
+ default:
+ pr_err("Unsupported number of channels\n");
+ return -EINVAL;
+ }
+
+ hdmi_ti_4xxx_core_audio_config(&priv->ip_data, core_cfg);
+ hdmi_ti_4xxx_wp_audio_config_format(&priv->ip_data, audio_format);
+
+ /*
+	 * Configure the audio InfoFrame packet
+	 * (see doc CEA-861-D, page 74)
+ */
+ aud_if_cfg->db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
+ aud_if_cfg->db1_channel_count = priv->params.channels_nr;
+ aud_if_cfg->db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
+ aud_if_cfg->db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
+ aud_if_cfg->db4_channel_alloc = channel_alloc;
+ aud_if_cfg->db5_downmix_inh = false;
+ aud_if_cfg->db5_lsv = 0;
+
+ hdmi_ti_4xxx_core_audio_infoframe_config(&priv->ip_data, aud_if_cfg);
+ return 0;
+
+}
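
hdmi_ti_4xxx_config_audio_acr() supplies the N and CTS values used for HDMI Audio
Clock Regeneration. The HDMI specification ties them together as
128 * fs = f_TMDS * N / CTS and recommends per-rate N values; the sketch below
shows how such values could be derived for the three rates accepted above (the
exact values and rounding chosen by the TI IP layer may differ):

#include <stdint.h>
#include <stdio.h>

/* Recommended N for the common rates (HDMI spec, coherent audio clock). */
static unsigned int acr_n_for_rate(unsigned int fs)
{
	switch (fs) {
	case 32000: return 4096;
	case 44100: return 6272;
	case 48000: return 6144;
	default:    return 0;	/* unsupported, as in the code above */
	}
}

/* CTS = f_TMDS * N / (128 * fs), with f_TMDS in Hz. */
static unsigned int acr_cts(uint64_t tmds_hz, unsigned int n, unsigned int fs)
{
	return (unsigned int)(tmds_hz * n / (128ull * fs));
}

int main(void)
{
	unsigned int fs = 48000;
	unsigned int n = acr_n_for_rate(fs);
	uint64_t tmds = 148500000ull;	/* 1080p60 pixel clock, example only */

	printf("fs=%u Hz -> N=%u, CTS=%u\n", fs, n, acr_cts(tmds, n, fs));
	return 0;
}
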
+
+int hdmi_audio_notifier_callback(struct notifier_block *nb,
+ unsigned long arg, void *ptr)
+{
+ enum omap_dss_display_state state = arg;
+
+ if (state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /* this happens just after hdmi_power_on */
+ hdmi_audio_set_configuration(&hdmi_data);
+ if (hdmi_data.active) {
+ omap_hwmod_set_slave_idlemode(hdmi_data.oh,
+ HWMOD_IDLEMODE_NO);
+ hdmi_ti_4xxx_wp_audio_enable(&hdmi_data.ip_data, 1);
+ queue_delayed_work(hdmi_data.workqueue,
+ &hdmi_data.delayed_work,
+ msecs_to_jiffies(1));
+ }
+ } else {
+ cancel_delayed_work(&hdmi_data.delayed_work);
+ }
+ return 0;
+}
+
+static void hdmi_audio_work(struct work_struct *work)
+{
+ hdmi_ti_4xxx_audio_transfer_en(&hdmi_data.ip_data, 1);
+}
+
+int hdmi_audio_match(struct omap_dss_device *dssdev, void *arg)
+{
+	return sysfs_streq(dssdev->name, "hdmi");
+}
+
+static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct hdmi_codec_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->params.format = params_format(params);
+ priv->params.sample_freq = params_rate(params);
+ priv->params.channels_nr = params_channels(params);
+ return hdmi_audio_set_configuration(priv);
+}
+
+static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct hdmi_codec_data *priv = snd_soc_codec_get_drvdata(codec);
+ int err = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/*
+		 * switch to no-idle to keep the DSS_L3_ICLK clock
+		 * from being shut down during audio activity (as per the TRM)
+		 */
+ omap_hwmod_set_slave_idlemode(priv->oh,
+ HWMOD_IDLEMODE_NO);
+ hdmi_ti_4xxx_wp_audio_enable(&priv->ip_data, 1);
+ queue_delayed_work(priv->workqueue, &priv->delayed_work,
+ msecs_to_jiffies(1));
+
+ priv->active = 1;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ cancel_delayed_work(&hdmi_data.delayed_work);
+ priv->active = 0;
+ hdmi_ti_4xxx_audio_transfer_en(&priv->ip_data, 0);
+ hdmi_ti_4xxx_wp_audio_enable(&priv->ip_data, 0);
+ /*
+ * switch back to smart-idle & wakeup capable
+ * after audio activity stops
+ */
+ omap_hwmod_set_slave_idlemode(priv->oh,
+ HWMOD_IDLEMODE_SMART_WKUP);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int hdmi_audio_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ if (!omapdss_hdmi_get_mode()) {
+ pr_err("Current video settings do not support audio.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hdmi_probe(struct snd_soc_codec *codec)
+{
+ struct platform_device *pdev = to_platform_device(codec->dev);
+ struct resource *hdmi_rsrc;
+ int ret = 0;
+
+ snd_soc_codec_set_drvdata(codec, &hdmi_data);
+
+ hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!hdmi_rsrc) {
+ dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
+ ret = -EINVAL;
+ goto res_err;
+ }
+
+
+ hdmi_data.oh = omap_hwmod_lookup("dss_hdmi");
+
+ if (!hdmi_data.oh) {
+ dev_err(&pdev->dev, "can't find omap_hwmod for hdmi\n");
+ ret = -ENODEV;
+ goto res_err;
+ }
+
+ /* Base address taken from platform */
+ hdmi_data.ip_data.base_wp = ioremap(hdmi_rsrc->start,
+ resource_size(hdmi_rsrc));
+
+ if (!hdmi_data.ip_data.base_wp) {
+ dev_err(&pdev->dev, "can't ioremap WP\n");
+ ret = -ENOMEM;
+ goto res_err;
+ }
+
+ hdmi_data.ip_data.hdmi_core_sys_offset = HDMI_CORE_SYS;
+ hdmi_data.ip_data.hdmi_core_av_offset = HDMI_CORE_AV;
+ hdmi_data.ip_data.hdmi_pll_offset = HDMI_PLLCTRL;
+ hdmi_data.ip_data.hdmi_phy_offset = HDMI_PHY;
+
+ hdmi_data.dssdev = omap_dss_find_device(NULL, hdmi_audio_match);
+
+ if (!hdmi_data.dssdev) {
+ dev_err(&pdev->dev, "can't find HDMI device\n");
+ ret = -ENODEV;
+ goto dssdev_err;
+ }
+
+ hdmi_data.notifier.notifier_call = hdmi_audio_notifier_callback;
+ blocking_notifier_chain_register(&hdmi_data.dssdev->state_notifiers,
+ &hdmi_data.notifier);
+
+ hdmi_data.workqueue = create_singlethread_workqueue("hdmi-codec");
+
+ INIT_DELAYED_WORK(&hdmi_data.delayed_work, hdmi_audio_work);
+
+ return 0;
+
+dssdev_err:
+ iounmap(hdmi_data.ip_data.base_wp);
+res_err:
+ return ret;
+}
+
+static int hdmi_remove(struct snd_soc_codec *codec)
+{
+ struct hdmi_codec_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ blocking_notifier_chain_unregister(&priv->dssdev->state_notifiers,
+ &priv->notifier);
+ iounmap(priv->ip_data.base_wp);
+ kfree(priv);
+ return 0;
+}
+
+
+static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
+ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+};
+
+static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .trigger = hdmi_audio_trigger,
+ .startup = hdmi_audio_startup,
+};
+
+static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
+ .name = "hdmi-audio-codec",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &hdmi_audio_codec_ops,
+};
+
+static __devinit int hdmi_codec_probe(struct platform_device *pdev)
+{
+ int r;
+
+ /* Register ASoC codec DAI */
+ r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
+ &hdmi_codec_dai_drv, 1);
+ if (r) {
+ dev_err(&pdev->dev, "can't register ASoC HDMI audio codec\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int __devexit hdmi_codec_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+
+static struct platform_driver hdmi_codec_driver = {
+ .probe = hdmi_codec_probe,
+ .remove = __devexit_p(hdmi_codec_remove),
+ .driver = {
+ .name = "omap-hdmi-codec",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init hdmi_codec_init(void)
+{
+ return platform_driver_register(&hdmi_codec_driver);
+}
+module_init(hdmi_codec_init);
+
+static void __exit hdmi_codec_exit(void)
+{
+ platform_driver_unregister(&hdmi_codec_driver);
+}
+module_exit(hdmi_codec_exit);
+
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("ASoC HDMI codec driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 4c33663..b23fb26 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c/twl.h>
+#include <linux/mfd/twl6040-codec.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -57,6 +58,8 @@
#define TWL6040_HSR_VOL_SHIFT 4
#define TWL6040_HF_VOL_MASK 0x1F
#define TWL6040_HF_VOL_SHIFT 0
+#define TWL6040_EP_VOL_MASK 0x1E
+#define TWL6040_EP_VOL_SHIFT 1
struct twl6040_output {
u16 active;
@@ -77,25 +80,32 @@
/* codec private data */
struct twl6040_data {
- int audpwron;
- int naudint;
int codec_powered;
int pll;
- int non_lp;
+ int power_mode_forced;
+ int headset_mode;
+ unsigned int clk_in;
unsigned int sysclk;
+ u16 hs_left_step;
+ u16 hs_right_step;
+ u16 hf_left_step;
+ u16 hf_right_step;
+ u16 ep_step;
struct snd_pcm_hw_constraint_list *sysclk_constraints;
- struct completion ready;
struct twl6040_jack_data hs_jack;
struct snd_soc_codec *codec;
struct workqueue_struct *workqueue;
struct delayed_work delayed_work;
struct mutex mutex;
struct twl6040_output headset;
+ struct twl6040_output earphone;
struct twl6040_output handsfree;
struct workqueue_struct *hf_workqueue;
struct workqueue_struct *hs_workqueue;
+ struct workqueue_struct *ep_workqueue;
struct delayed_work hs_delayed_work;
struct delayed_work hf_delayed_work;
+ struct delayed_work ep_delayed_work;
};
/*
@@ -120,12 +130,12 @@
0x1B, /* TWL6040_LINEGAIN 0x0F */
0x00, /* TWL6040_HSLCTL 0x10 */
0x00, /* TWL6040_HSRCTL 0x11 */
- 0x00, /* TWL6040_HSGAIN 0x12 */
- 0x00, /* TWL6040_EARCTL 0x13 */
+ 0xFF, /* TWL6040_HSGAIN 0x12 */
+ 0x1E, /* TWL6040_EARCTL 0x13 */
0x00, /* TWL6040_HFLCTL 0x14 */
- 0x00, /* TWL6040_HFLGAIN 0x15 */
+ 0x1D, /* TWL6040_HFLGAIN 0x15 */
0x00, /* TWL6040_HFRCTL 0x16 */
- 0x00, /* TWL6040_HFRGAIN 0x17 */
+ 0x1D, /* TWL6040_HFRGAIN 0x17 */
0x00, /* TWL6040_VIBCTLL 0x18 */
0x00, /* TWL6040_VIBDATL 0x19 */
0x00, /* TWL6040_VIBCTLR 0x1A */
@@ -151,59 +161,59 @@
0x00, /* TWL6040_STATUS (ro) 0x2E */
};
-/*
- * twl6040 vio/gnd registers:
- * registers under vio/gnd supply can be accessed
- * before the power-up sequence, after NRESPWRON goes high
- */
-static const int twl6040_vio_reg[TWL6040_VIOREGNUM] = {
- TWL6040_REG_ASICID,
- TWL6040_REG_ASICREV,
- TWL6040_REG_INTID,
- TWL6040_REG_INTMR,
- TWL6040_REG_NCPCTL,
- TWL6040_REG_LDOCTL,
- TWL6040_REG_AMICBCTL,
- TWL6040_REG_DMICBCTL,
- TWL6040_REG_HKCTL1,
- TWL6040_REG_HKCTL2,
- TWL6040_REG_GPOCTL,
- TWL6040_REG_TRIM1,
- TWL6040_REG_TRIM2,
- TWL6040_REG_TRIM3,
- TWL6040_REG_HSOTRIM,
- TWL6040_REG_HFOTRIM,
- TWL6040_REG_ACCCTL,
- TWL6040_REG_STATUS,
-};
-/*
- * twl6040 vdd/vss registers:
- * registers under vdd/vss supplies can only be accessed
- * after the power-up sequence
- */
-static const int twl6040_vdd_reg[TWL6040_VDDREGNUM] = {
- TWL6040_REG_HPPLLCTL,
- TWL6040_REG_LPPLLCTL,
- TWL6040_REG_LPPLLDIV,
- TWL6040_REG_MICLCTL,
- TWL6040_REG_MICRCTL,
- TWL6040_REG_MICGAIN,
- TWL6040_REG_LINEGAIN,
- TWL6040_REG_HSLCTL,
- TWL6040_REG_HSRCTL,
- TWL6040_REG_HSGAIN,
- TWL6040_REG_EARCTL,
- TWL6040_REG_HFLCTL,
- TWL6040_REG_HFLGAIN,
- TWL6040_REG_HFRCTL,
- TWL6040_REG_HFRGAIN,
- TWL6040_REG_VIBCTLL,
- TWL6040_REG_VIBDATL,
- TWL6040_REG_VIBCTLR,
- TWL6040_REG_VIBDATR,
- TWL6040_REG_ALB,
- TWL6040_REG_DLB,
+/*
+ * twl6040 vio/gnd registers: can be accessed before the power-up sequence,
+ * after NRESPWRON goes high.
+ * twl6040 vdd/vss registers: can only be accessed after the power-up sequence.
+ */
+
+static const u8 twl6040_reg_supply[TWL6040_CACHEREGNUM] = {
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_VIO_SUPPLY, /* TWL6040_ASICID (ro) */
+ TWL6040_VIO_SUPPLY, /* TWL6040_ASICREV (ro) */
+ TWL6040_VIO_SUPPLY, /* TWL6040_INTID */
+ TWL6040_VIO_SUPPLY, /* TWL6040_INTMR */
+ TWL6040_VIO_SUPPLY, /* TWL6040_NCPCTRL */
+ TWL6040_VIO_SUPPLY, /* TWL6040_LDOCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HPPLLCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_LPPLLCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_LPPLLDIV */
+ TWL6040_VIO_SUPPLY, /* TWL6040_AMICBCTL */
+ TWL6040_VIO_SUPPLY, /* TWL6040_DMICBCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_MICLCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_MICRCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_MICGAIN */
+ TWL6040_VDD_SUPPLY, /* TWL6040_LINEGAIN */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HSLCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HSRCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HSGAIN */
+ TWL6040_VDD_SUPPLY, /* TWL6040_EARCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HFLCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HFLGAIN */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HFRCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_HFRGAIN */
+ TWL6040_VDD_SUPPLY, /* TWL6040_VIBCTLL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_VIBDATL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_VIBCTLR */
+ TWL6040_VDD_SUPPLY, /* TWL6040_VIBDATR */
+ TWL6040_VIO_SUPPLY, /* TWL6040_HKCTL1 */
+ TWL6040_VIO_SUPPLY, /* TWL6040_HKCTL2 */
+ TWL6040_VIO_SUPPLY, /* TWL6040_GPOCTL */
+ TWL6040_VDD_SUPPLY, /* TWL6040_ALB */
+ TWL6040_VDD_SUPPLY, /* TWL6040_DLB */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_NO_SUPPLY, /* not used */
+ TWL6040_VIO_SUPPLY, /* TWL6040_TRIM1 */
+ TWL6040_VIO_SUPPLY, /* TWL6040_TRIM2 */
+ TWL6040_VIO_SUPPLY, /* TWL6040_TRIM3 */
+ TWL6040_VIO_SUPPLY, /* TWL6040_HSOTRIM */
+ TWL6040_VIO_SUPPLY, /* TWL6040_HFOTRIM */
+ TWL6040_VIO_SUPPLY, /* TWL6040_ACCCTL */
+ TWL6040_VIO_SUPPLY, /* TWL6040_STATUS (ro) */
};
/*
@@ -237,14 +247,21 @@
* read from twl6040 hardware register
*/
static int twl6040_read_reg_volatile(struct snd_soc_codec *codec,
- unsigned int reg)
+ unsigned int reg)
{
- u8 value;
+ struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ u8 value = 0;
if (reg >= TWL6040_CACHEREGNUM)
return -EIO;
- twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &value, reg);
+ /* read access not supported while in sleep state */
+ if ((twl6040_reg_supply[reg] == TWL6040_VDD_SUPPLY) &&
+ !priv->codec_powered)
+ return -EINVAL;
+
+ value = twl6040_reg_read(twl6040, reg);
twl6040_write_reg_cache(codec, reg, value);
return value;
@@ -256,29 +273,52 @@
static int twl6040_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
+ struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
if (reg >= TWL6040_CACHEREGNUM)
return -EIO;
twl6040_write_reg_cache(codec, reg, value);
- return twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, value, reg);
+
+ if ((twl6040_reg_supply[reg] == TWL6040_VIO_SUPPLY) ||
+ priv->codec_powered)
+ ret = twl6040_reg_write(twl6040, reg, value);
+ else
+ dev_dbg(codec->dev, "deferring register 0x%02x write: %02x\n",
+ reg, value);
+
+ return ret;
}
static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
{
u8 *cache = codec->reg_cache;
- int reg, i;
+ int reg;
- /* allow registers to be accessed by i2c */
- twl6040_write(codec, TWL6040_REG_ACCCTL, cache[TWL6040_REG_ACCCTL]);
-
- for (i = 0; i < TWL6040_VIOREGNUM; i++) {
- reg = twl6040_vio_reg[i];
- /* skip read-only registers (ASICID, ASICREV, STATUS) */
+ for (reg = 0; reg < TWL6040_CACHEREGNUM; reg++) {
+ if (twl6040_reg_supply[reg] != TWL6040_VIO_SUPPLY)
+ continue;
+ /*
+ * skip read-only registers (ASICID, ASICREV, STATUS)
+ * and registers shared among MFD children
+ */
switch (reg) {
case TWL6040_REG_ASICID:
case TWL6040_REG_ASICREV:
+ case TWL6040_REG_INTID:
+ case TWL6040_REG_INTMR:
+ case TWL6040_REG_NCPCTL:
+ case TWL6040_REG_LDOCTL:
+ case TWL6040_REG_GPOCTL:
+ case TWL6040_REG_ACCCTL:
case TWL6040_REG_STATUS:
continue;
+ case TWL6040_REG_HSOTRIM:
+ case TWL6040_REG_HFOTRIM:
+ twl6040_read_reg_volatile(codec, reg);
+ continue;
default:
break;
}
@@ -289,10 +329,24 @@
static void twl6040_init_vdd_regs(struct snd_soc_codec *codec)
{
u8 *cache = codec->reg_cache;
- int reg, i;
+ int reg;
- for (i = 0; i < TWL6040_VDDREGNUM; i++) {
- reg = twl6040_vdd_reg[i];
+ for (reg = 0; reg < TWL6040_CACHEREGNUM; reg++) {
+ if (twl6040_reg_supply[reg] != TWL6040_VDD_SUPPLY)
+ continue;
+ /* skip vibra and pll registers */
+ switch (reg) {
+ case TWL6040_REG_VIBCTLL:
+ case TWL6040_REG_VIBDATL:
+ case TWL6040_REG_VIBCTLR:
+ case TWL6040_REG_VIBDATR:
+ case TWL6040_REG_HPPLLCTL:
+ case TWL6040_REG_LPPLLCTL:
+ case TWL6040_REG_LPPLLDIV:
+ continue;
+ default:
+ break;
+ }
twl6040_write(codec, reg, cache[reg]);
}
}
@@ -317,7 +371,11 @@
if (headset->ramp == TWL6040_RAMP_UP) {
/* ramp step up */
if (val < headset->left_vol) {
- val += left_step;
+ if (val + left_step > headset->left_vol)
+ val = headset->left_vol;
+ else
+ val += left_step;
+
reg &= ~TWL6040_HSL_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HSGAIN,
(reg | (~val & TWL6040_HSL_VOL_MASK)));
@@ -327,7 +385,11 @@
} else if (headset->ramp == TWL6040_RAMP_DOWN) {
/* ramp step down */
if (val > 0x0) {
- val -= left_step;
+ if ((int)val - (int)left_step < 0)
+ val = 0;
+ else
+ val -= left_step;
+
reg &= ~TWL6040_HSL_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HSGAIN, reg |
(~val & TWL6040_HSL_VOL_MASK));
@@ -344,7 +406,11 @@
if (headset->ramp == TWL6040_RAMP_UP) {
/* ramp step up */
if (val < headset->right_vol) {
- val += right_step;
+ if (val + right_step > headset->right_vol)
+ val = headset->right_vol;
+ else
+ val += right_step;
+
reg &= ~TWL6040_HSR_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HSGAIN,
(reg | (~val << TWL6040_HSR_VOL_SHIFT)));
@@ -354,7 +420,11 @@
} else if (headset->ramp == TWL6040_RAMP_DOWN) {
/* ramp step down */
if (val > 0x0) {
- val -= right_step;
+ if ((int)val - (int)right_step < 0)
+ val = 0;
+ else
+ val -= right_step;
+
reg &= ~TWL6040_HSR_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HSGAIN,
reg | (~val << TWL6040_HSR_VOL_SHIFT));
@@ -385,7 +455,11 @@
if (handsfree->ramp == TWL6040_RAMP_UP) {
/* ramp step up */
if (val < handsfree->left_vol) {
- val += left_step;
+ if (val + left_step > handsfree->left_vol)
+ val = handsfree->left_vol;
+ else
+ val += left_step;
+
reg &= ~TWL6040_HF_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HFLGAIN,
reg | (0x1D - val));
@@ -395,7 +469,11 @@
} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
/* ramp step down */
if (val > 0) {
- val -= left_step;
+ if ((int)val - (int)left_step < 0)
+ val = 0;
+ else
+ val -= left_step;
+
reg &= ~TWL6040_HF_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HFLGAIN,
reg | (0x1D - val));
@@ -412,7 +490,11 @@
if (handsfree->ramp == TWL6040_RAMP_UP) {
/* ramp step up */
if (val < handsfree->right_vol) {
- val += right_step;
+ if (val + right_step > handsfree->right_vol)
+ val = handsfree->right_vol;
+ else
+ val += right_step;
+
reg &= ~TWL6040_HF_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HFRGAIN,
reg | (0x1D - val));
@@ -422,10 +504,16 @@
} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
/* ramp step down */
if (val > 0) {
- val -= right_step;
+ if ((int)val - (int)right_step < 0)
+ val = 0;
+ else
+ val -= right_step;
+
reg &= ~TWL6040_HF_VOL_MASK;
twl6040_write(codec, TWL6040_REG_HFRGAIN,
reg | (0x1D - val));
+ } else {
+ right_complete = 1;
}
}
@@ -433,6 +521,57 @@
}
/*
+ * Ramp Earpiece PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_ep_ramp_step(struct snd_soc_codec *codec,
+ unsigned int step)
+{
+
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ struct twl6040_output *earphone = &priv->earphone;
+ int complete = 0;
+ u8 reg, val;
+
+ step = (step > 0xF) ? 0xF : step;
+ reg = twl6040_read_reg_cache(codec, TWL6040_REG_EARCTL);
+ val = (~reg & TWL6040_EP_VOL_MASK) >> TWL6040_EP_VOL_SHIFT;
+
+ if (earphone->ramp == TWL6040_RAMP_UP) {
+ /* ramp step up */
+ if (val < earphone->left_vol) {
+ if (val + step > earphone->left_vol)
+ val = earphone->left_vol;
+ else
+ val += step;
+
+ reg &= ~TWL6040_EP_VOL_MASK;
+ val = ~val << TWL6040_EP_VOL_SHIFT;
+ twl6040_write(codec, TWL6040_REG_EARCTL,
+ reg | (val & TWL6040_EP_VOL_MASK));
+ } else {
+ complete = 1;
+ }
+ } else if (earphone->ramp == TWL6040_RAMP_DOWN) {
+ /* ramp step down */
+ if (val > 0x0) {
+ if ((int)val - (int)step < 0)
+ val = 0;
+ else
+ val -= step;
+
+ reg &= ~TWL6040_EP_VOL_MASK;
+ val = ~val << TWL6040_EP_VOL_SHIFT;
+ twl6040_write(codec, TWL6040_REG_EARCTL,
+ reg | (val & TWL6040_EP_VOL_MASK));
+ } else {
+ complete = 1;
+ }
+ }
+
+ return complete;
+}
+
+/*
* This work ramps both output PGAs at stream start/stop time to
* minimise pop associated with DAPM power switching.
*/
@@ -451,11 +590,9 @@
/* HS PGA volumes have 4 bits of resolution to ramp */
for (i = 0; i <= 16; i++) {
- headset_complete = 1;
- if (headset->ramp != TWL6040_RAMP_NONE)
- headset_complete = twl6040_hs_ramp_step(codec,
- headset->left_step,
- headset->right_step);
+ headset_complete = twl6040_hs_ramp_step(codec,
+ headset->left_step,
+ headset->right_step);
/* ramp finished ? */
if (headset_complete)
@@ -496,11 +633,9 @@
/* HF PGA volumes have 5 bits of resolution to ramp */
for (i = 0; i <= 32; i++) {
- handsfree_complete = 1;
- if (handsfree->ramp != TWL6040_RAMP_NONE)
- handsfree_complete = twl6040_hf_ramp_step(codec,
- handsfree->left_step,
- handsfree->right_step);
+ handsfree_complete = twl6040_hf_ramp_step(codec,
+ handsfree->left_step,
+ handsfree->right_step);
/* ramp finished ? */
if (handsfree_complete)
@@ -526,6 +661,48 @@
handsfree->ramp = TWL6040_RAMP_NONE;
}
+static void twl6040_pga_ep_work(struct work_struct *work)
+{
+ struct twl6040_data *priv =
+ container_of(work, struct twl6040_data, ep_delayed_work.work);
+ struct snd_soc_codec *codec = priv->codec;
+ struct twl6040_output *earphone = &priv->earphone;
+ unsigned int delay = earphone->step_delay;
+ int i, earphone_complete;
+
+ /* do we need to ramp at all ? */
+ if (earphone->ramp == TWL6040_RAMP_NONE)
+ return;
+
+ /* Earpiece PGA volumes have 4 bits of resolution to ramp */
+ for (i = 0; i <= 16; i++) {
+ earphone_complete = twl6040_ep_ramp_step(codec,
+ earphone->left_step);
+
+ /* ramp finished ? */
+ if (earphone_complete)
+ break;
+
+ /*
+ * TODO: tune: delay is longer over 0dB
+ * as increases are larger.
+ */
+ if (i >= 8)
+ schedule_timeout_interruptible(msecs_to_jiffies(delay +
+ (delay >> 1)));
+ else
+ schedule_timeout_interruptible(msecs_to_jiffies(delay));
+ }
+
+ if (earphone->ramp == TWL6040_RAMP_DOWN) {
+ earphone->active = 0;
+ complete(&earphone->ramp_done);
+ } else {
+ earphone->active = 1;
+ }
+ earphone->ramp = TWL6040_RAMP_NONE;
+}
+
static int pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -536,22 +713,29 @@
struct workqueue_struct *queue;
switch (w->shift) {
+ case 0:
+ out = &priv->earphone;
+ work = &priv->ep_delayed_work;
+ queue = priv->ep_workqueue;
+ out->left_step = priv->ep_step;
+ out->step_delay = 5; /* 5 ms between volume ramp steps */
+ break;
case 2:
case 3:
out = &priv->headset;
work = &priv->hs_delayed_work;
queue = priv->hs_workqueue;
+ out->left_step = priv->hs_left_step;
+ out->right_step = priv->hs_right_step;
out->step_delay = 5; /* 5 ms between volume ramp steps */
break;
case 4:
out = &priv->handsfree;
work = &priv->hf_delayed_work;
queue = priv->hf_workqueue;
+ out->left_step = priv->hf_left_step;
+ out->right_step = priv->hf_right_step;
out->step_delay = 5; /* 5 ms between volume ramp steps */
- if (SND_SOC_DAPM_EVENT_ON(event))
- priv->non_lp++;
- else
- priv->non_lp--;
break;
default:
return -1;
@@ -579,8 +763,6 @@
if (!delayed_work_pending(work)) {
/* use volume ramp for power-down */
- out->left_step = 1;
- out->right_step = 1;
out->ramp = TWL6040_RAMP_DOWN;
INIT_COMPLETION(out->ramp_done);
@@ -596,88 +778,6 @@
return 0;
}
-/* twl6040 codec manual power-up sequence */
-static void twl6040_power_up(struct snd_soc_codec *codec)
-{
- u8 ncpctl, ldoctl, lppllctl, accctl;
-
- ncpctl = twl6040_read_reg_cache(codec, TWL6040_REG_NCPCTL);
- ldoctl = twl6040_read_reg_cache(codec, TWL6040_REG_LDOCTL);
- lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
- accctl = twl6040_read_reg_cache(codec, TWL6040_REG_ACCCTL);
-
- /* enable reference system */
- ldoctl |= TWL6040_REFENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- msleep(10);
- /* enable internal oscillator */
- ldoctl |= TWL6040_OSCENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(10);
- /* enable high-side ldo */
- ldoctl |= TWL6040_HSLDOENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(244);
- /* enable negative charge pump */
- ncpctl |= TWL6040_NCPENA | TWL6040_NCPOPEN;
- twl6040_write(codec, TWL6040_REG_NCPCTL, ncpctl);
- udelay(488);
- /* enable low-side ldo */
- ldoctl |= TWL6040_LSLDOENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(244);
- /* enable low-power pll */
- lppllctl |= TWL6040_LPLLENA;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
- /* reset state machine */
- accctl |= TWL6040_RESETSPLIT;
- twl6040_write(codec, TWL6040_REG_ACCCTL, accctl);
- mdelay(5);
- accctl &= ~TWL6040_RESETSPLIT;
- twl6040_write(codec, TWL6040_REG_ACCCTL, accctl);
- /* disable internal oscillator */
- ldoctl &= ~TWL6040_OSCENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
-}
-
-/* twl6040 codec manual power-down sequence */
-static void twl6040_power_down(struct snd_soc_codec *codec)
-{
- u8 ncpctl, ldoctl, lppllctl, accctl;
-
- ncpctl = twl6040_read_reg_cache(codec, TWL6040_REG_NCPCTL);
- ldoctl = twl6040_read_reg_cache(codec, TWL6040_REG_LDOCTL);
- lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
- accctl = twl6040_read_reg_cache(codec, TWL6040_REG_ACCCTL);
-
- /* enable internal oscillator */
- ldoctl |= TWL6040_OSCENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(10);
- /* disable low-power pll */
- lppllctl &= ~TWL6040_LPLLENA;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
- /* disable low-side ldo */
- ldoctl &= ~TWL6040_LSLDOENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(244);
- /* disable negative charge pump */
- ncpctl &= ~(TWL6040_NCPENA | TWL6040_NCPOPEN);
- twl6040_write(codec, TWL6040_REG_NCPCTL, ncpctl);
- udelay(488);
- /* disable high-side ldo */
- ldoctl &= ~TWL6040_HSLDOENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- udelay(244);
- /* disable internal oscillator */
- ldoctl &= ~TWL6040_OSCENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- /* disable reference system */
- ldoctl &= ~TWL6040_REFENA;
- twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
- msleep(10);
-}
-
/* set headset dac and driver power mode */
static int headset_power_mode(struct snd_soc_codec *codec, int high_perf)
{
@@ -701,27 +801,32 @@
return 0;
}
-static int twl6040_hs_dac_event(struct snd_soc_dapm_widget *w,
+static int twl6040_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
msleep(1);
return 0;
}
-static int twl6040_power_mode_event(struct snd_soc_dapm_widget *w,
+static int twl6040_ep_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
- if (SND_SOC_DAPM_EVENT_ON(event))
- priv->non_lp++;
- else
- priv->non_lp--;
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /* Earphone doesn't support low power mode */
+ priv->power_mode_forced = 1;
+ ret = headset_power_mode(codec, 1);
+ } else {
+ priv->power_mode_forced = 0;
+ ret = headset_power_mode(codec, priv->headset_mode);
+ }
msleep(1);
- return 0;
+ return ret;
}
static void twl6040_hs_jack_report(struct snd_soc_codec *codec,
@@ -766,32 +871,18 @@
}
/* audio interrupt handler */
-static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+static irqreturn_t twl6040_audio_handler(int irq, void *data)
{
struct snd_soc_codec *codec = data;
+ struct twl6040 *twl6040 = codec->control_data;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
u8 intid;
- twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
-
- if (intid & TWL6040_THINT)
- dev_alert(codec->dev, "die temp over-limit detection\n");
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT))
queue_delayed_work(priv->workqueue, &priv->delayed_work,
- msecs_to_jiffies(200));
-
- if (intid & TWL6040_HOOKINT)
- dev_info(codec->dev, "hook detection\n");
-
- if (intid & TWL6040_HFINT)
- dev_alert(codec->dev, "hf drivers over current detection\n");
-
- if (intid & TWL6040_VIBINT)
- dev_alert(codec->dev, "vib drivers over current detection\n");
-
- if (intid & TWL6040_READYINT)
- complete(&priv->ready);
+ msecs_to_jiffies(200));
return IRQ_HANDLED;
}
@@ -807,13 +898,16 @@
int ret;
unsigned int reg = mc->reg;
- /* For HS and HF we shadow the values and only actually write
+ /* For HS and EP we shadow the values and only actually write
* them out when active in order to ensure the amplifier comes on
* as quietly as possible. */
switch (reg) {
case TWL6040_REG_HSGAIN:
out = &twl6040_priv->headset;
break;
+ case TWL6040_REG_EARCTL:
+ out = &twl6040_priv->earphone;
+ break;
default:
break;
}
@@ -848,7 +942,10 @@
ucontrol->value.integer.value[0] = out->left_vol;
ucontrol->value.integer.value[1] = out->right_vol;
return 0;
-
+ case TWL6040_REG_EARCTL:
+ out = &twl6040_priv->earphone;
+ ucontrol->value.integer.value[0] = out->left_vol;
+ return 0;
default:
break;
}
@@ -919,33 +1016,6 @@
return snd_soc_get_volsw_2r(kcontrol, ucontrol);
}
-/* double control with volume update */
-#define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\
- xinvert, tlv_array)\
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
- .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
- SNDRV_CTL_ELEM_ACCESS_READWRITE,\
- .tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw, .get = twl6040_get_volsw, \
- .put = twl6040_put_volsw, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .shift = shift_left, .rshift = shift_right,\
- .max = xmax, .platform_max = xmax, .invert = xinvert} }
-
-/* double control with volume update */
-#define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\
- xinvert, tlv_array)\
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
- .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
- SNDRV_CTL_ELEM_ACCESS_READWRITE | \
- SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_2r, \
- .get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = reg_left, .rreg = reg_right, .shift = xshift, \
- .rshift = xshift, .max = xmax, .invert = xinvert}, }
-
/*
* MICATT volume control:
* from -6 to 0 dB in 6 dB steps
@@ -954,9 +1024,9 @@
/*
* MICGAIN volume control:
- * from -6 to 30 dB in 6 dB steps
+ * from 6 to 30 dB in 6 dB steps
*/
-static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -600, 600, 0);
+static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0);
/*
* AFMGAIN volume control:
@@ -1040,6 +1110,44 @@
static const struct snd_kcontrol_new ep_driver_switch_controls =
SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0);
+/* Headset power mode */
+static const char *twl6040_headset_power_texts[] = {
+ "Low-Power", "High-Performance",
+};
+
+static const struct soc_enum twl6040_headset_power_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(twl6040_headset_power_texts),
+ twl6040_headset_power_texts);
+
+static int twl6040_headset_power_get_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = priv->headset_mode;
+
+ return 0;
+}
+
+static int twl6040_headset_power_put_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ int high_perf = ucontrol->value.enumerated.item[0];
+ int ret;
+
+ if (priv->power_mode_forced)
+ return -EPERM;
+
+ ret = headset_power_mode(codec, high_perf);
+ if (!ret)
+ priv->headset_mode = high_perf;
+
+ return ret;
+}
+
static const struct snd_kcontrol_new twl6040_snd_controls[] = {
/* Capture gains */
SOC_DOUBLE_TLV("Capture Preamplifier Volume",
@@ -1052,12 +1160,19 @@
TWL6040_REG_LINEGAIN, 0, 3, 7, 0, afm_amp_tlv),
/* Playback gains */
- SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume",
- TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv),
- SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume",
- TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv),
- SOC_SINGLE_TLV("Earphone Playback Volume",
- TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv),
+ SOC_DOUBLE_EXT_TLV("Headset Playback Volume",
+ TWL6040_REG_HSGAIN, 0, 4, 0xF, 1,
+ twl6040_get_volsw, twl6040_put_volsw, hs_tlv),
+ SOC_DOUBLE_R_EXT_TLV("Handsfree Playback Volume",
+ TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1,
+ twl6040_get_volsw_2r, twl6040_put_volsw_2r_vu, hf_tlv),
+ SOC_SINGLE_EXT_TLV("Earphone Playback Volume",
+ TWL6040_REG_EARCTL, 1, 0xF, 1,
+ twl6040_get_volsw, twl6040_put_volsw, ep_tlv),
+
+ SOC_ENUM_EXT("Headset Power Mode", twl6040_headset_power_enum,
+ twl6040_headset_power_get_enum,
+ twl6040_headset_power_put_enum),
};
static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
@@ -1112,19 +1227,19 @@
/* DACs */
SND_SOC_DAPM_DAC_E("HSDAC Left", "Headset Playback",
TWL6040_REG_HSLCTL, 0, 0,
- twl6040_hs_dac_event,
+ twl6040_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("HSDAC Right", "Headset Playback",
TWL6040_REG_HSRCTL, 0, 0,
- twl6040_hs_dac_event,
+ twl6040_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("HFDAC Left", "Handsfree Playback",
TWL6040_REG_HFLCTL, 0, 0,
- twl6040_power_mode_event,
+ twl6040_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("HFDAC Right", "Handsfree Playback",
TWL6040_REG_HFRCTL, 0, 0,
- twl6040_power_mode_event,
+ twl6040_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("HF Left Playback",
@@ -1154,10 +1269,14 @@
TWL6040_REG_HSRCTL, 2, 0, NULL, 0,
pga_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
- SND_SOC_DAPM_SWITCH_E("Earphone Driver",
+ SND_SOC_DAPM_SWITCH_E("Earphone Enable",
SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls,
- twl6040_power_mode_event,
+ twl6040_ep_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_OUT_DRV_E("Earphone Driver",
+ SND_SOC_NOPM, 0, 0, NULL, 0,
+ pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
/* Analog playback PGAs */
SND_SOC_DAPM_PGA("HFDAC Left PGA",
@@ -1200,7 +1319,8 @@
{"HSOR", NULL, "Headset Right Driver"},
/* Earphone playback path */
- {"Earphone Driver", "Switch", "HSDAC Left"},
+ {"Earphone Enable", "Switch", "HSDAC Left"},
+ {"Earphone Driver", NULL, "Earphone Enable"},
{"EP", NULL, "Earphone Driver"},
{"HF Left Playback", "HF DAC", "HFDAC Left"},
@@ -1231,111 +1351,16 @@
return 0;
}
-static int twl6040_power_up_completion(struct snd_soc_codec *codec,
- int naudint)
-{
- struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- int time_left;
- u8 intid;
-
- time_left = wait_for_completion_timeout(&priv->ready,
- msecs_to_jiffies(144));
-
- if (!time_left) {
- twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid,
- TWL6040_REG_INTID);
- if (!(intid & TWL6040_READYINT)) {
- dev_err(codec->dev, "timeout waiting for READYINT\n");
- return -ETIMEDOUT;
- }
- }
-
- priv->codec_powered = 1;
-
- return 0;
-}
-
-static int twl6040_set_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- int audpwron = priv->audpwron;
- int naudint = priv->naudint;
- int ret;
-
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
- case SND_SOC_BIAS_PREPARE:
- break;
- case SND_SOC_BIAS_STANDBY:
- if (priv->codec_powered)
- break;
-
- if (gpio_is_valid(audpwron)) {
- /* use AUDPWRON line */
- gpio_set_value(audpwron, 1);
-
- /* wait for power-up completion */
- ret = twl6040_power_up_completion(codec, naudint);
- if (ret)
- return ret;
-
- /* sync registers updated during power-up sequence */
- twl6040_read_reg_volatile(codec, TWL6040_REG_NCPCTL);
- twl6040_read_reg_volatile(codec, TWL6040_REG_LDOCTL);
- twl6040_read_reg_volatile(codec, TWL6040_REG_LPPLLCTL);
- } else {
- /* use manual power-up sequence */
- twl6040_power_up(codec);
- priv->codec_powered = 1;
- }
-
- /* initialize vdd/vss registers with reg_cache */
- twl6040_init_vdd_regs(codec);
-
- /* Set external boost GPO */
- twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02);
-
- /* Set initial minimal gain values */
- twl6040_write(codec, TWL6040_REG_HSGAIN, 0xFF);
- twl6040_write(codec, TWL6040_REG_EARCTL, 0x1E);
- twl6040_write(codec, TWL6040_REG_HFLGAIN, 0x1D);
- twl6040_write(codec, TWL6040_REG_HFRGAIN, 0x1D);
- break;
- case SND_SOC_BIAS_OFF:
- if (!priv->codec_powered)
- break;
-
- if (gpio_is_valid(audpwron)) {
- /* use AUDPWRON line */
- gpio_set_value(audpwron, 0);
-
- /* power-down sequence latency */
- udelay(500);
-
- /* sync registers updated during power-down sequence */
- twl6040_read_reg_volatile(codec, TWL6040_REG_NCPCTL);
- twl6040_read_reg_volatile(codec, TWL6040_REG_LDOCTL);
- twl6040_write_reg_cache(codec, TWL6040_REG_LPPLLCTL,
- 0x00);
- } else {
- /* use manual power-down sequence */
- twl6040_power_down(codec);
- }
-
- priv->codec_powered = 0;
- break;
- }
-
- codec->dapm.bias_level = level;
-
- return 0;
-}
-
/* set of rates for each pll: low-power and high-performance */
static unsigned int lp_rates[] = {
+ 8000,
+ 11250,
+ 16000,
+ 22500,
+ 32000,
+ 44100,
+ 48000,
88200,
96000,
};
@@ -1346,6 +1371,10 @@
};
static unsigned int hp_rates[] = {
+ 8000,
+ 16000,
+ 32000,
+ 48000,
96000,
};
@@ -1354,6 +1383,47 @@
.list = hp_rates,
};
+static int twl6040_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (priv->codec_powered)
+ break;
+
+ twl6040_enable(twl6040);
+ priv->codec_powered = 1;
+
+ priv->sysclk_constraints = &lp_constraints;
+
+ /* initialize vdd/vss registers with reg_cache */
+ twl6040_init_vdd_regs(codec);
+
+ break;
+ case SND_SOC_BIAS_OFF:
+ if (!priv->codec_powered)
+ break;
+
+ twl6040_disable(twl6040);
+ priv->codec_powered = 0;
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+ /* get pll and sysclk after power transition */
+ priv->pll = twl6040_get_pll(twl6040);
+ priv->sysclk = twl6040_get_sysclk(twl6040);
+
+ return 0;
+}
+
static int twl6040_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1374,15 +1444,11 @@
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
+ struct twl6040 *twl6040 = codec->control_data;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- u8 lppllctl;
+ unsigned int sysclk;
int rate;
-
- /* nothing to do for high-perf pll, it supports only 48 kHz */
- if (priv->pll == TWL6040_HPPLL_ID)
- return 0;
-
- lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
+ int ret;
rate = params_rate(params);
switch (rate) {
@@ -1390,23 +1456,27 @@
case 22500:
case 44100:
case 88200:
- lppllctl |= TWL6040_LPLLFIN;
- priv->sysclk = 17640000;
+ sysclk = 17640000;
break;
case 8000:
case 16000:
case 32000:
case 48000:
case 96000:
- lppllctl &= ~TWL6040_LPLLFIN;
- priv->sysclk = 19200000;
+ sysclk = 19200000;
break;
default:
dev_err(codec->dev, "unsupported rate %d\n", rate);
return -EINVAL;
}
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ ret = twl6040_set_pll(twl6040, priv->pll, priv->clk_in, sysclk);
+ if (ret) {
+ dev_err(codec->dev, "failed to configure PLL %d", ret);
+ return ret;
+ }
+
+ priv->sysclk = sysclk;
return 0;
}
@@ -1425,23 +1495,19 @@
}
/*
- * capture is not supported at 17.64 MHz,
- * it's reserved for headset low-power playback scenario
+ * For capture, the analog path should be turned on and stabilized
+ * before McPDM prepares itself, to avoid pop noise.
+ * The codec startup event is therefore sent through DAPM from within
+ * prepare itself, to ensure the codec analog path is up before the
+ * McPDM uplink FIFO is activated.
*/
- if ((priv->sysclk == 17640000) &&
- substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- dev_err(codec->dev,
- "capture mode is not supported at %dHz\n",
- priv->sysclk);
- return -EINVAL;
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ snd_soc_dapm_codec_stream_event(dai->codec,
+ dai->driver->capture.stream_name,
+ SND_SOC_DAPM_STREAM_START);
+ msleep(150);
}
- if ((priv->sysclk == 17640000) && priv->non_lp) {
- dev_err(codec->dev,
- "some enabled paths aren't supported at %dHz\n",
- priv->sysclk);
- return -EPERM;
- }
return 0;
}
@@ -1450,98 +1516,12 @@
{
struct snd_soc_codec *codec = codec_dai->codec;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- u8 hppllctl, lppllctl;
-
- hppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_HPPLLCTL);
- lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
switch (clk_id) {
- case TWL6040_SYSCLK_SEL_LPPLL:
- switch (freq) {
- case 32768:
- /* headset dac and driver must be in low-power mode */
- headset_power_mode(codec, 0);
-
- /* clk32k input requires low-power pll */
- lppllctl |= TWL6040_LPLLENA;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
- mdelay(5);
- lppllctl &= ~TWL6040_HPLLSEL;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
- hppllctl &= ~TWL6040_HPLLENA;
- twl6040_write(codec, TWL6040_REG_HPPLLCTL, hppllctl);
- break;
- default:
- dev_err(codec->dev, "unknown mclk freq %d\n", freq);
- return -EINVAL;
- }
-
- /* lppll divider */
- switch (priv->sysclk) {
- case 17640000:
- lppllctl |= TWL6040_LPLLFIN;
- break;
- case 19200000:
- lppllctl &= ~TWL6040_LPLLFIN;
- break;
- default:
- /* sysclk not yet configured */
- lppllctl &= ~TWL6040_LPLLFIN;
- priv->sysclk = 19200000;
- break;
- }
-
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
-
- priv->pll = TWL6040_LPPLL_ID;
+ case TWL6040_LPPLL_ID:
priv->sysclk_constraints = &lp_constraints;
break;
- case TWL6040_SYSCLK_SEL_HPPLL:
- hppllctl &= ~TWL6040_MCLK_MSK;
-
- switch (freq) {
- case 12000000:
- /* mclk input, pll enabled */
- hppllctl |= TWL6040_MCLK_12000KHZ |
- TWL6040_HPLLSQRBP |
- TWL6040_HPLLENA;
- break;
- case 19200000:
- /* mclk input, pll disabled */
- hppllctl |= TWL6040_MCLK_19200KHZ |
- TWL6040_HPLLSQRENA |
- TWL6040_HPLLBP;
- break;
- case 26000000:
- /* mclk input, pll enabled */
- hppllctl |= TWL6040_MCLK_26000KHZ |
- TWL6040_HPLLSQRBP |
- TWL6040_HPLLENA;
- break;
- case 38400000:
- /* clk slicer, pll disabled */
- hppllctl |= TWL6040_MCLK_38400KHZ |
- TWL6040_HPLLSQRENA |
- TWL6040_HPLLBP;
- break;
- default:
- dev_err(codec->dev, "unknown mclk freq %d\n", freq);
- return -EINVAL;
- }
-
- /* headset dac and driver must be in high-performance mode */
- headset_power_mode(codec, 1);
-
- twl6040_write(codec, TWL6040_REG_HPPLLCTL, hppllctl);
- udelay(500);
- lppllctl |= TWL6040_HPLLSEL;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
- lppllctl &= ~TWL6040_LPLLENA;
- twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
-
- /* high-performance pll can provide only 19.2 MHz */
- priv->pll = TWL6040_HPPLL_ID;
- priv->sysclk = 19200000;
+ case TWL6040_HPPLL_ID:
priv->sysclk_constraints = &hp_constraints;
break;
default:
@@ -1549,6 +1529,23 @@
return -EINVAL;
}
+ priv->pll = clk_id;
+ priv->clk_in = freq;
+
+ return 0;
+}
+
+static int twl6040_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ /*
+	 * the pop-noise reduction sequence requires shutting down the
+	 * analog side before the CPU DAI
+ */
+ if (mute)
+ snd_soc_dapm_codec_stream_event(dai->codec,
+ dai->driver->playback.stream_name,
+ SND_SOC_DAPM_STREAM_STOP);
+
return 0;
}
@@ -1557,17 +1554,12 @@
.hw_params = twl6040_hw_params,
.prepare = twl6040_prepare,
.set_sysclk = twl6040_set_dai_sysclk,
+ .digital_mute = twl6040_digital_mute,
};
-static struct snd_soc_dai_driver twl6040_dai = {
- .name = "twl6040-hifi",
- .playback = {
- .stream_name = "Playback",
- .channels_min = 1,
- .channels_max = 4,
- .rates = TWL6040_RATES,
- .formats = TWL6040_FORMATS,
- },
+static struct snd_soc_dai_driver twl6040_dai[] = {
+{
+ .name = "twl6040-ul",
.capture = {
.stream_name = "Capture",
.channels_min = 1,
@@ -1576,6 +1568,40 @@
.formats = TWL6040_FORMATS,
},
.ops = &twl6040_dai_ops,
+},
+{
+ .name = "twl6040-dl1",
+ .playback = {
+ .stream_name = "Headset Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = TWL6040_RATES,
+ .formats = TWL6040_FORMATS,
+ },
+ .ops = &twl6040_dai_ops,
+},
+{
+ .name = "twl6040-dl2",
+ .playback = {
+ .stream_name = "Handsfree Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = TWL6040_RATES,
+ .formats = TWL6040_FORMATS,
+ },
+ .ops = &twl6040_dai_ops,
+},
+{
+ .name = "twl6040-vib",
+ .playback = {
+ .stream_name = "Vibra Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .formats = TWL6040_FORMATS,
+ },
+ .ops = &twl6040_dai_ops,
+},
};
#ifdef CONFIG_PM
@@ -1588,8 +1614,10 @@
static int twl6040_resume(struct snd_soc_codec *codec)
{
- twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level);
+ if (codec->dapm.bias_level != codec->dapm.suspend_bias_level) {
+ twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level);
+ }
return 0;
}
@@ -1600,11 +1628,9 @@
static int twl6040_probe(struct snd_soc_codec *codec)
{
- struct twl4030_codec_data *twl_codec = codec->dev->platform_data;
struct twl6040_data *priv;
- int audpwron, naudint;
+ struct twl4030_codec_audio_data *pdata = dev_get_platdata(codec->dev);
int ret = 0;
- u8 icrev, intmr = TWL6040_ALLINT_MSK;
priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL);
if (priv == NULL)
@@ -1612,21 +1638,33 @@
snd_soc_codec_set_drvdata(codec, priv);
priv->codec = codec;
+ codec->control_data = dev_get_drvdata(codec->dev->parent);
+ codec->dapm.idle_bias_off = 1;
- twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &icrev, TWL6040_REG_ASICREV);
+ if (pdata && pdata->hs_left_step && pdata->hs_right_step) {
+ priv->hs_left_step = pdata->hs_left_step;
+ priv->hs_right_step = pdata->hs_right_step;
+ } else {
+ priv->hs_left_step = 1;
+ priv->hs_right_step = 1;
+ }
- if (twl_codec && (icrev > 0))
- audpwron = twl_codec->audpwron_gpio;
+ if (pdata && pdata->hf_left_step && pdata->hf_right_step) {
+ priv->hf_left_step = pdata->hf_left_step;
+ priv->hf_right_step = pdata->hf_right_step;
+ } else {
+ priv->hf_left_step = 1;
+ priv->hf_right_step = 1;
+ }
+
+ if (pdata && pdata->ep_step)
+ priv->ep_step = pdata->ep_step;
else
- audpwron = -EINVAL;
+ priv->ep_step = 1;
- if (twl_codec)
- naudint = twl_codec->naudint_irq;
- else
- naudint = 0;
-
- priv->audpwron = audpwron;
- priv->naudint = naudint;
+ /* default is low-power mode */
+ priv->headset_mode = 1;
+ priv->sysclk_constraints = &lp_constraints;
priv->workqueue = create_singlethread_workqueue("twl6040-codec");
if (!priv->workqueue) {
@@ -1638,55 +1676,40 @@
mutex_init(&priv->mutex);
- init_completion(&priv->ready);
init_completion(&priv->headset.ramp_done);
init_completion(&priv->handsfree.ramp_done);
-
- if (gpio_is_valid(audpwron)) {
- ret = gpio_request(audpwron, "audpwron");
- if (ret)
- goto gpio1_err;
-
- ret = gpio_direction_output(audpwron, 0);
- if (ret)
- goto gpio2_err;
-
- priv->codec_powered = 0;
-
- /* enable only codec ready interrupt */
- intmr &= ~(TWL6040_READYMSK | TWL6040_PLUGMSK);
-
- /* reset interrupt status to allow correct power up sequence */
- twl6040_read_reg_volatile(codec, TWL6040_REG_INTID);
- }
- twl6040_write(codec, TWL6040_REG_INTMR, intmr);
-
- if (naudint) {
- /* audio interrupt */
- ret = request_threaded_irq(naudint, NULL,
- twl6040_naudint_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "twl6040_codec", codec);
- if (ret)
- goto gpio2_err;
- }
-
- /* init vio registers */
- twl6040_init_vio_regs(codec);
+ init_completion(&priv->earphone.ramp_done);
priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf");
if (priv->hf_workqueue == NULL) {
ret = -ENOMEM;
- goto irq_err;
+ goto hfwork_err;
}
priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs");
if (priv->hs_workqueue == NULL) {
ret = -ENOMEM;
- goto wq_err;
+ goto hswork_err;
+ }
+ priv->ep_workqueue = create_singlethread_workqueue("twl6040-ep");
+ if (priv->ep_workqueue == NULL) {
+ ret = -ENOMEM;
+ goto epwork_err;
}
INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work);
INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work);
+ INIT_DELAYED_WORK(&priv->ep_delayed_work, twl6040_pga_ep_work);
+
+ ret = twl6040_request_irq(codec->control_data, TWL6040_IRQ_PLUG,
+ twl6040_audio_handler, "twl6040_irq_plug",
+ codec);
+ if (ret) {
+ dev_err(codec->dev, "PLUG IRQ request failed: %d\n", ret);
+ goto irq_err;
+ }
+
+ /* init vio registers */
+ twl6040_init_vio_regs(codec);
/* power on device */
ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1700,16 +1723,14 @@
return 0;
bias_err:
- destroy_workqueue(priv->hs_workqueue);
-wq_err:
- destroy_workqueue(priv->hf_workqueue);
+ twl6040_free_irq(codec->control_data, TWL6040_IRQ_PLUG, codec);
irq_err:
- if (naudint)
- free_irq(naudint, codec);
-gpio2_err:
- if (gpio_is_valid(audpwron))
- gpio_free(audpwron);
-gpio1_err:
+ destroy_workqueue(priv->ep_workqueue);
+epwork_err:
+ destroy_workqueue(priv->hs_workqueue);
+hswork_err:
+ destroy_workqueue(priv->hf_workqueue);
+hfwork_err:
destroy_workqueue(priv->workqueue);
work_err:
kfree(priv);
@@ -1719,20 +1740,13 @@
static int twl6040_remove(struct snd_soc_codec *codec)
{
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- int audpwron = priv->audpwron;
- int naudint = priv->naudint;
twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- if (gpio_is_valid(audpwron))
- gpio_free(audpwron);
-
- if (naudint)
- free_irq(naudint, codec);
-
+ twl6040_free_irq(codec->control_data, TWL6040_IRQ_PLUG, codec);
destroy_workqueue(priv->workqueue);
destroy_workqueue(priv->hf_workqueue);
destroy_workqueue(priv->hs_workqueue);
+ destroy_workqueue(priv->ep_workqueue);
kfree(priv);
return 0;
@@ -1754,7 +1768,7 @@
static int __devinit twl6040_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
- &soc_codec_dev_twl6040, &twl6040_dai, 1);
+ &soc_codec_dev_twl6040, twl6040_dai, ARRAY_SIZE(twl6040_dai));
}
static int __devexit twl6040_codec_remove(struct platform_device *pdev)
diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h
index 23aeed0..105a6fd 100644
--- a/sound/soc/codecs/twl6040.h
+++ b/sound/soc/codecs/twl6040.h
@@ -22,123 +22,7 @@
#ifndef __TWL6040_H__
#define __TWL6040_H__
-#define TWL6040_REG_ASICID 0x01
-#define TWL6040_REG_ASICREV 0x02
-#define TWL6040_REG_INTID 0x03
-#define TWL6040_REG_INTMR 0x04
-#define TWL6040_REG_NCPCTL 0x05
-#define TWL6040_REG_LDOCTL 0x06
-#define TWL6040_REG_HPPLLCTL 0x07
-#define TWL6040_REG_LPPLLCTL 0x08
-#define TWL6040_REG_LPPLLDIV 0x09
-#define TWL6040_REG_AMICBCTL 0x0A
-#define TWL6040_REG_DMICBCTL 0x0B
-#define TWL6040_REG_MICLCTL 0x0C
-#define TWL6040_REG_MICRCTL 0x0D
-#define TWL6040_REG_MICGAIN 0x0E
-#define TWL6040_REG_LINEGAIN 0x0F
-#define TWL6040_REG_HSLCTL 0x10
-#define TWL6040_REG_HSRCTL 0x11
-#define TWL6040_REG_HSGAIN 0x12
-#define TWL6040_REG_EARCTL 0x13
-#define TWL6040_REG_HFLCTL 0x14
-#define TWL6040_REG_HFLGAIN 0x15
-#define TWL6040_REG_HFRCTL 0x16
-#define TWL6040_REG_HFRGAIN 0x17
-#define TWL6040_REG_VIBCTLL 0x18
-#define TWL6040_REG_VIBDATL 0x19
-#define TWL6040_REG_VIBCTLR 0x1A
-#define TWL6040_REG_VIBDATR 0x1B
-#define TWL6040_REG_HKCTL1 0x1C
-#define TWL6040_REG_HKCTL2 0x1D
-#define TWL6040_REG_GPOCTL 0x1E
-#define TWL6040_REG_ALB 0x1F
-#define TWL6040_REG_DLB 0x20
-#define TWL6040_REG_TRIM1 0x28
-#define TWL6040_REG_TRIM2 0x29
-#define TWL6040_REG_TRIM3 0x2A
-#define TWL6040_REG_HSOTRIM 0x2B
-#define TWL6040_REG_HFOTRIM 0x2C
-#define TWL6040_REG_ACCCTL 0x2D
-#define TWL6040_REG_STATUS 0x2E
-
-#define TWL6040_CACHEREGNUM (TWL6040_REG_STATUS + 1)
-
-#define TWL6040_VIOREGNUM 18
-#define TWL6040_VDDREGNUM 21
-
-/* INTID (0x03) fields */
-
-#define TWL6040_THINT 0x01
-#define TWL6040_PLUGINT 0x02
-#define TWL6040_UNPLUGINT 0x04
-#define TWL6040_HOOKINT 0x08
-#define TWL6040_HFINT 0x10
-#define TWL6040_VIBINT 0x20
-#define TWL6040_READYINT 0x40
-
-/* INTMR (0x04) fields */
-
-#define TWL6040_PLUGMSK 0x02
-#define TWL6040_READYMSK 0x40
-#define TWL6040_ALLINT_MSK 0x7B
-
-/* NCPCTL (0x05) fields */
-
-#define TWL6040_NCPENA 0x01
-#define TWL6040_NCPOPEN 0x40
-
-/* LDOCTL (0x06) fields */
-
-#define TWL6040_LSLDOENA 0x01
-#define TWL6040_HSLDOENA 0x04
-#define TWL6040_REFENA 0x40
-#define TWL6040_OSCENA 0x80
-
-/* HPPLLCTL (0x07) fields */
-
-#define TWL6040_HPLLENA 0x01
-#define TWL6040_HPLLRST 0x02
-#define TWL6040_HPLLBP 0x04
-#define TWL6040_HPLLSQRENA 0x08
-#define TWL6040_HPLLSQRBP 0x10
-#define TWL6040_MCLK_12000KHZ (0 << 5)
-#define TWL6040_MCLK_19200KHZ (1 << 5)
-#define TWL6040_MCLK_26000KHZ (2 << 5)
-#define TWL6040_MCLK_38400KHZ (3 << 5)
-#define TWL6040_MCLK_MSK 0x60
-
-/* LPPLLCTL (0x08) fields */
-
-#define TWL6040_LPLLENA 0x01
-#define TWL6040_LPLLRST 0x02
-#define TWL6040_LPLLSEL 0x04
-#define TWL6040_LPLLFIN 0x08
-#define TWL6040_HPLLSEL 0x10
-
-/* HSLCTL (0x10) fields */
-
-#define TWL6040_HSDACMODEL 0x02
-#define TWL6040_HSDRVMODEL 0x08
-
-/* HSRCTL (0x11) fields */
-
-#define TWL6040_HSDACMODER 0x02
-#define TWL6040_HSDRVMODER 0x08
-
-/* ACCCTL (0x2D) fields */
-
-#define TWL6040_RESETSPLIT 0x04
-
-#define TWL6040_SYSCLK_SEL_LPPLL 1
-#define TWL6040_SYSCLK_SEL_HPPLL 2
-
-#define TWL6040_HPPLL_ID 1
-#define TWL6040_LPPLL_ID 2
-
-/* STATUS (0x2E) fields */
-
-#define TWL6040_PLUGCOMP 0x02
+#include <linux/mfd/twl6040-codec.h>
void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
struct snd_soc_jack *jack, int report);
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 9d35b8c..29759e1 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -811,9 +811,11 @@
static u64 davinci_pcm_dmamask = 0xffffffff;
-static int davinci_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret;
if (!card->dev->dma_mask)
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index a456e49..e27c417 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -266,9 +266,11 @@
static u64 ep93xx_pcm_dmamask = 0xffffffff;
-static int ep93xx_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int ep93xx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 6680c0b..5312d1b 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -294,9 +294,11 @@
* Regardless of where the memory is actually allocated, since the device can
* technically DMA to any 36-bit address, we do need to set the DMA mask to 36.
*/
-static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
int ret;
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index cbaf8b7..61d2ecc 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -299,10 +299,11 @@
};
static u64 psc_dma_dmamask = 0xffffffff;
-static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
size_t size = psc_dma_hardware.buffer_bytes_max;
int rc = 0;
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index 413b78d..309c59e 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -238,12 +238,14 @@
static int ssi_irq = 0;
-static int imx_pcm_fiq_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret;
- ret = imx_pcm_new(card, dai, pcm);
+ ret = imx_pcm_new(rtd);
if (ret)
return ret;
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 3b56254..3a676cd 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -388,10 +388,11 @@
static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-int imx_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
-
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h
index dc8a875..0a84cec 100644
--- a/sound/soc/imx/imx-ssi.h
+++ b/sound/soc/imx/imx-ssi.h
@@ -225,8 +225,7 @@
struct imx_ssi *ssi);
int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
-int imx_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm);
+int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);
void imx_pcm_free(struct snd_pcm *pcm);
/*
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index fb1483f..a7c9578 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -299,9 +299,11 @@
static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-int jz4740_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index e13c6ce..cd33de1 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -312,9 +312,11 @@
return 0;
}
-static int kirkwood_dma_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret;
if (!card->dev->dma_mask)
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 8263f56..d589ef1 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -315,9 +315,12 @@
}
static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
-static int nuc900_dma_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
+
if (!card->dev->dma_mask)
card->dev->dma_mask = &nuc900_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 99054cf..ffcfeee 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -2,6 +2,14 @@
tristate "SoC Audio for the Texas Instruments OMAP chips"
depends on ARCH_OMAP
+config SND_OMAP_SOC_ABE_DSP
+ tristate
+ select SND_DYNAMIC_MINORS
+
+config SND_OMAP_SOC_MCASP
+ tristate
+ select SND_SOC_SPDIF
+
config SND_OMAP_SOC_MCBSP
tristate
select OMAP_MCBSP
@@ -9,6 +17,12 @@
config SND_OMAP_SOC_MCPDM
tristate
+config SND_OMAP_SOC_ABE
+ tristate
+
+config SND_OMAP_SOC_HDMI
+ tristate
+
config SND_OMAP_SOC_N810
tristate "SoC Audio support for Nokia N810"
depends on SND_OMAP_SOC && MACH_NOKIA_N810 && I2C
@@ -92,13 +106,27 @@
SDP3430.
config SND_OMAP_SOC_SDP4430
- tristate "SoC Audio support for Texas Instruments SDP4430"
- depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_4430SDP
+ tristate "SoC Audio support for Texas Instruments SDP4430 or PandaBoard"
+ depends on TWL4030_CORE && (MACH_OMAP_4430SDP || MACH_OMAP4_PANDA)
select SND_OMAP_SOC_MCPDM
select SND_SOC_TWL6040
+ select SND_OMAP_SOC_ABE
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_DMIC
+ select SND_OMAP_SOC_DMIC
+ select SND_OMAP_SOC_ABE_DSP
help
Say Y if you want to add support for SoC audio on Texas Instruments
- SDP4430.
+ SDP4430 or PandaBoard.
+
+config SND_OMAP_SOC_OMAP4_HDMI
+ tristate "SoC Audio support for Texas Instruments OMAP4 HDMI"
+ depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4
+ select SND_OMAP_SOC_HDMI
+ select SND_SOC_OMAP_HDMI_CODEC
+ help
+ Say Y if you want to add support for SoC HDMI audio on Texas Instruments
+ OMAP4 chips.
config SND_OMAP_SOC_OMAP3_PANDORA
tristate "SoC Audio support for OMAP3 Pandora"
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 6c2c87e..f1deddc 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -1,11 +1,19 @@
# OMAP Platform Support
snd-soc-omap-objs := omap-pcm.o
+snd-soc-omap-mcasp-objs := omap-mcasp.o
snd-soc-omap-mcbsp-objs := omap-mcbsp.o
-snd-soc-omap-mcpdm-objs := omap-mcpdm.o mcpdm.o
+snd-soc-omap-mcpdm-objs := omap-mcpdm.o
+snd-soc-omap-abe-objs := omap-abe.o
+snd-soc-omap-abe-dsp-objs := omap-abe-dsp.o
+snd-soc-omap-hdmi-objs := omap-hdmi.o
obj-$(CONFIG_SND_OMAP_SOC) += snd-soc-omap.o
+obj-$(CONFIG_SND_OMAP_SOC_MCASP) += snd-soc-omap-mcasp.o
obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
obj-$(CONFIG_SND_OMAP_SOC_MCPDM) += snd-soc-omap-mcpdm.o
+obj-$(CONFIG_SND_OMAP_SOC_ABE) += snd-soc-omap-abe.o
+obj-$(CONFIG_SND_OMAP_SOC_ABE_DSP) += snd-soc-omap-abe-dsp.o abe/
+obj-$(CONFIG_SND_OMAP_SOC_HDMI) += snd-soc-omap-hdmi.o
# OMAP Machine Support
snd-soc-n810-objs := n810.o
@@ -21,6 +29,7 @@
snd-soc-omap3beagle-objs := omap3beagle.o
snd-soc-zoom2-objs := zoom2.o
snd-soc-igep0020-objs := igep0020.o
+snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -36,3 +45,4 @@
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o
diff --git a/sound/soc/omap/abe/Makefile b/sound/soc/omap/abe/Makefile
new file mode 100644
index 0000000..0d5649b
--- /dev/null
+++ b/sound/soc/omap/abe/Makefile
@@ -0,0 +1,14 @@
+snd-soc-abe-hal-objs += abe_main.o \
+ abe_core.o \
+ abe_gain.o \
+ abe_port.o \
+ abe_aess.o \
+ abe_dbg.o \
+ abe_dat.o \
+ abe_ini.o \
+ abe_irq.o \
+ abe_seq.o \
+ abe_asrc.o \
+ port_mgr.o
+
+obj-$(CONFIG_SND_OMAP_SOC_ABE_DSP) += snd-soc-abe-hal.o
diff --git a/sound/soc/omap/abe/abe.h b/sound/soc/omap/abe/abe.h
new file mode 100644
index 0000000..103a57cb
--- /dev/null
+++ b/sound/soc/omap/abe/abe.h
@@ -0,0 +1,141 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_H_
+#define _ABE_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "abe_def.h"
+#include "abe_define.h"
+#include "abe_fw.h"
+#include "abe_ext.h"
+#include "abe_dbg.h"
+
+/*
+ * BASIC TYPES
+ */
+#define MAX_UINT8 ((((1L << 7) - 1) << 1) + 1)
+#define MAX_UINT16 ((((1L << 15) - 1) << 1) + 1)
+#define MAX_UINT32 ((((1L << 31) - 1) << 1) + 1)
+
+#define s8 char
+#define u8 unsigned char
+#define s16 short
+#define u16 unsigned short
+#define s32 int
+#define u32 unsigned int
+
+struct omap_abe_equ {
+ /* type of filter */
+ u32 equ_type;
+ /* filter length */
+ u32 equ_length;
+ union {
+ /* parameters are the direct and recursive coefficients in */
+ /* Q6.26 integer fixed-point format. */
+ s32 type1[NBEQ1];
+ struct {
+ /* center frequency of the band [Hz] */
+ s32 freq[NBEQ2];
+ /* gain of each band. [dB] */
+ s32 gain[NBEQ2];
+ /* Q factor of this band [dB] */
+ s32 q[NBEQ2];
+ } type2;
+ } coef;
+ s32 equ_param3;
+};
+
+extern struct omap_abe *abe;
+
+void omap_abe_dbg_log(struct omap_abe *abe, u32 x, u32 y, u32 z, u32 t);
+void omap_abe_dbg_error(struct omap_abe *abe, int level, int error);
+int omap_abe_set_opp_processing(struct omap_abe *abe, u32 opp);
+int omap_abe_connect_debug_trace(struct omap_abe *abe,
+ struct omap_abe_dma *dma2);
+
+int omap_abe_use_compensated_gain(struct omap_abe *abe, int on_off);
+int omap_abe_write_equalizer(struct omap_abe *abe,
+ u32 id, struct omap_abe_equ *param);
+
+int omap_abe_disable_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_enable_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_mute_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_unmute_gain(struct omap_abe *abe, u32 id, u32 p);
+
+int omap_abe_write_gain(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 ramp, u32 p);
+int omap_abe_write_mixer(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 f_ramp, u32 p);
+int omap_abe_read_gain(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p);
+int omap_abe_read_mixer(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p);
+
+/*
+ * MACROS
+ */
+#define _log(x, y, z, t) do { if ((x) & abe->dbg.mask) omap_abe_dbg_log(abe, x, y, z, t); } while (0)
+
+#endif /* _ABE_H_ */
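
The coef.type1[] coefficients in struct omap_abe_equ above are stored in Q6.26 fixed-point format (6 integer bits including sign, 26 fractional bits, i.e. value = coefficient / 2^26). A standalone arithmetic sketch, not part of the patch, showing the conversion with the expected encodings in comments:

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    /* convert a floating-point tap to Q6.26 */
    static int32_t to_q6_26(double x)
    {
        return (int32_t)lround(x * (double)(1 << 26));
    }

    int main(void)
    {
        printf("1.0  -> 0x%08x\n", (uint32_t)to_q6_26(1.0));    /* 0x04000000 */
        printf("0.5  -> 0x%08x\n", (uint32_t)to_q6_26(0.5));    /* 0x02000000 */
        printf("-0.5 -> 0x%08x\n", (uint32_t)to_q6_26(-0.5));   /* 0xfe000000 */
        return 0;
    }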
diff --git a/sound/soc/omap/abe/abe_aess.c b/sound/soc/omap/abe/abe_aess.c
new file mode 100644
index 0000000..eb35b58
--- /dev/null
+++ b/sound/soc/omap/abe/abe_aess.c
@@ -0,0 +1,191 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dbg.h"
+#include "abe.h"
+#include "abe_mem.h"
+#include "abe_aess.h"
+
+/**
+ * omap_abe_hw_configuration
+ *
+ */
+void omap_abe_hw_configuration(struct omap_abe *abe)
+{
+ /* enables the DMAreq from AESS AESS_DMAENABLE_SET = 255 */
+ omap_abe_reg_writel(abe, AESS_DMAENABLE_SET, DMA_ENABLE_ALL);
+ /* enables the MCU IRQ from AESS to Cortex A9 */
+ omap_abe_reg_writel(abe, AESS_MCU_IRQENABLE_SET, INT_SET);
+}
+
+/**
+ * omap_abe_clear_irq - clear ABE interrupt
+ * @abe: Pointer on abe handle
+ *
+ * This subroutine is called to clear the MCU IRQ
+ */
+int omap_abe_clear_irq(struct omap_abe *abe)
+{
+ omap_abe_reg_writel(abe, ABE_MCU_IRQSTATUS, INT_CLR);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_clear_irq);
+
+/**
+ * abe_write_event_generator - Selects event generator source
+ * @abe: Pointer on abe handle
+ * @e: Event Generation Counter, McPDM, DMIC or default.
+ *
+ * Loads the AESS event generator hardware source.
+ * Loads the firmware parameters accordingly.
+ * Indicates to the FW which data stream is the most important to preserve
+ * in case all the streams are asynchronous.
+ * If the parameter is "default", then the HAL decides which event source
+ * is the most appropriate based on the opened ports.
+ *
+ * When neither the DMIC nor the McPDM is activated, the AE will have
+ * its EVENT generator programmed with the EVENT_COUNTER.
+ * The event counter will be tuned in order to deliver a pulse frequency higher
+ * than 96 kHz.
+ * The DPLL output at 100% OPP is MCLK = 32768 Hz x 6000 = 196.608 MHz
+ * The ratio is (MCLK/96000)+(1<<1) = 2050
+ * (1<<1) in order to have the same speed at 50% and 100% OPP
+ * (only 15 MSB bits are used at OPP50%)
+ */
+int omap_abe_write_event_generator(struct omap_abe *abe, u32 e)
+{
+ u32 event, selection;
+ u32 counter = EVENT_GENERATOR_COUNTER_DEFAULT;
+
+ _log(ABE_ID_WRITE_EVENT_GENERATOR, e, 0, 0);
+
+ switch (e) {
+ case EVENT_TIMER:
+ selection = EVENT_SOURCE_COUNTER;
+ event = 0;
+ break;
+ case EVENT_44100:
+ selection = EVENT_SOURCE_COUNTER;
+ event = 0;
+ counter = EVENT_GENERATOR_COUNTER_44100;
+ break;
+ default:
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API, ABE_BLOCK_COPY_ERR);
+ /* leave the event generator untouched on an unknown source */
+ return -EINVAL;
+ }
+ omap_abe_reg_writel(abe, EVENT_GENERATOR_COUNTER, counter);
+ omap_abe_reg_writel(abe, EVENT_SOURCE_SELECTION, selection);
+ omap_abe_reg_writel(abe, EVENT_GENERATOR_START, EVENT_GENERATOR_ON);
+ omap_abe_reg_writel(abe, AUDIO_ENGINE_SCHEDULER, event);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_write_event_generator);
+
+/**
+ * omap_abe_start_event_generator - Starts event generator source
+ * @abe: Pointer on abe handle
+ *
+ * Starts the AESS event generator so that scheduling events are delivered
+ * to the AESS engine.
+ */
+int omap_abe_start_event_generator(struct omap_abe *abe)
+{
+ /* Start the event Generator */
+ omap_abe_reg_writel(abe, EVENT_GENERATOR_START, 1);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_start_event_generator);
+
+/**
+ * omap_abe_stop_event_generator - Stops event generator source
+ * @abe: Pointer on abe handle
+ *
+ * Stops the AESS event generator; no more events will be sent to the AESS
+ * engine. The upper layer must wait 1/96 kHz to be sure that the engine
+ * reaches the IDLE instruction.
+ */
+int omap_abe_stop_event_generator(struct omap_abe *abe)
+{
+ /* Stop the event Generator */
+ omap_abe_reg_writel(abe, EVENT_GENERATOR_START, 0);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_stop_event_generator);
+
+/**
+ * omap_abe_disable_irq - disable MCU/DSP ABE interrupt
+ * @abe: Pointer on abe handle
+ *
+ * This subroutine disables the ABE MCU/DSP IRQ
+ */
+int omap_abe_disable_irq(struct omap_abe *abe)
+{
+ /* disables the DMAreq from AESS AESS_DMAENABLE_CLR = 127
+ * DMA_Req7 will still be enabled as it is used for ABE trace */
+ omap_abe_reg_writel(abe, AESS_DMAENABLE_CLR, 0x7F);
+ /* disables the MCU IRQ from AESS to Cortex A9 */
+ omap_abe_reg_writel(abe, AESS_MCU_IRQENABLE_CLR, 0x01);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_disable_irq);
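
omap_abe_disable_irq() above relies on the CLR register semantics: writing 0x7F clears DMA requests 0..6 and leaves request 7 (used for the ABE trace) enabled. A standalone sketch, not part of the patch, of that mask arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int enabled = 0xFF;  /* DMA_ENABLE_ALL, set at configuration time */
        unsigned int clear = 0x7F;    /* value written to AESS_DMAENABLE_CLR */

        enabled &= ~clear;            /* a CLR register clears the bits written to it */
        printf("still enabled: 0x%02x\n", enabled);   /* 0x80 -> DMA_Req7 only */
        return 0;
    }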
diff --git a/sound/soc/omap/abe/abe_aess.h b/sound/soc/omap/abe/abe_aess.h
new file mode 100644
index 0000000..70c54f8e
--- /dev/null
+++ b/sound/soc/omap/abe/abe_aess.h
@@ -0,0 +1,113 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_AESS_H_
+#define _ABE_AESS_H_
+
+#define AESS_REVISION 0x00
+#define AESS_MCU_IRQSTATUS 0x28
+#define AESS_MCU_IRQENABLE_SET 0x3C
+#define AESS_MCU_IRQENABLE_CLR 0x40
+#define AESS_DMAENABLE_SET 0x60
+#define AESS_DMAENABLE_CLR 0x64
+#define EVENT_GENERATOR_COUNTER 0x68
+#define EVENT_GENERATOR_START 0x6C
+#define EVENT_SOURCE_SELECTION 0x70
+#define AUDIO_ENGINE_SCHEDULER 0x74
+
+/*
+ * AESS_MCU_IRQSTATUS bit field
+ */
+#define INT_CLEAR 0x01
+
+/*
+ * AESS_MCU_IRQENABLE_SET bit field
+ */
+#define INT_SET 0x01
+
+/*
+ * AESS_MCU_IRQENABLE_CLR bit field
+ */
+#define INT_CLR 0x01
+
+/*
+ * AESS_DMAENABLE_SET bit fields
+ */
+#define DMA_ENABLE_ALL 0xFF
+
+/*
+ * AESS_DMAENABLE_CLR bit fields
+ */
+#define DMA_DISABLE_ALL 0xFF
+
+/*
+ * EVENT_GENERATOR_COUNTER COUNTER_VALUE bit field
+ */
+/* PLL output/desired sampling rate = (32768 * 6000)/96000 */
+#define EVENT_GENERATOR_COUNTER_DEFAULT (2048-1)
+/* PLL output/desired sampling rate = (32768 * 6000)/88200 */
+#define EVENT_GENERATOR_COUNTER_44100 (2228-1)
+
+
+int omap_abe_start_event_generator(struct omap_abe *abe);
+int omap_abe_stop_event_generator(struct omap_abe *abe);
+int omap_abe_write_event_generator(struct omap_abe *abe, u32 e);
+
+void omap_abe_hw_configuration(struct omap_abe *abe);
+
+#endif /* _ABE_AESS_H_ */
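
The two counter values defined above follow the "PLL output / desired sampling rate" formula, with the register loaded with the divider minus one (see omap_abe_write_event_generator()). A standalone sketch, not part of the patch, of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long mclk = 32768UL * 6000UL;    /* 196608000 Hz */

        /* 96 kHz: 196608000 / 96000 = 2048, programmed as 2048 - 1 */
        printf("96 kHz:   divider %lu, register %lu\n",
               mclk / 96000UL, mclk / 96000UL - 1);
        /* 44.1 kHz family: the integer quotient is 2229; the header picks
         * 2228 - 1, one step below, keeping the event rate just above 88.2 kHz */
        printf("88.2 kHz: divider %lu\n", mclk / 88200UL);
        return 0;
    }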
diff --git a/sound/soc/omap/abe/abe_api.h b/sound/soc/omap/abe/abe_api.h
new file mode 100644
index 0000000..a24ec96
--- /dev/null
+++ b/sound/soc/omap/abe/abe_api.h
@@ -0,0 +1,511 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_API_H_
+#define _ABE_API_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dm_addr.h"
+#include "abe_dbg.h"
+
+#define ABE_TASK_ID(ID) (OMAP_ABE_D_TASKSLIST_ADDR + sizeof(ABE_STask)*(ID))
+
+#define TASK_ASRC_VX_DL_SLT 0
+#define TASK_ASRC_VX_DL_IDX 3
+#define TASK_VX_DL_SLT 1
+#define TASK_VX_DL_IDX 3
+#define TASK_DL2Mixer_SLT 1
+#define TASK_DL2Mixer_IDX 6
+#define TASK_DL1Mixer_SLT 2
+#define TASK_DL1Mixer_IDX 0
+#define TASK_VX_UL_SLT 12
+#define TASK_VX_UL_IDX 5
+#define TASK_BT_DL_48_8_SLT 14
+#define TASK_BT_DL_48_8_IDX 4
+#define TASK_ASRC_BT_UL_SLT 15
+#define TASK_ASRC_BT_UL_IDX 6
+#define TASK_ASRC_VX_UL_SLT 16
+#define TASK_ASRC_VX_UL_IDX 2
+#define TASK_BT_UL_8_48_SLT 17
+#define TASK_BT_UL_8_48_IDX 2
+#define TASK_IO_MM_DL_SLT 18
+#define TASK_IO_MM_DL_IDX 0
+#define TASK_ASRC_BT_DL_SLT 18
+#define TASK_ASRC_BT_DL_IDX 6
+
+/**
+ * abe_reset_hal - reset the ABE/HAL
+ *
+ * Operations : reset the HAL by reloading the static variables and
+ * default AESS registers.
+ * Called after a PRCM cold-start reset of ABE
+ */
+abehal_status abe_reset_hal(void);
+/**
+ * abe_load_fw_param - Load ABE Firmware memories
+ * @FW: Pointer to the firmware data used to load the PMEM (program),
+ * CMEM (coefficients), SMEM (sample) and DMEM (data) memories
+ *
+ */
+abehal_status abe_load_fw_param(u32 *FW);
+/**
+ * abe_irq_processing - Process ABE interrupt
+ *
+ * This subroutine is call upon reception of "MA_IRQ_99 ABE_MPU_IRQ" Audio
+ * back-end interrupt. This subroutine will check the ATC Hrdware, the
+ * IRQ_FIFO from the AE and act accordingly. Some IRQ source are originated
+ * for the delivery of "end of time sequenced tasks" notifications, some are
+ * originated from the Ping-Pong protocols, some are generated from
+ * the embedded debugger when the firmware stops on programmable break-points,
+ * etc ...
+ */
+abehal_status abe_irq_processing(void);
+/**
+ * abe_clear_irq - clear ABE interrupt
+ *
+ * This subroutine is called to clear the MCU IRQ
+ */
+abehal_status abe_clear_irq(void);
+/**
+ * abe_disable_irq - disable MCU/DSP ABE interrupt
+ *
+ * This subroutine disables the ABE MCU/DSP IRQ
+ */
+abehal_status abe_disable_irq(void);
+/*
+ * abe_check_activity - check all ports are closed
+ */
+u32 abe_check_activity(void);
+/**
+ * abe_wakeup - Wakeup ABE
+ *
+ * Wakeup ABE in case of retention
+ */
+abehal_status abe_wakeup(void);
+/**
+ * abe_start_event_generator - Starts event generator source
+ *
+ * Starts the AESS event generator so that scheduling events are delivered
+ * to the AESS engine.
+ */
+abehal_status abe_start_event_generator(void);
+/**
+ * abe_stop_event_generator - Stops event generator source
+ *
+ * Stops the AESS event generator; no more events will be sent to the AESS
+ * engine. The upper layer must wait 1/96 kHz to be sure that the engine
+ * reaches the IDLE instruction.
+ */
+abehal_status abe_stop_event_generator(void);
+
+/**
+ * abe_write_event_generator - Selects event generator source
+ * @e: Event Generation Counter, McPDM, DMIC or default.
+ *
+ * Loads the AESS event generator hardware source.
+ * Loads the firmware parameters accordingly.
+ * Indicates to the FW which data stream is the most important to preserve
+ * in case all the streams are asynchronous.
+ * If the parameter is "default", then the HAL decides which event source
+ * is the most appropriate based on the opened ports.
+ *
+ * When neither the DMIC nor the McPDM is activated, the AE will have
+ * its EVENT generator programmed with the EVENT_COUNTER.
+ * The event counter will be tuned in order to deliver a pulse frequency higher
+ * than 96 kHz.
+ * The DPLL output at 100% OPP is MCLK = 32768 Hz x 6000 = 196.608 MHz
+ * The ratio is (MCLK/96000)+(1<<1) = 2050
+ * (1<<1) in order to have the same speed at 50% and 100% OPP
+ * (only 15 MSB bits are used at OPP50%)
+ */
+abehal_status abe_write_event_generator(u32 e);
+/**
+ * abe_set_opp_processing - Set OPP mode for ABE Firmware
+ * @opp: OPP mode
+ *
+ * New processing network and OPP:
+ * 0: Ultra low power consumption audio player (no post-processing, no mixer)
+ * 1: OPP 25% (simple multimedia features, including low-power player)
+ * 2: OPP 50% (multimedia and voice calls)
+ * 3: OPP100% (EANC, multimedia complex use-cases)
+ *
+ * Rearranges the FW task network to the corresponding OPP list of features.
+ * The corresponding AE ports are supposed to be set/reset accordingly before
+ * this switch.
+ *
+ */
+abehal_status abe_set_opp_processing(u32 opp);
+/**
+ * abe_set_ping_pong_buffer
+ * @port: ABE port ID
+ * @n_bytes: Size of Ping/Pong buffer
+ *
+ * Updates the next ping-pong buffer with "size" bytes copied from the
+ * host processor. This API notifies the FW that the data transfer is done.
+ */
+abehal_status abe_set_ping_pong_buffer(u32 port, u32 n_bytes);
+/**
+ * abe_read_next_ping_pong_buffer
+ * @port: ABE portID
+ * @p: Next buffer address (pointer)
+ * @n: Next buffer size (pointer)
+ *
+ * Tell the next base address of the next ping_pong Buffer and its size
+ */
+abehal_status abe_read_next_ping_pong_buffer(u32 port, u32 *p, u32 *n);
+/**
+ * abe_init_ping_pong_buffer
+ * @id: ABE port ID
+ * @size_bytes:size of the ping pong
+ * @n_buffers:number of buffers (2 = ping/pong)
+ * @p:returned address of the ping-pong list of base address (byte offset
+ from DMEM start)
+ *
+ * Computes the base address of the ping_pong buffers
+ */
+abehal_status abe_init_ping_pong_buffer(u32 id, u32 size_bytes, u32 n_buffers,
+ u32 *p);
+/**
+ * abe_read_offset_from_ping_buffer
+ * @id: ABE port ID
+ * @n: returned address of the offset
+ * from the ping buffer start address (in samples)
+ *
+ * Computes the current firmware ping pong read pointer location,
+ * expressed in samples, as the offset from the start address of ping buffer.
+ */
+abehal_status abe_read_offset_from_ping_buffer(u32 id, u32 *n);
+/**
+ * abe_plug_subroutine
+ * @id: returned sequence index after plugging a new subroutine
+ * @f: subroutine address to be inserted
+ * @n: number of parameters of this subroutine
+ * @params: pointer on parameters
+ *
+ * register a list of subroutines for call-back purpose
+ */
+abehal_status abe_plug_subroutine(u32 *id, abe_subroutine2 f, u32 n,
+ u32 *params);
+/**
+ * abe_set_sequence_time_accuracy
+ * @fast: fast counter
+ * @slow: slow counter
+ *
+ */
+abehal_status abe_set_sequence_time_accuracy(u32 fast, u32 slow);
+/**
+ * abe_reset_port
+ * @id: ABE port ID
+ *
+ * stop the port activity and reload default parameters on the associated
+ * processing features.
+ * Clears the internal AE buffers.
+ */
+abehal_status abe_reset_port(u32 id);
+/**
+ * abe_read_remaining_data
+ * @id: ABE port_ID
+ * @n: size pointer to the remaining number of 32bits words
+ *
+ * computes the remaining amount of data in the buffer.
+ */
+abehal_status abe_read_remaining_data(u32 port, u32 *n);
+/**
+ * abe_disable_data_transfer
+ * @id: ABE port id
+ *
+ * disables the ATC descriptor and stop IO/port activities
+ * disable the IO task (@f = 0)
+ * clear ATC DMEM buffer, ATC enabled
+ */
+abehal_status abe_disable_data_transfer(u32 id);
+/**
+ * abe_enable_data_transfer
+ * @ip: ABE port id
+ *
+ * enables the ATC descriptor
+ * reset ATC pointers
+ * enable the IO task (@f <> 0)
+ */
+abehal_status abe_enable_data_transfer(u32 id);
+/**
+ * abe_set_dmic_filter
+ * @d: DMIC decimation ratio : 16/25/32/40
+ *
+ * Loads in CMEM a specific list of coefficients depending on the DMIC sampling
+ * frequency (2.4MHz or 3.84MHz). This table compensates the DMIC decimator
+ * roll-off at 20kHz.
+ * The default table is loaded with the DMIC 2.4MHz recommended configuration.
+ */
+abehal_status abe_set_dmic_filter(u32 d);
+/**
+ * abe_connect_cbpr_dmareq_port
+ * @id: port name
+ * @f: desired data format
+ * @d: desired dma_request line (0..7)
+ * @a: returned pointer to the base address of the CBPr register and number of
+ * samples to exchange during a DMA_request.
+ *
+ * enables the data exchange between a DMA and the ABE through the
+ * CBPr registers of AESS.
+ */
+abehal_status abe_connect_cbpr_dmareq_port(u32 id, abe_data_format_t *f, u32 d,
+ abe_dma_t *returned_dma_t);
+/**
+ * abe_connect_irq_ping_pong_port
+ * @id: port name
+ * @f: desired data format
+ * @I: index of the call-back subroutine to call
+ * @s: half-buffer (ping) size
+ * @p: returned base address of the first (ping) buffer
+ *
+ * enables data exchanges through direct accesses to the DMEM
+ * memory of ABE using cache flush. On each IRQ activation a subroutine
+ * registered with "abe_plug_subroutine" will be called. This subroutine
+ * will generate an amount of samples, send them to DMEM memory and call
+ * "abe_set_ping_pong_buffer" to notify the new amount of samples in the
+ * pong buffer.
+ */
+abehal_status abe_connect_irq_ping_pong_port(u32 id, abe_data_format_t *f,
+ u32 subroutine_id, u32 size,
+ u32 *sink, u32 dsp_mcu_flag);
+/**
+ * abe_connect_serial_port()
+ * @id: port name
+ * @f: data format
+ * @i: peripheral ID (McBSP #1, #2, #3)
+ *
+ * Operations : enables the data exchanges between a McBSP and an ATC buffer in
+ * DMEM. This API is used to connect 48kHz McBSP streams to MM_DL and 8/16kHz
+ * voice streams to VX_UL, VX_DL, BT_VX_UL, BT_VX_DL. It abstracts the
+ * abe_write_port API.
+ */
+abehal_status abe_connect_serial_port(u32 id, abe_data_format_t *f,
+ u32 mcbsp_id);
+/**
+ * abe_read_port_address
+ * @dma: output pointer to the DMA iteration and data destination pointer
+ *
+ * This API returns the address of the DMA register used on this audio port.
+ * Depending on the protocol being used, it adds the L3 (DMA) or MPU (ARM)
+ * base address offset.
+ */
+abehal_status abe_read_port_address(u32 port, abe_dma_t *dma2);
+/**
+ * abe_write_equalizer
+ * @id: name of the equalizer
+ * @param : equalizer coefficients
+ *
+ * Load the coefficients in CMEM.
+ */
+abehal_status abe_write_equalizer(u32 id, abe_equ_t *param);
+/**
+ * abe_write_asrc
+ * @id: name of the port
+ * @param: drift value to compensate [ppm]
+ *
+ * Load the drift variables to the FW memory. This API can be called only
+ * when the corresponding port has already been opened and the ASRC has
+ * been correctly initialized with the abe_init_asrc_... API. If this API is
+ * used such that the drift has been changed from positive to negative drift
+ * or vice versa, there will be a click in the output signal. Loading the drift
+ * value with zero disables the feature.
+ */
+abehal_status abe_write_asrc(u32 port, s32 dppm);
+/**
+ * abe_write_aps
+ * @id: name of the aps filter
+ * @param: table of filter coefficients
+ *
+ * Load the filter and threshold coefficients in FW memory. This API
+ * can be called when the corresponding APS is not activated. After
+ * reloading the firmware the default coefficients correspond to "no APS
+ * activated".
+ * Loading all the coefficients value with zero disables the feature.
+ */
+abehal_status abe_write_aps(u32 id, struct abe_aps_t *param);
+/**
+ * abe_write_gain
+ * @id: name of the mixer
+ * @param: list of input gains of the mixer
+ * @p: list of port corresponding to the above gains
+ *
+ * Load the gain coefficients in FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer gains
+ * in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+abehal_status abe_write_gain(u32 id, s32 f_g, u32 ramp, u32 p);
+abehal_status abe_use_compensated_gain(u32 on_off);
+abehal_status abe_enable_gain(u32 id, u32 p);
+abehal_status abe_disable_gain(u32 id, u32 p);
+abehal_status abe_mute_gain(u32 id, u32 p);
+abehal_status abe_unmute_gain(u32 id, u32 p);
+/**
+ * abe_write_mixer
+ * @id: name of the mixer
+ * @param: input gains and delay ramp of the mixer
+ * @p: port corresponding to the above gains
+ *
+ * Load the gain coefficients in FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer
+ * gains in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+abehal_status abe_write_mixer(u32 id, s32 f_g, u32 f_ramp, u32 p);
+/**
+ * abe_read_gain
+ * @id: name of the mixer
+ * @param: list of input gains of the mixer
+ * @p: list of port corresponding to the above gains
+ *
+ */
+abehal_status abe_read_gain(u32 id, u32 *f_g, u32 p);
+/**
+ * abe_read_mixer
+ * @id: name of the mixer
+ * @param: gains of the mixer
+ * @p: port corresponding to the above gains
+ *
+ * Load the gain coefficients in FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer
+ * gains in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+abehal_status abe_read_mixer(u32 id, u32 *f_g, u32 p);
+/**
+ * abe_set_router_configuration
+ * @Id: name of the router
+ * @Conf: id of the configuration
+ * @param: list of output index of the route
+ *
+ * The uplink router takes its input from DMIC (6 samples), AMIC (2 samples)
+ * and PORT1/2 (2 stereo ports). Each sample will be individually stored in
+ * an intermediate table of 10 elements. The intermediate table is used to
+ * route the samples to three directions : REC1 mixer, 2 EANC DMIC source of
+ * filtering and MM recording audio path.
+ */
+abehal_status abe_set_router_configuration(u32 id, u32 k, u32 *param);
+/**
+ * ABE_READ_DEBUG_TRACE
+ *
+ * Parameters :
+ * @data: data destination pointer
+ * @n : max number of read data
+ *
+ * Operations :
+ * Reads the AE circular data pointer that holds pairs of debug data +
+ * timestamps, and stores the pairs, via linear addressing, to the parameter
+ * pointer.
+ * Stops the copy when the max parameter is reached or when the FIFO is empty.
+ *
+ * Return value :
+ * None.
+ */
+abehal_status abe_read_debug_trace(u32 *data, u32 *n);
+/**
+ * abe_connect_debug_trace
+ * @dma2:pointer to the DMEM trace buffer
+ *
+ * returns the address and size of the real-time debug trace buffer,
+ * the content of which will vary from one firmware release to another
+ */
+abehal_status abe_connect_debug_trace(abe_dma_t *dma2);
+/**
+ * abe_set_debug_trace
+ * @debug: debug ID from a list to be defined
+ *
+ * load a mask which filters the debug trace to dedicated types of data
+ */
+abehal_status abe_set_debug_trace(abe_dbg_t debug);
+/**
+ * abe_init_mem - Allocate Kernel space memory map for ABE
+ *
+ * Memory map of ABE memory space for PMEM/DMEM/SMEM/DMEM
+ */
+void abe_init_mem(void __iomem **_io_base);
+
+/**
+ * abe_write_pdmdl_offset - write the desired offset on the DL1/DL2 paths
+ *
+ * Parameters:
+ * path: 1 for the DL1 ABE path, 2 for the DL2 ABE path
+ * offset_left: integer value that will be added on all PDM left samples
+ * offset_right: integer value that will be added on all PDM right samples
+ *
+ */
+void abe_write_pdmdl_offset(u32 path, u32 offset_left, u32 offset_right);
+
+#endif /* _ABE_API_H_ */
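
The ping-pong API declared above (abe_init_ping_pong_buffer(), abe_set_ping_pong_buffer(), abe_read_next_ping_pong_buffer()) alternates between two DMEM half-buffers: the host fills one half, notifies the firmware of the new data, and moves on to the other. A standalone host-side analogy of that exchange follows; it is not part of the patch and not the driver implementation, just a sketch of the protocol:

    #include <stdio.h>

    #define HALF_WORDS 4

    static int dmem[2][HALF_WORDS];     /* stands in for the two DMEM halves */

    static void notify_half_ready(int which, int n_words)
    {
        /* in the real driver this is the abe_set_ping_pong_buffer() call */
        printf("half %d ready (%d words)\n", which, n_words);
    }

    int main(void)
    {
        int ping = 0, sample = 0, burst, i;

        for (burst = 0; burst < 4; burst++) {
            for (i = 0; i < HALF_WORDS; i++)
                dmem[ping][i] = sample++;
            notify_half_ready(ping, HALF_WORDS);
            ping ^= 1;                  /* switch to the other half-buffer */
        }
        return 0;
    }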
diff --git a/sound/soc/omap/abe/abe_asrc.c b/sound/soc/omap/abe/abe_asrc.c
new file mode 100644
index 0000000..4a52235
--- /dev/null
+++ b/sound/soc/omap/abe/abe_asrc.c
@@ -0,0 +1,1231 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_legacy.h"
+#include "abe_dbg.h"
+
+#include "abe_typedef.h"
+#include "abe_initxxx_labels.h"
+#include "abe_dbg.h"
+#include "abe_mem.h"
+#include "abe_sm_addr.h"
+#include "abe_cm_addr.h"
+
+/**
+ * abe_write_fifo
+ * @memory_bank: memory bank to write to (currently only ABE_DMEM supported)
+ * @descr_addr: FIFO descriptor address (descriptor fields: READ ptr, WRITE ptr,
+ * FIFO START_ADDR, FIFO END_ADDR)
+ * @data: data to write to the FIFO
+ * @nb_data32: number of 32-bit words to write to the DMEM FIFO
+ *
+ * write DMEM FIFO and update FIFO descriptor,
+ * it is assumed that FIFO descriptor is located in DMEM
+ */
+void abe_write_fifo(u32 memory_bank, u32 descr_addr, u32 *data, u32 nb_data32)
+{
+ u32 fifo_addr[4];
+ u32 i;
+ /* read FIFO descriptor from DMEM */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, descr_addr,
+ &fifo_addr[0], 4 * sizeof(u32));
+ /* WRITE ptr < FIFO start address */
+ if (fifo_addr[1] < fifo_addr[2])
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_DBG,
+ ABE_FW_FIFO_WRITE_PTR_ERR);
+ /* WRITE ptr > FIFO end address */
+ if (fifo_addr[1] > fifo_addr[3])
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_DBG,
+ ABE_FW_FIFO_WRITE_PTR_ERR);
+ switch (memory_bank) {
+ case ABE_DMEM:
+ for (i = 0; i < nb_data32; i++) {
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (s32) fifo_addr[1], (u32 *) (data + i),
+ 4);
+ /* increment WRITE pointer */
+ fifo_addr[1] = fifo_addr[1] + 4;
+ if (fifo_addr[1] > fifo_addr[3])
+ fifo_addr[1] = fifo_addr[2];
+ if (fifo_addr[1] == fifo_addr[0])
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_DBG,
+ ABE_FW_FIFO_WRITE_PTR_ERR);
+ }
+ /* update WRITE pointer in DMEM */
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, descr_addr +
+ sizeof(u32), &fifo_addr[1], 4);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * abe_write_asrc
+ * @id: name of the port
+ * @param: drift value to compensate [ppm]
+ *
+ * Load the drift variables to the FW memory. This API can be called only
+ * when the corresponding port has already been opened and the ASRC has
+ * been correctly initialized with the abe_init_asrc_... API. If this API is
+ * used such that the drift has been changed from positive to negative drift
+ * or vice versa, there will be a click in the output signal. Loading the drift
+ * value with zero disables the feature.
+ */
+abehal_status abe_write_asrc(u32 port, s32 dppm)
+{
+ s32 dtempvalue, adppm, drift_sign, drift_sign_addr, alpha_params_addr;
+ s32 alpha_params[3];
+ _log(ABE_ID_WRITE_ASRC, port, dppm, dppm >> 8);
+ /*
+ * x = ppm
+ *
+ * - 1000000/x must be multiple of 16
+ * - deltaalpha = round(2^20*x*16/1000000)=round(2^18/5^6*x) on 22 bits.
+ * then shifted by 2bits
+ * - minusdeltaalpha
+ * - oneminusepsilon = 1-deltaalpha/2.
+ *
+ * ppm = 250
+ * - 1000000/250=4000
+ * - deltaalpha = 4194.3 ~ 4195 => 0x00418c
+ */
+ /* examples for -6250 ppm */
+ /* atempvalue32[1] = -1; d_driftsign */
+ /* atempvalue32[3] = 0x00066668; d_deltaalpha */
+ /* atempvalue32[4] = 0xfff99998; d_minusdeltaalpha */
+ /* atempvalue32[5] = 0x003ccccc; d_oneminusepsilon */
+ /* example for 100 ppm */
+ /* atempvalue32[1] = 1;* d_driftsign */
+ /* atempvalue32[3] = 0x00001a38; d_deltaalpha */
+ /* atempvalue32[4] = 0xffffe5c8; d_minusdeltaalpha */
+ /* atempvalue32[5] = 0x003ccccc; d_oneminusepsilon */
+ /* compute new value for the ppm */
+ if (dppm >= 0) {
+ /* d_driftsign */
+ drift_sign = 1;
+ adppm = dppm;
+ } else {
+ /* d_driftsign */
+ drift_sign = -1;
+ adppm = (-1 * dppm);
+ }
+ if (dppm == 0) {
+ /* delta_alpha */
+ alpha_params[0] = 0;
+ /* minusdelta_alpha */
+ alpha_params[1] = 0;
+ /* one_minusepsilon */
+ alpha_params[2] = 0x003ffff0;
+ } else {
+ dtempvalue = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* delta_alpha */
+ alpha_params[0] = dtempvalue << 2;
+ /* minusdelta_alpha */
+ alpha_params[1] = (-dtempvalue) << 2;
+ /* one_minusepsilon */
+ alpha_params[2] = (0x00100000 - (dtempvalue / 2)) << 2;
+ }
+ switch (port) {
+ /* asynchronous sample-rate-converter for the uplink voice path */
+ case OMAP_ABE_VX_DL_PORT:
+ drift_sign_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (1 * sizeof(s32));
+ alpha_params_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (3 * sizeof(s32));
+ break;
+ /* asynchronous sample-rate-converter for the downlink voice path */
+ case OMAP_ABE_VX_UL_PORT:
+ drift_sign_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (1 * sizeof(s32));
+ alpha_params_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (3 * sizeof(s32));
+ break;
+ /* asynchronous sample-rate-converter for the BT_UL path */
+ case OMAP_ABE_BT_VX_UL_PORT:
+ drift_sign_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (1 * sizeof(s32));
+ alpha_params_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (3 * sizeof(s32));
+ break;
+ /* asynchronous sample-rate-converter for the BT_DL path */
+ case OMAP_ABE_BT_VX_DL_PORT:
+ drift_sign_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (1 * sizeof(s32));
+ alpha_params_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (3 * sizeof(s32));
+ break;
+ default:
+ /* asynchronous sample-rate-converter for the MM_EXT_IN path */
+ case OMAP_ABE_MM_EXT_IN_PORT:
+ drift_sign_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (1 * sizeof(s32));
+ alpha_params_addr =
+ OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (3 * sizeof(s32));
+ break;
+ }
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, drift_sign_addr,
+ (u32 *) &drift_sign, 4);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, alpha_params_addr,
+ (u32 *) &alpha_params[0], 12);
+ return 0;
+}
+EXPORT_SYMBOL(abe_write_asrc);
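
abe_write_asrc() above encodes the drift as fixed-point increments: the comment's deltaalpha = round(2^20 * 16 * ppm / 10^6) is evaluated as 17*ppm - ppm*3481/15625, because 2^24 / 10^6 = 16.777216 = 17 - 3481/15625. A standalone sketch, not part of the patch, reproducing the 0x00418c value quoted in the comment block for 250 ppm:

    #include <stdio.h>

    int main(void)
    {
        long ppm = 250;                                        /* drift to compensate */
        long d = (ppm << 4) + ppm - ((ppm * 3481L) / 15625L);  /* 4195 */

        printf("deltaalpha      = 0x%08lx\n",
               (unsigned long)(d << 2));                       /* 0x0000418c */
        printf("minusdeltaalpha = 0x%08lx\n",
               (unsigned long)(-(d << 2)) & 0xffffffffUL);
        printf("oneminusepsilon = 0x%08lx\n",
               (unsigned long)((0x00100000L - d / 2) << 2));
        return 0;
    }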
+/**
+ * abe_init_asrc_vx_dl
+ *
+ * Initialize the following ASRC VX_DL parameters :
+ * 1. DriftSign = D_AsrcVars[1] = 1 or -1
+ * 2. Subblock = D_AsrcVars[2] = 0
+ * 3. DeltaAlpha = D_AsrcVars[3] =
+ * (round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 4. MinusDeltaAlpha = D_AsrcVars[4] =
+ * (-round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 5. OneMinusEpsilon = D_AsrcVars[5] = 1 - DeltaAlpha/2
+ * 6. AlphaCurrent = 0x000020 (CMEM), initial value of Alpha parameter
+ * 7. BetaCurrent = 0x3fffe0 (CMEM), initial value of Beta parameter
+ * AlphaCurrent + BetaCurrent = 1 (=0x400000 in CMEM = 2^20 << 2)
+ * 8. drift_ASRC = 0 & drift_io = 0
+ * 9. SMEM for ASRC_DL_VX_Coefs pointer
+ * 10. CMEM for ASRC_DL_VX_Coefs pointer
+ * ASRC_DL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ * C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1
+ * 11. SMEM for XinASRC_DL_VX pointer
+ * 12. CMEM for XinASRC_DL_VX pointer
+ * XinASRC_DL_VX = S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/0/1/0/0/0/0
+ * 13. SMEM for IO_VX_DL_ASRC pointer
+ * 14. CMEM for IO_VX_DL_ASRC pointer
+ * IO_VX_DL_ASRC =
+ * S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/
+ * ASRC_DL_VX_FIR_L+ASRC_margin/1/0/0/0/0
+ */
+void abe_init_asrc_vx_dl(s32 dppm)
+{
+ s32 el[45];
+ s32 temp0, temp1, adppm, dtemp, mem_tag, mem_addr;
+ u32 i = 0;
+ u32 n_fifo_el = 42;
+ temp0 = 0;
+ temp1 = 1;
+ /* 1. DriftSign = D_AsrcVars[1] = 1 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (1 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm >= 0) {
+ el[i + 1] = 1;
+ adppm = dppm;
+ } else {
+ el[i + 1] = -1;
+ adppm = (-1 * dppm);
+ }
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ dtemp = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* 2. Subblock = D_AsrcVars[2] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (2 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 3. DeltaAlpha = D_AsrcVars[3] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (3 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = dtemp << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 4. MinusDeltaAlpha = D_AsrcVars[4] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (4 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = (-dtemp) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /*5. OneMinusEpsilon = D_AsrcVars[5] = 0x00400000 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_DL_VX_ADDR + (5 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0x00400000;
+ else
+ el[i + 1] = (0x00100000 - (dtemp / 2)) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 6. AlphaCurrent = 0x000020 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_ALPHACURRENT_DL_VX_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x00000020;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 7. BetaCurrent = 0x3fffe0 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_BETACURRENT_DL_VX_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x003fffe0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 8. drift_ASRC = 0 & drift_io = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_VX_DL_PORT * sizeof(struct ABE_SIODescriptor))
+ + drift_asrc_;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 9. SMEM for ASRC_DL_VX_Coefs pointer */
+ /* ASRC_DL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_SMEM;
+ mem_addr = ASRC_DL_VX_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ if (dppm == 0) {
+ el[i + 1] = OMAP_ABE_C_COEFASRC16_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC16_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC15_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC15_VX_SIZE >> 2);
+ } else {
+ el[i + 1] = OMAP_ABE_C_COEFASRC1_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC1_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC2_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC2_VX_SIZE >> 2);
+ }
+ i = i + 3;
+ /* 10. CMEM for ASRC_DL_VX_Coefs pointer */
+ /* ASRC_DL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_CMEM;
+ mem_addr = ASRC_DL_VX_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp1;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 11. SMEM for XinASRC_DL_VX pointer */
+ /* XinASRC_DL_VX =
+ S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = XinASRC_DL_VX_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_DL_VX_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_DL_VX_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 12. CMEM for XinASRC_DL_VX pointer */
+ /* XinASRC_DL_VX =
+ S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = XinASRC_DL_VX_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 13. SMEM for IO_VX_DL_ASRC pointer */
+ /* IO_VX_DL_ASRC = S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/
+ ASRC_DL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = IO_VX_DL_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_DL_VX_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_DL_VX_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 14. CMEM for IO_VX_DL_ASRC pointer */
+ /* IO_VX_DL_ASRC = S_XinASRC_DL_VX_ADDR/S_XinASRC_DL_VX_sizeof/
+ ASRC_DL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = IO_VX_DL_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_DL_VX_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ abe_write_fifo(ABE_DMEM, OMAP_ABE_D_FWMEMINITDESCR_ADDR, (u32 *) &el[0],
+ n_fifo_el);
+}
+/**
+ * abe_init_asrc_vx_ul
+ *
+ * Initialize the following ASRC VX_UL parameters :
+ * 1. DriftSign = D_AsrcVars[1] = 1 or -1
+ * 2. Subblock = D_AsrcVars[2] = 0
+ * 3. DeltaAlpha = D_AsrcVars[3] =
+ * (round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 4. MinusDeltaAlpha = D_AsrcVars[4] =
+ * (-round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 5. OneMinusEpsilon = D_AsrcVars[5] = 1 - DeltaAlpha/2
+ * 6. AlphaCurrent = 0x000020 (CMEM), initial value of Alpha parameter
+ * 7. BetaCurrent = 0x3fffe0 (CMEM), initial value of Beta parameter
+ * AlphaCurrent + BetaCurrent = 1 (=0x400000 in CMEM = 2^20 << 2)
+ * 8. drift_ASRC = 0 & drift_io = 0
+ * 9. SMEM for ASRC_UL_VX_Coefs pointer
+ * 10. CMEM for ASRC_UL_VX_Coefs pointer
+ * ASRC_UL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ * C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1
+ * 11. SMEM for XinASRC_UL_VX pointer
+ * 12. CMEM for XinASRC_UL_VX pointer
+ * XinASRC_UL_VX = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/0/1/0/0/0/0
+ * 13. SMEM for UL_48_8_DEC pointer
+ * 14. CMEM for UL_48_8_DEC pointer
+ * UL_48_8_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ * ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0
+ * 15. SMEM for UL_48_16_DEC pointer
+ * 16. CMEM for UL_48_16_DEC pointer
+ * UL_48_16_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ * ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0
+ */
+void abe_init_asrc_vx_ul(s32 dppm)
+{
+ s32 el[51];
+ s32 temp0, temp1, adppm, dtemp, mem_tag, mem_addr;
+ u32 i = 0;
+ u32 n_fifo_el = 48;
+ temp0 = 0;
+ temp1 = 1;
+ /* 1. DriftSign = D_AsrcVars[1] = 1 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (1 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm >= 0) {
+ el[i + 1] = 1;
+ adppm = dppm;
+ } else {
+ el[i + 1] = -1;
+ adppm = (-1 * dppm);
+ }
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ dtemp = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* 2. Subblock = D_AsrcVars[2] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (2 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 3. DeltaAlpha = D_AsrcVars[3] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (3 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = dtemp << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 4. MinusDeltaAlpha = D_AsrcVars[4] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (4 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = (-dtemp) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 5. OneMinusEpsilon = D_AsrcVars[5] = 0x00400000 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_UL_VX_ADDR + (5 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0x00400000;
+ else
+ el[i + 1] = (0x00100000 - (dtemp / 2)) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 6. AlphaCurrent = 0x000020 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_ALPHACURRENT_UL_VX_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x00000020;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 7. BetaCurrent = 0x3fffe0 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_BETACURRENT_UL_VX_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x003fffe0;
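+ /* AlphaCurrent (0x20) + BetaCurrent (0x3fffe0) = 0x400000, i.e. the
+ two interpolation weights start out summing to 1.0 in the
+ 2^20 << 2 format, as noted in the header comment */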
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 8. drift_ASRC = 0 & drift_io = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_VX_UL_PORT * sizeof(struct ABE_SIODescriptor))
+ + drift_asrc_;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 9. SMEM for ASRC_UL_VX_Coefs pointer */
+ /* ASRC_UL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_SMEM;
+ mem_addr = ASRC_UL_VX_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ if (dppm == 0) {
+ el[i + 1] = OMAP_ABE_C_COEFASRC16_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC16_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC15_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC15_VX_SIZE >> 2);
+ } else {
+ el[i + 1] = OMAP_ABE_C_COEFASRC1_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC1_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC2_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC2_VX_SIZE >> 2);
+ }
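+ /* each word above packs ((address >> 2) << 8) + (size >> 2), i.e.
+ addresses and sizes scaled down to 32-bit words; with zero drift the
+ CoefASRC16/CoefASRC15 coefficient sets are selected, otherwise the
+ CoefASRC1/CoefASRC2 sets */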
+ i = i + 3;
+ /* 10. CMEM for ASRC_UL_VX_Coefs pointer */
+ /* ASRC_UL_VX_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_CMEM;
+ mem_addr = ASRC_UL_VX_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp1;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 11. SMEM for XinASRC_UL_VX pointer */
+ /* XinASRC_UL_VX = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/0/1/
+ 0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = XinASRC_UL_VX_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_UL_VX_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_UL_VX_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 12. CMEM for XinASRC_UL_VX pointer */
+ /* XinASRC_UL_VX = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/0/1/
+ 0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = XinASRC_UL_VX_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 13. SMEM for UL_48_8_DEC pointer */
+ /* UL_48_8_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = UL_48_8_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_UL_VX_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_UL_VX_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 14. CMEM for UL_48_8_DEC pointer */
+ /* UL_48_8_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = UL_48_8_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_UL_VX_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 15. SMEM for UL_48_16_DEC pointer */
+ /* UL_48_16_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = UL_48_16_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_UL_VX_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_UL_VX_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 16. CMEM for UL_48_16_DEC pointer */
+ /* UL_48_16_DEC = S_XinASRC_UL_VX_ADDR/S_XinASRC_UL_VX_sizeof/
+ ASRC_UL_VX_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = UL_48_16_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_UL_VX_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ abe_write_fifo(ABE_DMEM, OMAP_ABE_D_FWMEMINITDESCR_ADDR, (u32 *) &el[0],
+ n_fifo_el);
+}
+/**
+ * abe_init_asrc_mm_ext_in
+ *
+ * Initialize the following ASRC MM_EXT_IN parameters:
+ * 1. DriftSign = D_AsrcVars[1] = 1 or -1
+ * 2. Subblock = D_AsrcVars[2] = 0
+ * 3. DeltaAlpha = D_AsrcVars[3] =
+ * (round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 4. MinusDeltaAlpha = D_AsrcVars[4] =
+ * (-round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 5. OneMinusEpsilon = D_AsrcVars[5] = 1 - DeltaAlpha/2
+ * 6. AlphaCurrent = 0x000020 (CMEM), initial value of Alpha parameter
+ * 7. BetaCurrent = 0x3fffe0 (CMEM), initial value of Beta parameter
+ * AlphaCurrent + BetaCurrent = 1 (=0x400000 in CMEM = 2^20 << 2)
+ * 8. drift_ASRC = 0 & drift_io = 0
+ * 9. SMEM for ASRC_MM_EXT_IN_Coefs pointer
+ * 10. CMEM for ASRC_MM_EXT_IN_Coefs pointer
+ * ASRC_MM_EXT_IN_Coefs = C_CoefASRC16_MM_ADDR/C_CoefASRC16_MM_sizeof/0/1/
+ * C_CoefASRC15_MM_ADDR/C_CoefASRC15_MM_sizeof/0/1
+ * 11. SMEM for XinASRC_MM_EXT_IN pointer
+ * 12. CMEM for XinASRC_MM_EXT_IN pointer
+ * XinASRC_MM_EXT_IN = S_XinASRC_MM_EXT_IN_ADDR/S_XinASRC_MM_EXT_IN_sizeof/0/1/
+ * 0/0/0/0
+ * 13. SMEM for IO_MM_EXT_IN_ASRC pointer
+ * 14. CMEM for IO_MM_EXT_IN_ASRC pointer
+ * IO_MM_EXT_IN_ASRC = S_XinASRC_MM_EXT_IN_ADDR/S_XinASRC_MM_EXT_IN_sizeof/
+ * ASRC_MM_EXT_IN_FIR_L+ASRC_margin+ASRC_N_48k/1/0/0/0/0
+ */
+void abe_init_asrc_mm_ext_in(s32 dppm)
+{
+ s32 el[45];
+ s32 temp0, temp1, adppm, dtemp, mem_tag, mem_addr;
+ u32 i = 0;
+ u32 n_fifo_el = 42;
+ temp0 = 0;
+ temp1 = 1;
+ /* 1. DriftSign = D_AsrcVars[1] = 1 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (1 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm >= 0) {
+ el[i + 1] = 1;
+ adppm = dppm;
+ } else {
+ el[i + 1] = -1;
+ adppm = (-1 * dppm);
+ }
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ dtemp = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* 2. Subblock = D_AsrcVars[2] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (2 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 3. DeltaAlpha = D_AsrcVars[3] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (3 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = dtemp << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 4. MinusDeltaAlpha = D_AsrcVars[4] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (4 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = (-dtemp) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 5. OneMinusEpsilon = D_AsrcVars[5] = 0x00400000 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR + (5 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0x00400000;
+ else
+ el[i + 1] = (0x00100000 - (dtemp / 2)) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 6. AlphaCurrent = 0x000020 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_ALPHACURRENT_MM_EXT_IN_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x00000020;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 7. BetaCurrent = 0x3fffe0 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_BETACURRENT_MM_EXT_IN_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x003fffe0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 8. drift_ASRC = 0 & drift_io = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_MM_EXT_IN_PORT * sizeof(struct ABE_SIODescriptor))
+ + drift_asrc_;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 9. SMEM for ASRC_MM_EXT_IN_Coefs pointer */
+ /* ASRC_MM_EXT_IN_Coefs = C_CoefASRC16_MM_ADDR/C_CoefASRC16_MM_sizeof/
+ 0/1/C_CoefASRC15_MM_ADDR/C_CoefASRC15_MM_sizeof/0/1 */
+ mem_tag = ABE_SMEM;
+ mem_addr = ASRC_MM_EXT_IN_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ if (dppm == 0) {
+ el[i + 1] = OMAP_ABE_C_COEFASRC16_MM_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC16_MM_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC15_MM_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC15_MM_SIZE >> 2);
+ } else {
+ el[i + 1] = OMAP_ABE_C_COEFASRC1_MM_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC1_MM_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC2_MM_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC2_MM_SIZE >> 2);
+ }
+ i = i + 3;
+ /* 10. CMEM for ASRC_MM_EXT_IN_Coefs pointer */
+ /* ASRC_MM_EXT_IN_Coefs = C_CoefASRC16_MM_ADDR/C_CoefASRC16_MM_sizeof/
+ 0/1/C_CoefASRC15_MM_ADDR/C_CoefASRC15_MM_sizeof/0/1 */
+ mem_tag = ABE_CMEM;
+ mem_addr = ASRC_MM_EXT_IN_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp1;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 11. SMEM for XinASRC_MM_EXT_IN pointer */
+ /* XinASRC_MM_EXT_IN = S_XinASRC_MM_EXT_IN_ADDR/
+ S_XinASRC_MM_EXT_IN_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = XinASRC_MM_EXT_IN_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_MM_EXT_IN_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_MM_EXT_IN_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 12. CMEM for XinASRC_MM_EXT_IN pointer */
+ /* XinASRC_MM_EXT_IN = S_XinASRC_MM_EXT_IN_ADDR/
+ S_XinASRC_MM_EXT_IN_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = XinASRC_MM_EXT_IN_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 13. SMEM for IO_MM_EXT_IN_ASRC pointer */
+ /* IO_MM_EXT_IN_ASRC =
+ S_XinASRC_MM_EXT_IN_ADDR/S_XinASRC_MM_EXT_IN_sizeof/
+ ASRC_MM_EXT_IN_FIR_L+ASRC_margin+ASRC_N_48k/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = IO_MM_EXT_IN_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_MM_EXT_IN_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_MM_EXT_IN_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 14. CMEM for IO_MM_EXT_IN_ASRC pointer */
+ /* IO_MM_EXT_IN_ASRC =
+ S_XinASRC_MM_EXT_IN_ADDR/S_XinASRC_MM_EXT_IN_sizeof/
+ ASRC_MM_EXT_IN_FIR_L+ASRC_margin+ASRC_N_48k/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = IO_MM_EXT_IN_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_MM_EXT_IN_FIR_L + ASRC_margin + ASRC_N_48k) << 16) +
+ (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ abe_write_fifo(ABE_DMEM, OMAP_ABE_D_FWMEMINITDESCR_ADDR, (u32 *) &el[0],
+ n_fifo_el);
+}
+/**
+ * abe_init_asrc_bt_ul
+ *
+ * Initialize the following ASRC BT_UL parameters:
+ * 1. DriftSign = D_AsrcVars[1] = 1 or -1
+ * 2. Subblock = D_AsrcVars[2] = 0
+ * 3. DeltaAlpha = D_AsrcVars[3] =
+ * (round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 4. MinusDeltaAlpha = D_AsrcVars[4] =
+ * (-round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 5. OneMinusEpsilon = D_AsrcVars[5] = 1 - DeltaAlpha/2
+ * 6. AlphaCurrent = 0x000020 (CMEM), initial value of Alpha parameter
+ * 7. BetaCurrent = 0x3fffe0 (CMEM), initial value of Beta parameter
+ * AlphaCurrent + BetaCurrent = 1 (=0x400000 in CMEM = 2^20 << 2)
+ * 8. drift_ASRC = 0 & drift_io = 0
+ * 9. SMEM for ASRC_BT_UL_Coefs pointer
+ * 10. CMEM for ASRC_BT_UL_Coefs pointer
+ * ASRC_BT_UL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ * C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1
+ * 11. SMEM for XinASRC_BT_UL pointer
+ * 12. CMEM for XinASRC_BT_UL pointer
+ * XinASRC_BT_UL = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/0/1/0/0/0/0
+ * 13. SMEM for IO_BT_UL_ASRC pointer
+ * 14. CMEM for IO_BT_UL_ASRC pointer
+ * IO_BT_UL_ASRC = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/
+ * ASRC_BT_UL_FIR_L+ASRC_margin/1/0/0/0/0
+ */
+void abe_init_asrc_bt_ul(s32 dppm)
+{
+ s32 el[45];
+ s32 temp0, temp1, adppm, dtemp, mem_tag, mem_addr;
+ u32 i = 0;
+ u32 n_fifo_el = 42;
+ temp0 = 0;
+ temp1 = 1;
+ /* 1. DriftSign = D_AsrcVars[1] = 1 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (1 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm >= 0) {
+ el[i + 1] = 1;
+ adppm = dppm;
+ } else {
+ el[i + 1] = -1;
+ adppm = (-1 * dppm);
+ }
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ dtemp = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* 2. Subblock = D_AsrcVars[2] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (2 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 3. DeltaAlpha = D_AsrcVars[3] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (3 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = dtemp << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 4. MinusDeltaAlpha = D_AsrcVars[4] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (4 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = (-dtemp) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 5. OneMinusEpsilon = D_AsrcVars[5] = 0x00400000 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_UL_ADDR + (5 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0x00400000;
+ else
+ el[i + 1] = (0x00100000 - (dtemp / 2)) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 6. AlphaCurrent = 0x000020 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_ALPHACURRENT_BT_UL_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x00000020;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 7. BetaCurrent = 0x3fffe0 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_BETACURRENT_BT_UL_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x003fffe0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 8. drift_ASRC = 0 & drift_io = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_BT_VX_UL_PORT * sizeof(struct ABE_SIODescriptor))
+ + drift_asrc_;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 9. SMEM for ASRC_BT_UL_Coefs pointer */
+ /* ASRC_BT_UL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_SMEM;
+ mem_addr = ASRC_BT_UL_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ if (dppm == 0) {
+ el[i + 1] = OMAP_ABE_C_COEFASRC16_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC16_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC15_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC15_VX_SIZE >> 2);
+ } else {
+ el[i + 1] = OMAP_ABE_C_COEFASRC1_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC1_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC2_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC2_VX_SIZE >> 2);
+ }
+ i = i + 3;
+ /* 10. CMEM for ASRC_BT_UL_Coefs pointer */
+ /* ASRC_BT_UL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_CMEM;
+ mem_addr = ASRC_BT_UL_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp1;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 11. SMEM for XinASRC_BT_UL pointer */
+ /* XinASRC_BT_UL = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/0/1/
+ 0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = XinASRC_BT_UL_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_BT_UL_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_BT_UL_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 12. CMEM for XinASRC_BT_UL pointer */
+ /* XinASRC_BT_UL = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/0/1/
+ 0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = XinASRC_BT_UL_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 13. SMEM for IO_BT_UL_ASRC pointer */
+ /* IO_BT_UL_ASRC = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/
+ ASRC_BT_UL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = IO_BT_UL_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_BT_UL_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_BT_UL_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 14. CMEM for IO_BT_UL_ASRC pointer */
+ /* IO_BT_UL_ASRC = S_XinASRC_BT_UL_ADDR/S_XinASRC_BT_UL_sizeof/
+ ASRC_BT_UL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = IO_BT_UL_ASRC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_BT_UL_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ abe_write_fifo(ABE_DMEM, OMAP_ABE_D_FWMEMINITDESCR_ADDR, (u32 *) &el[0],
+ n_fifo_el);
+}
+/**
+ * abe_init_asrc_bt_dl
+ *
+ * Initialize the following ASRC BT_DL parameters:
+ * 1. DriftSign = D_AsrcVars[1] = 1 or -1
+ * 2. Subblock = D_AsrcVars[2] = 0
+ * 3. DeltaAlpha = D_AsrcVars[3] =
+ * (round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 4. MinusDeltaAlpha = D_AsrcVars[4] =
+ * (-round(nb_phases * drift[ppm] * 10^-6 * 2^20)) << 2
+ * 5. OneMinusEpsilon = D_AsrcVars[5] = 1 - DeltaAlpha/2
+ * 6. AlphaCurrent = 0x000020 (CMEM), initial value of Alpha parameter
+ * 7. BetaCurrent = 0x3fffe0 (CMEM), initial value of Beta parameter
+ * AlphaCurrent + BetaCurrent = 1 (=0x400000 in CMEM = 2^20 << 2)
+ * 8. drift_ASRC = 0 & drift_io = 0
+ * 9. SMEM for ASRC_BT_DL_Coefs pointer
+ * 10. CMEM for ASRC_BT_DL_Coefs pointer
+ * ASRC_BT_DL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ * C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1
+ * 11. SMEM for XinASRC_BT_DL pointer
+ * 12. CMEM for XinASRC_BT_DL pointer
+ * XinASRC_BT_DL = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/0/1/0/0/0/0
+ * 13. SMEM for DL_48_8_DEC pointer
+ * 14. CMEM for DL_48_8_DEC pointer
+ * DL_48_8_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ * ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0
+ * 15. SMEM for DL_48_16_DEC pointer
+ * 16. CMEM for DL_48_16_DEC pointer
+ * DL_48_16_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ * ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0
+ */
+void abe_init_asrc_bt_dl(s32 dppm)
+{
+ s32 el[51];
+ s32 temp0, temp1, adppm, dtemp, mem_tag, mem_addr;
+ u32 i = 0;
+ u32 n_fifo_el = 48;
+ temp0 = 0;
+ temp1 = 1;
+ /* 1. DriftSign = D_AsrcVars[1] = 1 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (1 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm >= 0) {
+ el[i + 1] = 1;
+ adppm = dppm;
+ } else {
+ el[i + 1] = -1;
+ adppm = (-1 * dppm);
+ }
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ dtemp = (adppm << 4) + adppm - ((adppm * 3481L) / 15625L);
+ /* 2. Subblock = D_AsrcVars[2] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (2 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 3. DeltaAlpha = D_AsrcVars[3] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (3 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = dtemp << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 4. MinusDeltaAlpha = D_AsrcVars[4] = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (4 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0;
+ else
+ el[i + 1] = (-dtemp) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 5. OneMinusEpsilon = D_AsrcVars[5] = 0x00400000 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_ASRCVARS_BT_DL_ADDR + (5 * sizeof(s32));
+ el[i] = (mem_tag << 16) + mem_addr;
+ if (dppm == 0)
+ el[i + 1] = 0x00400000;
+ else
+ el[i + 1] = (0x00100000 - (dtemp / 2)) << 2;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 6. AlphaCurrent = 0x000020 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_ALPHACURRENT_BT_DL_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x00000020;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 7. BetaCurrent = 0x3fffe0 (CMEM) */
+ mem_tag = ABE_CMEM;
+ mem_addr = OMAP_ABE_C_BETACURRENT_BT_DL_ADDR;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = 0x003fffe0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 8. drift_ASRC = 0 & drift_io = 0 */
+ mem_tag = ABE_DMEM;
+ mem_addr = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_BT_VX_DL_PORT * sizeof(struct ABE_SIODescriptor))
+ + drift_asrc_;
+ el[i] = (mem_tag << 16) + mem_addr;
+ el[i + 1] = temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 9. SMEM for ASRC_BT_DL_Coefs pointer */
+ /* ASRC_BT_DL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_SMEM;
+ mem_addr = ASRC_BT_DL_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ if (dppm == 0) {
+ el[i + 1] = OMAP_ABE_C_COEFASRC16_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC16_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC15_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC15_VX_SIZE >> 2);
+ } else {
+ el[i + 1] = OMAP_ABE_C_COEFASRC1_VX_ADDR >> 2;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_C_COEFASRC1_VX_SIZE >> 2);
+ el[i + 2] = OMAP_ABE_C_COEFASRC2_VX_ADDR >> 2;
+ el[i + 2] = (el[i + 2] << 8) + (OMAP_ABE_C_COEFASRC2_VX_SIZE >> 2);
+ }
+ i = i + 3;
+ /* 10. CMEM for ASRC_BT_DL_Coefs pointer */
+ /* ASRC_BT_DL_Coefs = C_CoefASRC16_VX_ADDR/C_CoefASRC16_VX_sizeof/0/1/
+ C_CoefASRC15_VX_ADDR/C_CoefASRC15_VX_sizeof/0/1 */
+ mem_tag = ABE_CMEM;
+ mem_addr = ASRC_BT_DL_Coefs_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp1;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 11. SMEM for XinASRC_BT_DL pointer */
+ /* XinASRC_BT_DL =
+ S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = XinASRC_BT_DL_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_BT_DL_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_BT_DL_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 12. CMEM for XinASRC_BT_DL pointer */
+ /* XinASRC_BT_DL =
+ S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/0/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = XinASRC_BT_DL_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = (temp0 << 16) + (temp1 << 12) + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 13. SMEM for DL_48_8_DEC pointer */
+ /* DL_48_8_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = DL_48_8_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_BT_DL_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_BT_DL_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 14. CMEM for DL_48_8_DEC pointer */
+ /* DL_48_8_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = DL_48_8_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_BT_DL_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 15. SMEM for DL_48_16_DEC pointer */
+ /* DL_48_16_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_SMEM;
+ mem_addr = DL_48_16_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ el[i + 1] = OMAP_ABE_S_XINASRC_BT_DL_ADDR >> 3;
+ el[i + 1] = (el[i + 1] << 8) + (OMAP_ABE_S_XINASRC_BT_DL_SIZE >> 3);
+ el[i + 2] = temp0;
+ i = i + 3;
+ /* 16. CMEM for DL_48_16_DEC pointer */
+ /* DL_48_16_DEC = S_XinASRC_BT_DL_ADDR/S_XinASRC_BT_DL_sizeof/
+ ASRC_BT_DL_FIR_L+ASRC_margin/1/0/0/0/0 */
+ mem_tag = ABE_CMEM;
+ mem_addr = DL_48_16_DEC_labelID;
+ el[i] = (mem_tag << 16) + (mem_addr << 2);
+ /* el[i+1] = iam1<<16 + inc1<<12 + iam2<<4 + inc2 */
+ el[i + 1] = ((ASRC_BT_DL_FIR_L + ASRC_margin) << 16) + (temp1 << 12)
+ + (temp0 << 4) + temp0;
+ /* dummy field */
+ el[i + 2] = temp0;
+ abe_write_fifo(ABE_DMEM, OMAP_ABE_D_FWMEMINITDESCR_ADDR, (u32 *) &el[0],
+ n_fifo_el);
+}
diff --git a/sound/soc/omap/abe/abe_cm_addr.h b/sound/soc/omap/abe/abe_cm_addr.h
new file mode 100644
index 0000000..2715002
--- /dev/null
+++ b/sound/soc/omap/abe/abe_cm_addr.h
@@ -0,0 +1,221 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#define OMAP_ABE_INIT_CM_ADDR 0x0
+#define OMAP_ABE_INIT_CM_SIZE 0x640
+#define OMAP_ABE_C_DATA_LSB_2_ADDR 0x640
+#define OMAP_ABE_C_DATA_LSB_2_SIZE 0x4
+#define OMAP_ABE_C_1_ALPHA_ADDR 0x644
+#define OMAP_ABE_C_1_ALPHA_SIZE 0x48
+#define OMAP_ABE_C_ALPHA_ADDR 0x68C
+#define OMAP_ABE_C_ALPHA_SIZE 0x48
+#define OMAP_ABE_C_GAINSWRAMP_ADDR 0x6D4
+#define OMAP_ABE_C_GAINSWRAMP_SIZE 0x38
+#define OMAP_ABE_C_GAINS_DL1M_ADDR 0x70C
+#define OMAP_ABE_C_GAINS_DL1M_SIZE 0x10
+#define OMAP_ABE_C_GAINS_DL2M_ADDR 0x71C
+#define OMAP_ABE_C_GAINS_DL2M_SIZE 0x10
+#define OMAP_ABE_C_GAINS_ECHOM_ADDR 0x72C
+#define OMAP_ABE_C_GAINS_ECHOM_SIZE 0x8
+#define OMAP_ABE_C_GAINS_SDTM_ADDR 0x734
+#define OMAP_ABE_C_GAINS_SDTM_SIZE 0x8
+#define OMAP_ABE_C_GAINS_VXRECM_ADDR 0x73C
+#define OMAP_ABE_C_GAINS_VXRECM_SIZE 0x10
+#define OMAP_ABE_C_GAINS_ULM_ADDR 0x74C
+#define OMAP_ABE_C_GAINS_ULM_SIZE 0x10
+#define OMAP_ABE_C_GAINS_BTUL_ADDR 0x75C
+#define OMAP_ABE_C_GAINS_BTUL_SIZE 0x8
+#define OMAP_ABE_C_SDT_COEFS_ADDR 0x764
+#define OMAP_ABE_C_SDT_COEFS_SIZE 0x24
+#define OMAP_ABE_C_COEFASRC1_VX_ADDR 0x788
+#define OMAP_ABE_C_COEFASRC1_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC2_VX_ADDR 0x7D4
+#define OMAP_ABE_C_COEFASRC2_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC3_VX_ADDR 0x820
+#define OMAP_ABE_C_COEFASRC3_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC4_VX_ADDR 0x86C
+#define OMAP_ABE_C_COEFASRC4_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC5_VX_ADDR 0x8B8
+#define OMAP_ABE_C_COEFASRC5_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC6_VX_ADDR 0x904
+#define OMAP_ABE_C_COEFASRC6_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC7_VX_ADDR 0x950
+#define OMAP_ABE_C_COEFASRC7_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC8_VX_ADDR 0x99C
+#define OMAP_ABE_C_COEFASRC8_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC9_VX_ADDR 0x9E8
+#define OMAP_ABE_C_COEFASRC9_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC10_VX_ADDR 0xA34
+#define OMAP_ABE_C_COEFASRC10_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC11_VX_ADDR 0xA80
+#define OMAP_ABE_C_COEFASRC11_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC12_VX_ADDR 0xACC
+#define OMAP_ABE_C_COEFASRC12_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC13_VX_ADDR 0xB18
+#define OMAP_ABE_C_COEFASRC13_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC14_VX_ADDR 0xB64
+#define OMAP_ABE_C_COEFASRC14_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC15_VX_ADDR 0xBB0
+#define OMAP_ABE_C_COEFASRC15_VX_SIZE 0x4C
+#define OMAP_ABE_C_COEFASRC16_VX_ADDR 0xBFC
+#define OMAP_ABE_C_COEFASRC16_VX_SIZE 0x4C
+#define OMAP_ABE_C_ALPHACURRENT_UL_VX_ADDR 0xC48
+#define OMAP_ABE_C_ALPHACURRENT_UL_VX_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_UL_VX_ADDR 0xC4C
+#define OMAP_ABE_C_BETACURRENT_UL_VX_SIZE 0x4
+#define OMAP_ABE_C_ALPHACURRENT_DL_VX_ADDR 0xC50
+#define OMAP_ABE_C_ALPHACURRENT_DL_VX_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_DL_VX_ADDR 0xC54
+#define OMAP_ABE_C_BETACURRENT_DL_VX_SIZE 0x4
+#define OMAP_ABE_C_COEFASRC1_MM_ADDR 0xC58
+#define OMAP_ABE_C_COEFASRC1_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC2_MM_ADDR 0xCA0
+#define OMAP_ABE_C_COEFASRC2_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC3_MM_ADDR 0xCE8
+#define OMAP_ABE_C_COEFASRC3_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC4_MM_ADDR 0xD30
+#define OMAP_ABE_C_COEFASRC4_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC5_MM_ADDR 0xD78
+#define OMAP_ABE_C_COEFASRC5_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC6_MM_ADDR 0xDC0
+#define OMAP_ABE_C_COEFASRC6_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC7_MM_ADDR 0xE08
+#define OMAP_ABE_C_COEFASRC7_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC8_MM_ADDR 0xE50
+#define OMAP_ABE_C_COEFASRC8_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC9_MM_ADDR 0xE98
+#define OMAP_ABE_C_COEFASRC9_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC10_MM_ADDR 0xEE0
+#define OMAP_ABE_C_COEFASRC10_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC11_MM_ADDR 0xF28
+#define OMAP_ABE_C_COEFASRC11_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC12_MM_ADDR 0xF70
+#define OMAP_ABE_C_COEFASRC12_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC13_MM_ADDR 0xFB8
+#define OMAP_ABE_C_COEFASRC13_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC14_MM_ADDR 0x1000
+#define OMAP_ABE_C_COEFASRC14_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC15_MM_ADDR 0x1048
+#define OMAP_ABE_C_COEFASRC15_MM_SIZE 0x48
+#define OMAP_ABE_C_COEFASRC16_MM_ADDR 0x1090
+#define OMAP_ABE_C_COEFASRC16_MM_SIZE 0x48
+#define OMAP_ABE_C_ALPHACURRENT_MM_EXT_IN_ADDR 0x10D8
+#define OMAP_ABE_C_ALPHACURRENT_MM_EXT_IN_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_MM_EXT_IN_ADDR 0x10DC
+#define OMAP_ABE_C_BETACURRENT_MM_EXT_IN_SIZE 0x4
+#define OMAP_ABE_C_DL2_L_COEFS_ADDR 0x10E0
+#define OMAP_ABE_C_DL2_L_COEFS_SIZE 0x64
+#define OMAP_ABE_C_DL2_R_COEFS_ADDR 0x1144
+#define OMAP_ABE_C_DL2_R_COEFS_SIZE 0x64
+#define OMAP_ABE_C_DL1_COEFS_ADDR 0x11A8
+#define OMAP_ABE_C_DL1_COEFS_SIZE 0x64
+#define OMAP_ABE_C_SRC_3_LP_COEFS_ADDR 0x120C
+#define OMAP_ABE_C_SRC_3_LP_COEFS_SIZE 0x34
+#define OMAP_ABE_C_SRC_3_LP_GAIN_COEFS_ADDR 0x1240
+#define OMAP_ABE_C_SRC_3_LP_GAIN_COEFS_SIZE 0x34
+#define OMAP_ABE_C_SRC_3_HP_COEFS_ADDR 0x1274
+#define OMAP_ABE_C_SRC_3_HP_COEFS_SIZE 0x14
+#define OMAP_ABE_C_SRC_6_LP_COEFS_ADDR 0x1288
+#define OMAP_ABE_C_SRC_6_LP_COEFS_SIZE 0x34
+#define OMAP_ABE_C_SRC_6_LP_GAIN_COEFS_ADDR 0x12BC
+#define OMAP_ABE_C_SRC_6_LP_GAIN_COEFS_SIZE 0x34
+#define OMAP_ABE_C_SRC_6_HP_COEFS_ADDR 0x12F0
+#define OMAP_ABE_C_SRC_6_HP_COEFS_SIZE 0x1C
+#define OMAP_ABE_C_ALPHACURRENT_ECHO_REF_ADDR 0x130C
+#define OMAP_ABE_C_ALPHACURRENT_ECHO_REF_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_ECHO_REF_ADDR 0x1310
+#define OMAP_ABE_C_BETACURRENT_ECHO_REF_SIZE 0x4
+#define OMAP_ABE_C_VIBRA2_CONSTS_ADDR 0x1314
+#define OMAP_ABE_C_VIBRA2_CONSTS_SIZE 0x10
+#define OMAP_ABE_C_VIBRA1_COEFFS_ADDR 0x1324
+#define OMAP_ABE_C_VIBRA1_COEFFS_SIZE 0x2C
+#define OMAP_ABE_C_48_96_LP_COEFS_ADDR 0x1350
+#define OMAP_ABE_C_48_96_LP_COEFS_SIZE 0x3C
+#define OMAP_ABE_C_96_48_AMIC_COEFS_ADDR 0x138C
+#define OMAP_ABE_C_96_48_AMIC_COEFS_SIZE 0x4C
+#define OMAP_ABE_C_96_48_DMIC_COEFS_ADDR 0x13D8
+#define OMAP_ABE_C_96_48_DMIC_COEFS_SIZE 0x4C
+#define OMAP_ABE_C_INPUT_SCALE_ADDR 0x1424
+#define OMAP_ABE_C_INPUT_SCALE_SIZE 0x4
+#define OMAP_ABE_C_OUTPUT_SCALE_ADDR 0x1428
+#define OMAP_ABE_C_OUTPUT_SCALE_SIZE 0x4
+#define OMAP_ABE_C_MUTE_SCALING_ADDR 0x142C
+#define OMAP_ABE_C_MUTE_SCALING_SIZE 0x4
+#define OMAP_ABE_C_GAINS_0DB_ADDR 0x1430
+#define OMAP_ABE_C_GAINS_0DB_SIZE 0x8
+#define OMAP_ABE_C_ALPHACURRENT_BT_DL_ADDR 0x1438
+#define OMAP_ABE_C_ALPHACURRENT_BT_DL_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_BT_DL_ADDR 0x143C
+#define OMAP_ABE_C_BETACURRENT_BT_DL_SIZE 0x4
+#define OMAP_ABE_C_ALPHACURRENT_BT_UL_ADDR 0x1440
+#define OMAP_ABE_C_ALPHACURRENT_BT_UL_SIZE 0x4
+#define OMAP_ABE_C_BETACURRENT_BT_UL_ADDR 0x1444
+#define OMAP_ABE_C_BETACURRENT_BT_UL_SIZE 0x4
+#define OMAP_ABE_C_SRC_FIR6_LP_GAIN_COEFS_ADDR 0x1448
+#define OMAP_ABE_C_SRC_FIR6_LP_GAIN_COEFS_SIZE 0x2A0
+#define OMAP_ABE_C_SRC_44P1_COEFS_ADDR 0x16E8
+#define OMAP_ABE_C_SRC_44P1_COEFS_SIZE 0x480
+#define OMAP_ABE_C_SRC_MM_DL_44P1_STEP_ADDR 0x1B68
+#define OMAP_ABE_C_SRC_MM_DL_44P1_STEP_SIZE 0x8
+#define OMAP_ABE_C_SRC_TONES_44P1_STEP_ADDR 0x1B70
+#define OMAP_ABE_C_SRC_TONES_44P1_STEP_SIZE 0x8
+#define OMAP_ABE_C_SRC_44P1_MULFAC2_ADDR 0x1B78
+#define OMAP_ABE_C_SRC_44P1_MULFAC2_SIZE 0x8
+#define OMAP_ABE_C_SRC_FIR12_LP_GAIN_COEFS_ADDR 0x1B80
+#define OMAP_ABE_C_SRC_FIR12_LP_GAIN_COEFS_SIZE 0x1E4
+#define OMAP_ABE_C_SRC_6_HP_NEW_COEFS_ADDR 0x1D64
+#define OMAP_ABE_C_SRC_6_HP_NEW_COEFS_SIZE 0x1C
diff --git a/sound/soc/omap/abe/abe_core.c b/sound/soc/omap/abe/abe_core.c
new file mode 100644
index 0000000..7a9ae11
--- /dev/null
+++ b/sound/soc/omap/abe/abe_core.c
@@ -0,0 +1,929 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dbg.h"
+#include "abe.h"
+#include "abe_gain.h"
+#include "abe_aess.h"
+#include "abe_port.h"
+#include "abe_mem.h"
+#include "abe_taskid.h"
+
+#define OMAP_ABE_IRQ_FIFO_MASK ((OMAP_ABE_D_MCUIRQFIFO_SIZE >> 2) - 1)
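+/*
+ * Wrap mask for the circular IRQ/DBG FIFO read index advanced in
+ * omap_abe_irq_processing(); it relies on the FIFO word count
+ * (OMAP_ABE_D_MCUIRQFIFO_SIZE >> 2) being a power of two.
+ */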
+
+void abe_init_asrc_vx_dl(s32 dppm);
+void abe_init_asrc_vx_ul(s32 dppm);
+void abe_init_asrc_mm_ext_in(s32 dppm);
+void abe_init_asrc_bt_ul(s32 dppm);
+void abe_init_asrc_bt_dl(s32 dppm);
+
+void abe_src_filters_saturation_monitoring(void);
+void abe_irq_aps(u32 aps_info);
+void abe_irq_ping_pong(void);
+void abe_irq_check_for_sequences(u32 seq_info);
+extern u32 abe_size_pingpong;
+extern u32 abe_base_address_pingpong[];
+
+void abe_add_subroutine(u32 *id, abe_subroutine2 f, u32 nparam, u32 *params);
+
+
+/**
+ * omap_abe_reset_hal - reset the ABE/HAL
+ * @abe: Pointer on abe handle
+ *
+ * Operations: resets the ABE by reloading the static variables and
+ * default AESS registers.
+ * Called after a PRCM cold-start reset of the ABE.
+ */
+int omap_abe_reset_hal(struct omap_abe *abe)
+{
+ u32 i;
+
+ omap_abe_dbg_reset(&abe->dbg);
+
+ _log(ABE_ID_RESET_HAL, 0, 0, 0);
+
+ /* IRQ & DBG circular read pointer in DMEM */
+ abe->irq_dbg_read_ptr = 0;
+
+ /* default = disable the mixer's adaptive gain control */
+ omap_abe_use_compensated_gain(abe, 0);
+
+ /* reset the default gain values */
+ for (i = 0; i < MAX_NBGAIN_CMEM; i++) {
+ abe->muted_gains_indicator[i] = 0;
+ abe->desired_gains_decibel[i] = (u32) GAIN_MUTE;
+ abe->desired_gains_linear[i] = 0;
+ abe->desired_ramp_delay_ms[i] = 0;
+ abe->muted_gains_decibel[i] = (u32) GAIN_TOOLOW;
+ }
+ omap_abe_hw_configuration(abe);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_hal);
+
+/**
+ * omap_abe_wakeup - Wakeup ABE
+ * @abe: Pointer on abe handle
+ *
+ * Wakes up the ABE when it comes back from retention
+ */
+int omap_abe_wakeup(struct omap_abe *abe)
+{
+ /* Restart event generator */
+ omap_abe_write_event_generator(abe, EVENT_TIMER);
+
+ /* reconfigure DMA Req and MCU Irq visibility */
+ omap_abe_hw_configuration(abe);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_wakeup);
+
+/**
+ * abe_monitoring
+ *
+ * checks the internal status of ABE and HAL
+ */
+void abe_monitoring(void)
+{
+
+}
+
+/**
+ * omap_abe_irq_processing - Process ABE interrupt
+ * @abe: Pointer on abe handle
+ *
+ * This subroutine is called upon reception of the "MA_IRQ_99 ABE_MPU_IRQ"
+ * audio back-end interrupt. It checks the ATC hardware and the IRQ_FIFO
+ * from the AE and acts accordingly. Some IRQ sources signal the delivery
+ * of "end of time-sequenced tasks" notifications, some come from the
+ * ping-pong protocols, and some are generated by the embedded debugger
+ * when the firmware stops on programmable breakpoints, etc.
+ */
+int omap_abe_irq_processing(struct omap_abe *abe)
+{
+ u32 abe_irq_dbg_write_ptr, i, cmem_src, sm_cm;
+ abe_irq_data_t IRQ_data;
+
+ _log(ABE_ID_IRQ_PROCESSING, 0, 0, 0);
+
+ /* extract the write pointer index from CMEM memory (INITPTR format) */
+ /* CMEM address of the write pointer in bytes */
+ cmem_src = MCU_IRQ_FIFO_ptr_labelID << 2;
+ omap_abe_mem_read(abe, OMAP_ABE_CMEM, cmem_src,
+ &sm_cm, sizeof(abe_irq_dbg_write_ptr));
+ /* AESS left-pointer index located on MSBs */
+ abe_irq_dbg_write_ptr = sm_cm >> 16;
+ abe_irq_dbg_write_ptr &= 0xFF;
+ /* loop on the IRQ FIFO content */
+ for (i = 0; i < OMAP_ABE_D_MCUIRQFIFO_SIZE; i++) {
+ /* stop when the FIFO is empty */
+ if (abe_irq_dbg_write_ptr == abe->irq_dbg_read_ptr)
+ break;
+ /* read the IRQ/DBG FIFO */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ (OMAP_ABE_D_MCUIRQFIFO_ADDR +
+ (abe->irq_dbg_read_ptr << 2)),
+ (u32 *) &IRQ_data, sizeof(IRQ_data));
+ abe->irq_dbg_read_ptr = (abe->irq_dbg_read_ptr + 1) & OMAP_ABE_IRQ_FIFO_MASK;
+ /* select the source of the interrupt */
+ switch (IRQ_data.tag) {
+ case IRQtag_APS:
+ _log(ABE_ID_IRQ_PROCESSING, IRQ_data.data, 0, 1);
+ abe_irq_aps(IRQ_data.data);
+ break;
+ case IRQtag_PP:
+ _log(ABE_ID_IRQ_PROCESSING, 0, 0, 2);
+ abe_irq_ping_pong();
+ break;
+ case IRQtag_COUNT:
+ _log(ABE_ID_IRQ_PROCESSING, IRQ_data.data, 0, 3);
+ abe_irq_check_for_sequences(IRQ_data.data);
+ abe_monitoring();
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_irq_processing);
+
+/**
+ * omap_abe_set_ping_pong_buffer
+ * @abe: Pointer on abe handle
+ * @port: ABE port ID
+ * @n_bytes: Size of Ping/Pong buffer
+ *
+ * Updates the next ping-pong buffer with @n_bytes bytes copied from the
+ * host processor. This API notifies the FW that the data transfer is done.
+ */
+int omap_abe_set_ping_pong_buffer(struct omap_abe *abe, u32 port, u32 n_bytes)
+{
+ u32 sio_pp_desc_address, struct_offset, n_samples, datasize,
+ base_and_size, *src;
+ struct ABE_SPingPongDescriptor desc_pp;
+
+ _log(ABE_ID_SET_PING_PONG_BUFFER, port, n_bytes, n_bytes >> 8);
+
+ /* ping_pong is only supported on MM_DL */
+ if (port != OMAP_ABE_MM_DL_PORT) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+ /* translates the number of bytes in samples */
+ /* data size in DMEM words */
+ datasize = omap_abe_dma_port_iter_factor((struct omap_abe_data_format *)&((abe_port[port]).format));
+ /* data size in bytes */
+ datasize = datasize << 2;
+ n_samples = n_bytes / datasize;
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, OMAP_ABE_D_PINGPONGDESC_ADDR,
+ (u32 *) &desc_pp, sizeof(desc_pp));
+ /*
+ * read the port SIO descriptor and extract the current pointer
+ * address after reading the counter
+ */
+ if ((desc_pp.counter & 0x1) == 0) {
+ struct_offset = (u32) &(desc_pp.nextbuff0_BaseAddr) -
+ (u32) &(desc_pp);
+ base_and_size = desc_pp.nextbuff0_BaseAddr;
+ } else {
+ struct_offset = (u32) &(desc_pp.nextbuff1_BaseAddr) -
+ (u32) &(desc_pp);
+ base_and_size = desc_pp.nextbuff1_BaseAddr;
+ }
+
+ base_and_size = abe->pp_buf_addr[abe->pp_buf_id_next];
+ abe->pp_buf_id_next = (abe->pp_buf_id_next + 1) & 0x03;
+
+ base_and_size = (base_and_size & 0xFFFFL) + (n_samples << 16);
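+ /*
+ * the descriptor word keeps the DMEM base address in its low 16 bits
+ * and the buffer length in samples in its upper 16 bits; pp_buf_addr[]
+ * is used as a 4-entry circular list of base addresses, hence the
+ * "& 0x03" wrap above
+ */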
+ sio_pp_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR + struct_offset;
+ src = &base_and_size;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, sio_pp_desc_address,
+ (u32 *) &base_and_size, sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_set_ping_pong_buffer);
+
+/**
+ * omap_abe_read_next_ping_pong_buffer
+ * @abe: Pointer on abe handle
+ * @port: ABE port ID
+ * @p: Next buffer address (pointer)
+ * @n: Next buffer size (pointer)
+ *
+ * Tells the base address of the next ping-pong buffer and its size
+ */
+int omap_abe_read_next_ping_pong_buffer(struct omap_abe *abe, u32 port, u32 *p, u32 *n)
+{
+ u32 sio_pp_desc_address;
+ struct ABE_SPingPongDescriptor desc_pp;
+
+ _log(ABE_ID_READ_NEXT_PING_PONG_BUFFER, port, 0, 0);
+
+ /* ping_pong is only supported on MM_DL */
+ if (port != OMAP_ABE_MM_DL_PORT) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+ /* read the port SIO descriptor and extract the current pointer
+ address after reading the counter */
+ sio_pp_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR;
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, sio_pp_desc_address,
+ (u32 *) &desc_pp, sizeof(struct ABE_SPingPongDescriptor));
+ if ((desc_pp.counter & 0x1) == 0) {
+ _log(ABE_ID_READ_NEXT_PING_PONG_BUFFER, port, 0, 0);
+ *p = desc_pp.nextbuff0_BaseAddr;
+ } else {
+ _log(ABE_ID_READ_NEXT_PING_PONG_BUFFER, port, 1, 0);
+ *p = desc_pp.nextbuff1_BaseAddr;
+ }
+ /* translates the number of samples in bytes */
+ *n = abe_size_pingpong;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_read_next_ping_pong_buffer);
+
+/**
+ * omap_abe_init_ping_pong_buffer
+ * @abe: Pointer on abe handle
+ * @id: ABE port ID
+ * @size_bytes: size of each ping-pong buffer in bytes
+ * @n_buffers: number of buffers (2 = ping/pong)
+ * @p: returned address of the ping-pong list of base addresses
+ * (byte offset from DMEM start)
+ *
+ * Computes the base address of the ping_pong buffers
+ */
+int omap_abe_init_ping_pong_buffer(struct omap_abe *abe,
+ u32 id, u32 size_bytes, u32 n_buffers,
+ u32 *p)
+{
+ u32 i, dmem_addr;
+
+ _log(ABE_ID_INIT_PING_PONG_BUFFER, id, size_bytes, n_buffers);
+
+ /* ping-pong is currently supported with a 2-buffer configuration, but
+ the FW is ready for ping/pong/pung/pang... */
+ if (id != OMAP_ABE_MM_DL_PORT || n_buffers > MAX_PINGPONG_BUFFERS) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+ for (i = 0; i < n_buffers; i++) {
+ dmem_addr = OMAP_ABE_D_PING_ADDR + (i * size_bytes);
+ /* base addresses of the ping pong buffers in U8 unit */
+ abe_base_address_pingpong[i] = dmem_addr;
+ }
+
+ for (i = 0; i < 4; i++)
+ abe->pp_buf_addr[i] = OMAP_ABE_D_PING_ADDR + (i * size_bytes);
+ abe->pp_buf_id = 0;
+ abe->pp_buf_id_next = 0;
+ abe->pp_first_irq = 1;
+
+ /* global data */
+ abe_size_pingpong = size_bytes;
+ *p = (u32) OMAP_ABE_D_PING_ADDR;
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_init_ping_pong_buffer);
+
+/**
+ * omap_abe_read_offset_from_ping_buffer
+ * @abe: Pointer on abe handle
+ * @id: ABE port ID
+ * @n: returned address of the offset
+ * from the ping buffer start address (in samples)
+ *
+ * Computes the current firmware ping pong read pointer location,
+ * expressed in samples, as the offset from the start address of the ping buffer.
+ */
+int omap_abe_read_offset_from_ping_buffer(struct omap_abe *abe,
+ u32 id, u32 *n)
+{
+ u32 sio_pp_desc_address;
+ struct ABE_SPingPongDescriptor desc_pp;
+
+ /* ping_pong is only supported on MM_DL */
+ if (OMAP_ABE_MM_DL_PORT != id) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ } else {
+ /* read the port SIO ping pong descriptor */
+ sio_pp_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR;
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ sio_pp_desc_address, (u32 *) &desc_pp,
+ sizeof(struct ABE_SPingPongDescriptor));
+ /* extract the current ping pong buffer read pointer based on
+ the value of the counter */
+ if ((desc_pp.counter & 0x1) == 0) {
+ /* the next is buffer0, hence the current is buffer1 */
+ *n = desc_pp.nextbuff1_Samples -
+ desc_pp.workbuff_Samples;
+ } else {
+ /* the next is buffer1, hence the current is buffer0 */
+ *n = desc_pp.nextbuff0_Samples -
+ desc_pp.workbuff_Samples;
+ }
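+ /*
+ * convert the ping buffer size from bytes to samples before adding
+ * the buffer offset; the divisors below assume 4 bytes per sample for
+ * the mono and packed 16-bit stereo formats and 8 bytes per sample
+ * for the 32-bit stereo formats
+ */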
+ switch (abe_port[OMAP_ABE_MM_DL_PORT].format.samp_format) {
+ case MONO_MSB:
+ case MONO_RSHIFTED_16:
+ case STEREO_16_16:
+ *n += abe->pp_buf_id * abe_size_pingpong / 4;
+ break;
+ case STEREO_MSB:
+ case STEREO_RSHIFTED_16:
+ *n += abe->pp_buf_id * abe_size_pingpong / 8;
+ break;
+ default:
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_read_offset_from_ping_buffer);
+
+/**
+ * omap_abe_set_router_configuration
+ * @id: name of the router
+ * @k: id of the configuration
+ * @param: list of output indexes of the route
+ *
+ * The uplink router takes its input from DMIC (6 samples), AMIC (2 samples)
+ * and PORT1/2 (2 stereo ports). Each sample will be individually stored in
+ * an intermediate table of 10 elements.
+ *
+ * Example of router table parameter for voice uplink with phoenix microphones
+ *
+ * indexes 0 .. 9 = MM_UL description (digital MICs and MMEXTIN)
+ * DMIC1_L_labelID, DMIC1_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+ * MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID, ZERO_labelID, ZERO_labelID,
+ * ZERO_labelID, ZERO_labelID,
+ * indexes 10 .. 11 = MM_UL2 description (recording on DMIC3)
+ * DMIC3_L_labelID, DMIC3_R_labelID,
+ * indexes 12 .. 13 = VX_UL description (VXUL based on PDMUL data)
+ * AMIC_L_labelID, AMIC_R_labelID,
+ * indexes 14 .. 15 = RESERVED (NULL)
+ * ZERO_labelID, ZERO_labelID,
+ */
+int omap_abe_set_router_configuration(struct omap_abe *abe,
+ u32 id, u32 k, u32 *param)
+{
+ _log(ABE_ID_SET_ROUTER_CONFIGURATION, id, (u32) param, (u32) param >> 8);
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_AUPLINKROUTING_ADDR,
+ param, OMAP_ABE_D_AUPLINKROUTING_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_set_router_configuration);
+
+/**
+ * omap_abe_set_opp_processing - Set OPP mode for ABE Firmware
+ * @opp: OPP mode
+ *
+ * New processing network and OPP:
+ * 0: Ultra low power consumption audio player (no post-processing, no mixer)
+ * 1: OPP 25% (simple multimedia features, including low-power player)
+ * 2: OPP 50% (multimedia and voice calls)
+ * 3: OPP 100% (complex multimedia use-cases)
+ *
+ * Rearranges the FW task network to the corresponding OPP list of features.
+ * The corresponding AE ports are supposed to be set/reset accordingly before
+ * this switch.
+ *
+ */
+int omap_abe_set_opp_processing(struct omap_abe *abe, u32 opp)
+{
+ u32 dOppMode32, sio_desc_address;
+ struct ABE_SIODescriptor sio_desc;
+
+ _log(ABE_ID_SET_OPP_PROCESSING, opp, 0, 0);
+
+ switch (opp) {
+ case ABE_OPP25:
+ /* OPP25% */
+ dOppMode32 = DOPPMODE32_OPP25;
+ break;
+ case ABE_OPP50:
+ /* OPP50% */
+ dOppMode32 = DOPPMODE32_OPP50;
+ break;
+ default:
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_BLOCK_COPY_ERR);
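+ /* unknown OPP: report the error and fall through to the OPP100
+ setting */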
+ case ABE_OPP100:
+ /* OPP100% */
+ dOppMode32 = DOPPMODE32_OPP100;
+ break;
+ }
+ /* Write Multiframe inside DMEM */
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MAXTASKBYTESINSLOT_ADDR, &dOppMode32, sizeof(u32));
+
+#if 0
+ /* Disable BT / MM Ext ASRC dynamic switch */
+
+ sio_desc_address = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_MM_EXT_IN_PORT *
+ sizeof(struct ABE_SIODescriptor));
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ /* ASRC input buffer, size 40 */
+ sio_desc.smem_addr1 = smem_mm_ext_in_opp100;
+ /* Init MM_EXT_IN ASRC and enable its adaptation */
+ abe_init_asrc_mm_ext_in(250);
+ } else
+ /* at OPP 50 or without ASRC */
+ sio_desc.smem_addr1 = smem_mm_ext_in_opp50;
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+
+ sio_desc_address = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_BT_VX_UL_PORT *
+ sizeof(struct ABE_SIODescriptor));
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+
+ if (abe_port[OMAP_ABE_BT_VX_UL_PORT].format.f == 8000) {
+ if (dOppMode32 == DOPPMODE32_OPP100)
+ /* ASRC input buffer, size 40 */
+ sio_desc.smem_addr1 = smem_bt_vx_ul_opp100;
+ else
+ /* at OPP 50 without ASRC */
+ sio_desc.smem_addr1 = BT_UL_8k_labelID;
+ } else {
+ if (dOppMode32 == DOPPMODE32_OPP100)
+ /* ASRC input buffer, size 40 */
+ sio_desc.smem_addr1 = smem_bt_vx_ul_opp100;
+ else
+ /* at OPP 50 without ASRC */
+ sio_desc.smem_addr1 = BT_UL_16k_labelID;
+ }
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+
+ sio_desc_address = OMAP_ABE_D_IODESCR_ADDR + (OMAP_ABE_BT_VX_DL_PORT *
+ sizeof(struct ABE_SIODescriptor));
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+
+#define ABE_TASK_ID(ID) (OMAP_ABE_D_TASKSLIST_ADDR + sizeof(ABE_STask)*(ID))
+#define TASK_BT_DL_48_8_SLT 14
+#define TASK_BT_DL_48_8_IDX 4
+ if (abe_port[OMAP_ABE_BT_VX_DL_PORT].format.f == 8000) {
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_8_FIR_OPP100);
+ sio_desc.smem_addr1 = BT_DL_8k_opp100_labelID;
+ } else {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_8_FIR);
+ sio_desc.smem_addr1 = BT_DL_8k_labelID;
+ }
+ } else {
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_16_OPP100);
+ sio_desc.smem_addr1 = BT_DL_16k_opp100_labelID;
+ } else {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_16);
+ sio_desc.smem_addr1 = BT_DL_16k_labelID;
+ }
+ }
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_MULTIFRAME_ADDR,
+ (u32 *) abe->MultiFrame, sizeof(abe->MultiFrame));
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, sio_desc_address,
+ (u32 *) &sio_desc, sizeof(sio_desc));
+
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ /* Init BT_VX_UL ASRC and enable its adaptation */
+ abe_init_asrc_bt_ul(250);
+ /* Init BT_VX_DL ASRC and enable its adaptation */
+ abe_init_asrc_bt_dl(-250);
+ }
+#endif
+ return 0;
+
+}
+EXPORT_SYMBOL(omap_abe_set_opp_processing);
+
+/**
+ * omap_abe_reset_vx_ul_src_filters - reset VX-UL port SRC filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_vx_ul_src_filters(struct omap_abe *abe)
+{
+ if (abe_port[OMAP_ABE_VX_UL_PORT].format.f == 8000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_8_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_8_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_8_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_8_HP_DATA_SIZE);
+ } else if (abe_port[OMAP_ABE_VX_UL_PORT].format.f == 16000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_16_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_16_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_16_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_16_HP_DATA_SIZE);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_vx_ul_src_filters);
+
+/**
+ * omap_abe_reset_mic_ul_src_filters - reset AMIC or DMICs or BT UL SRC filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_mic_ul_src_filters(struct omap_abe *abe)
+{
+ u16 vx[NBROUTE_UL];
+
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_AUPLINKROUTING_ADDR,
+ (u32 *)vx, OMAP_ABE_D_AUPLINKROUTING_SIZE);
+
+ switch (vx[12]) {
+ case ZERO_labelID:
+ /* no MIC used */
+ return 0;
+ case DMIC1_L_labelID:
+ case DMIC1_R_labelID:
+ /* DMIC0 used */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC0_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC0_96_48_DATA_SIZE);
+ break;
+ case DMIC2_L_labelID:
+ case DMIC2_R_labelID:
+ /* DMIC1 used */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC1_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC1_96_48_DATA_SIZE);
+ break;
+ case DMIC3_L_labelID:
+ case DMIC3_R_labelID:
+ /* DMIC2 used */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC2_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC2_96_48_DATA_SIZE);
+ break;
+ case BT_UL_L_labelID:
+ case BT_UL_R_labelID:
+ /* BT MIC used */
+ if (abe_port[OMAP_ABE_BT_VX_UL_PORT].format.f == 8000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_8_48_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_8_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_8_48_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_8_48_LP_DATA_SIZE);
+ } else if (abe_port[OMAP_ABE_BT_VX_UL_PORT].format.f == 16000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_16_48_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_16_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_16_48_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_16_48_LP_DATA_SIZE);
+ }
+ break;
+ case AMIC_L_labelID:
+ case AMIC_R_labelID:
+ /* AMIC used */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_AMIC_96_48_DATA_ADDR,
+ OMAP_ABE_S_AMIC_96_48_DATA_SIZE);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_mic_ul_src_filters);
+
+/**
+ * omap_abe_reset_vx_dl_src_filters - reset VX-DL port SRC filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_vx_dl_src_filters(struct omap_abe *abe)
+{
+ if (abe_port[OMAP_ABE_VX_DL_PORT].format.f == 8000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_8_48_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_8_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_8_48_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_8_48_LP_DATA_SIZE);
+ } else if (abe_port[OMAP_ABE_VX_DL_PORT].format.f == 16000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_16_48_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_16_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_16_48_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_16_48_LP_DATA_SIZE);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_vx_dl_src_filters);
+
+/**
+ * omap_abe_reset_dl1_src_filters - reset DL1 path filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_dl1_src_filters(struct omap_abe *abe)
+{
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DL1_M_EQ_DATA_ADDR,
+ OMAP_ABE_S_DL1_M_EQ_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_EARP_48_96_LP_DATA_ADDR,
+ OMAP_ABE_S_EARP_48_96_LP_DATA_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_dl1_src_filters);
+
+/**
+ * omap_abe_reset_dl2_src_filters - reset DL2 path filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_dl2_src_filters(struct omap_abe *abe)
+{
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR,
+ OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_IHF_48_96_LP_DATA_ADDR,
+ OMAP_ABE_S_IHF_48_96_LP_DATA_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_dl2_src_filters);
+
+/**
+ * omap_abe_reset_bt_dl_src_filters - reset bluetooth DL SRC path filters
+ *
+ * it is assumed that filters are located in SMEM
+ */
+int omap_abe_reset_bt_dl_src_filters(struct omap_abe *abe)
+{
+ if (abe_port[OMAP_ABE_BT_VX_DL_PORT].format.f == 8000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_8_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_8_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_8_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_8_HP_DATA_SIZE);
+ } else if (abe_port[OMAP_ABE_BT_VX_DL_PORT].format.f == 16000) {
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_16_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_16_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_16_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_16_HP_DATA_SIZE);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reset_bt_dl_src_filters);
+
+/**
+ * abe_check_filter_is_saturating
+ * @abe: pointer to omap_abe struct
+ * @address: filter address
+ * @size: filter size in bytes
+ *
+ * Check if the filter is saturating.
+ * It is assumed that the filter is located in SMEM.
+ * Returns 1 if saturated, 0 otherwise.
+ */
+int abe_check_filter_is_saturating(struct omap_abe *abe, u32 address, u32 size)
+{
+ /* largest buffer size among all filter buffers in SMEM */
+ u32 filter[OMAP_ABE_S_XINASRC_UL_VX_SIZE >> 2];
+ int found = 0, i = 0;
+
+ omap_abe_mem_read(abe, ABE_SMEM, address, filter, size);
+
+ size >>= 2;
+ while ((i < size) && (filter[i] < 0x700000 || filter[i] > 0x900000))
+ i++;
+
+ if (i < size)
+ found = 1;
+
+ return found;
+}
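The loop above flags a filter state as saturating when any 24-bit SMEM word
falls inside the window [0x700000, 0x900000], i.e. near positive or negative
full scale of a 24-bit two's-complement sample. A minimal stand-alone sketch
of the same window test, with hypothetical buffer and function names:

/* illustrative sketch, not part of this patch */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int sample_buffer_is_saturating(const uint32_t *buf, size_t words)
{
        size_t i;

        for (i = 0; i < words; i++)
                if (buf[i] >= 0x700000 && buf[i] <= 0x900000)
                        return 1; /* near full scale: saturating */
        return 0;
}

int main(void)
{
        uint32_t quiet[4]     = { 0x000123, 0x0FF000, 0xF00000, 0x123456 };
        uint32_t saturated[4] = { 0x000123, 0x7FFFFE, 0x000000, 0x000000 };

        /* prints "0 1" */
        printf("%d %d\n", sample_buffer_is_saturating(quiet, 4),
               sample_buffer_is_saturating(saturated, 4));
        return 0;
}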
+
+/**
+ * abe_ul_src_filters_saturation_monitoring - monitor the ABE UL SRC filters
+ * and reset those that are saturating.
+ *
+ * It is assumed that the filters are located in SMEM.
+ */
+void abe_ul_src_filters_saturation_monitoring(struct omap_abe *abe)
+{
+ int saturating = 0;
+ u16 vx[NBROUTE_UL];
+
+ omap_abe_mem_read(abe, ABE_DMEM,
+ OMAP_ABE_D_AUPLINKROUTING_ADDR,
+ (u32 *)vx, OMAP_ABE_D_AUPLINKROUTING_SIZE);
+
+ switch (vx[12]) {
+ case ZERO_labelID:
+ /* no MIC used */
+ return;
+ case DMIC1_L_labelID:
+ case DMIC1_R_labelID:
+ /* DMIC0 used */
+ saturating = abe_check_filter_is_saturating(abe,
+ OMAP_ABE_S_DMIC1_ADDR,
+ OMAP_ABE_S_DMIC1_SIZE);
+ break;
+ case DMIC2_L_labelID:
+ case DMIC2_R_labelID:
+ /* DMIC1 used */
+ saturating = abe_check_filter_is_saturating(abe,
+ OMAP_ABE_S_DMIC2_ADDR,
+ OMAP_ABE_S_DMIC2_SIZE);
+ break;
+ case DMIC3_L_labelID:
+ case DMIC3_R_labelID:
+ /* DMIC2 used */
+ saturating = abe_check_filter_is_saturating(abe,
+ OMAP_ABE_S_DMIC3_ADDR,
+ OMAP_ABE_S_DMIC3_SIZE);
+ break;
+ case BT_UL_L_labelID:
+ case BT_UL_R_labelID:
+ /* BT MIC used */
+ saturating = abe_check_filter_is_saturating(abe,
+ OMAP_ABE_S_BT_UL_ADDR,
+ OMAP_ABE_S_BT_UL_SIZE);
+ break;
+ case AMIC_L_labelID:
+ case AMIC_R_labelID:
+ /* AMIC used */
+ saturating = abe_check_filter_is_saturating(abe,
+ OMAP_ABE_S_AMIC_ADDR,
+ OMAP_ABE_S_AMIC_SIZE);
+ break;
+ default:
+ return;
+ }
+
+ if (saturating) {
+ omap_abe_reset_mic_ul_src_filters(abe);
+ omap_abe_reset_vx_ul_src_filters(abe);
+ }
+}
+
+/**
+ * abe_vx_dl_src_filters_saturation_monitoring - monitor the ABE VX-DL SRC
+ * filters and reset those that are saturating.
+ *
+ * It is assumed that the filters are located in SMEM.
+ */
+void abe_vx_dl_src_filters_saturation_monitoring(struct omap_abe *abe)
+{
+ if (abe_check_filter_is_saturating(abe, OMAP_ABE_S_VX_DL_ADDR,
+ OMAP_ABE_S_VX_DL_SIZE)) {
+ omap_abe_reset_vx_dl_src_filters(abe);
+ omap_abe_reset_dl1_src_filters(abe);
+ omap_abe_reset_dl2_src_filters(abe);
+ }
+
+ if (abe_check_filter_is_saturating(abe, OMAP_ABE_S_BT_DL_ADDR,
+ OMAP_ABE_S_BT_DL_SIZE))
+ omap_abe_reset_bt_dl_src_filters(abe);
+}
+
+/**
+ * omap_abe_src_filters_saturation_monitoring - monitor the ABE SRC filters
+ * and reset those that are saturating.
+ *
+ * It is assumed that the filters are located in SMEM.
+ */
+void omap_abe_src_filters_saturation_monitoring(struct omap_abe *abe)
+{
+ abe_ul_src_filters_saturation_monitoring(abe);
+ abe_vx_dl_src_filters_saturation_monitoring(abe);
+}
+
+/**
+ * omap_abe_check_activity - check for ABE activity
+ *
+ * Check if any ABE ports are running.
+ * Returns 1 if there is still activity on the ABE.
+ * Returns 0 if there is no more activity; the event generator can be stopped.
+ *
+ */
+int omap_abe_check_activity(struct omap_abe *abe)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < (LAST_PORT_ID - 1); i++) {
+ if (abe_port[abe_port_priority[i]].status ==
+ OMAP_ABE_PORT_ACTIVITY_RUNNING)
+ break;
+ }
+ if (i < (LAST_PORT_ID - 1))
+ ret = 1;
+ return ret;
+}
+EXPORT_SYMBOL(omap_abe_check_activity);
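A small usage sketch of the activity check above; the caller name is
hypothetical and the actual event-generator stop call is outside this hunk,
so it is only indicated by a comment:

/* illustrative sketch, not part of this patch */
static void maybe_idle_abe(struct omap_abe *abe)
{
        if (!omap_abe_check_activity(abe)) {
                /* no port is RUNNING: the event generator could be
                 * stopped here (stop call not shown in this hunk) */
        }
}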
+
+/**
+ * abe_plug_subroutine
+ * @id: returned sequence index after plugging a new subroutine
+ * @f: subroutine address to be inserted
+ * @n: number of parameters of this subroutine
+ * @params: pointer on parameters
+ *
+ * register a list of subroutines for call-back purpose
+ */
+abehal_status abe_plug_subroutine(u32 *id, abe_subroutine2 f, u32 n,
+ u32 *params)
+{
+ _log(ABE_ID_PLUG_SUBROUTINE, (u32) (*id), (u32) f, n);
+
+ abe_add_subroutine(id, (abe_subroutine2) f, n, (u32 *) params);
+ return 0;
+}
+EXPORT_SYMBOL(abe_plug_subroutine);
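A hedged registration sketch for the call above. The callback, its body and
the id variable are hypothetical, and it assumes abe_subroutine2 is a
two-argument u32 callback (its typedef lives in abe_typ.h, outside this hunk):

/* illustrative sketch, not part of this patch; names are hypothetical */
static void my_pingpong_callback(u32 buf_id, u32 unused)
{
        /* e.g. refill the ping-pong buffer identified by buf_id */
}

static u32 my_callback_id;

static void register_my_callback(void)
{
        u32 params[2] = { 0, 0 };

        abe_plug_subroutine(&my_callback_id,
                            (abe_subroutine2) my_pingpong_callback,
                            SUB_2_PARAM, params);
}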
diff --git a/sound/soc/omap/abe/abe_dat.c b/sound/soc/omap/abe/abe_dat.c
new file mode 100644
index 0000000..0111bae
--- /dev/null
+++ b/sound/soc/omap/abe/abe_dat.c
@@ -0,0 +1,458 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "abe_legacy.h"
+
+struct omap_abe *abe;
+
+/*
+ * HAL/FW ports status / format / sampling / protocol(call_back) / features
+ * / gain / name
+ */
+abe_port_t abe_port[LAST_PORT_ID]; /* list of ABE ports */
+const abe_port_t abe_port_init[LAST_PORT_ID] = {
+ /* Status Data Format Drift Call-Back Protocol+selector desc_addr;
+ buf_addr; buf_size; iter; irq_addr irq_data DMA_T $Features
+	   reset at start; Port Name for the debug trace */
+ /* DMIC */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {96000, SIX_MSB},
+ NODRIFT, NOCALLBACK, 1, (DMIC_ITER/6),
+ {
+ SNK_P, DMIC_PORT_PROT,
+ {{dmem_dmic, dmem_dmic_size, DMIC_ITER} }
+ },
+ {0, 0},
+ {EQDMIC, 0}, "DMIC"},
+ /* PDM_UL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {96000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_amic, (MCPDM_UL_ITER/2),
+ {
+ SNK_P, MCPDMUL_PORT_PROT,
+ {{dmem_amic, dmem_amic_size, MCPDM_UL_ITER} }
+ },
+ {0, 0},
+ {EQAMIC, 0}, "PDM_UL"},
+ /* BT_VX_UL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {8000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_bt_vx_ul_opp50, 1,
+ {
+ SNK_P, SERIAL_PORT_PROT, {{
+ (MCBSP1_DMA_TX*ATC_SIZE),
+ dmem_bt_vx_ul,
+ dmem_bt_vx_ul_size,
+ (1*SCHED_LOOP_8kHz)
+ } }
+ },
+ {0, 0}, {0}, "BT_VX_UL"},
+ /* MM_UL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_mm_ul, 1,
+ {
+ SRC_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX3*ATC_SIZE),
+ dmem_mm_ul, dmem_mm_ul_size,
+ (10*SCHED_LOOP_48kHz),
+ ABE_DMASTATUS_RAW, (1 << 3)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__3, 120},
+ {UPROUTE, 0}, "MM_UL"},
+ /* MM_UL2 */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_mm_ul2, 1,
+ {
+ SRC_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX4*ATC_SIZE),
+ dmem_mm_ul2, dmem_mm_ul2_size,
+ (2*SCHED_LOOP_48kHz),
+ ABE_DMASTATUS_RAW, (1 << 4)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__4, 24},
+ {UPROUTE, 0}, "MM_UL2"},
+ /* VX_UL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {8000, MONO_MSB},
+ NODRIFT, NOCALLBACK, smem_vx_ul, 1,
+ {
+ SRC_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX2*ATC_SIZE),
+ dmem_vx_ul, dmem_vx_ul_size,
+ (1*SCHED_LOOP_8kHz),
+ ABE_DMASTATUS_RAW, (1 << 2)
+ } }
+ }, {
+ CIRCULAR_BUFFER_PERIPHERAL_R__2, 2},
+ {ASRC2, 0}, "VX_UL"},
+ /* MM_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_mm_dl, 1,
+ {
+ SNK_P, PINGPONG_PORT_PROT, {{
+ (CBPr_DMA_RTX0*ATC_SIZE),
+ dmem_mm_dl, dmem_mm_dl_size,
+ (2*SCHED_LOOP_48kHz),
+ ABE_DMASTATUS_RAW, (1 << 0)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__0, 24},
+ {ASRC3, 0}, "MM_DL"},
+ /* VX_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {8000, MONO_MSB},
+ NODRIFT, NOCALLBACK, smem_vx_dl, 1,
+ {
+ SNK_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX1*ATC_SIZE),
+ dmem_vx_dl, dmem_vx_dl_size,
+ (1*SCHED_LOOP_8kHz),
+ ABE_DMASTATUS_RAW, (1 << 1)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__1, 2},
+ {ASRC1, 0}, "VX_DL"},
+ /* TONES_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_tones_dl, 1,
+ {
+ SNK_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX5*ATC_SIZE),
+ dmem_tones_dl,
+ dmem_tones_dl_size,
+ (2*SCHED_LOOP_48kHz),
+ ABE_DMASTATUS_RAW, (1 << 5)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__5, 24},
+ {0}, "TONES_DL"},
+ /* VIB_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {24000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_vib, 1,
+ {
+ SNK_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX6*ATC_SIZE),
+ dmem_vib_dl, dmem_vib_dl_size,
+ (2*SCHED_LOOP_24kHz),
+ ABE_DMASTATUS_RAW, (1 << 6)
+ } }
+ },
+ {CIRCULAR_BUFFER_PERIPHERAL_R__6, 12},
+ {0}, "VIB_DL"},
+ /* BT_VX_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {8000, MONO_MSB},
+ NODRIFT, NOCALLBACK, smem_bt_vx_dl_opp50, 1,
+ {
+ SRC_P, SERIAL_PORT_PROT, {{
+ (MCBSP1_DMA_RX*ATC_SIZE),
+ dmem_bt_vx_dl,
+ dmem_bt_vx_dl_size,
+ (1*SCHED_LOOP_8kHz),
+ } }
+ },
+ {0, 0}, {0}, "BT_VX_DL"},
+ /* PDM_DL */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {96000, SIX_MSB},
+ NODRIFT, NOCALLBACK, 1, (MCPDM_DL_ITER/6),
+ {SRC_P, MCPDMDL_PORT_PROT, {{dmem_mcpdm,
+ dmem_mcpdm_size} } },
+ {0, 0},
+ {MIXDL1, EQ1, APS1, MIXDL2, EQ2L, EQ2R, APS2L, APS2R, 0},
+ "PDM_DL"},
+ /* MM_EXT_OUT */
+ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_mm_ext_out, 1,
+ {
+ SRC_P, SERIAL_PORT_PROT, {{
+ (MCBSP1_DMA_TX*ATC_SIZE),
+ dmem_mm_ext_out, dmem_mm_ext_out_size,
+ (2*SCHED_LOOP_48kHz)
+ } }
+ }, {0, 0}, {0}, "MM_EXT_OUT"},
+ /* MM_EXT_IN */
+ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, smem_mm_ext_in_opp100, 1,
+ {
+ SNK_P, SERIAL_PORT_PROT, {{
+ (MCBSP1_DMA_RX*ATC_SIZE),
+ dmem_mm_ext_in, dmem_mm_ext_in_size,
+ (2*SCHED_LOOP_48kHz)
+ } }
+ },
+ {0, 0}, {0}, "MM_EXT_IN"},
+ /* PCM3_TX */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, 1, 1,
+ {
+ SRC_P, TDM_SERIAL_PORT_PROT, {{
+ (MCBSP3_DMA_TX *
+ ATC_SIZE),
+ dmem_mm_ext_out,
+ dmem_mm_ext_out_size,
+ (2*SCHED_LOOP_48kHz)
+ } }
+ },
+ {0, 0}, {0}, "TDM_OUT"},
+ /* PCM3_RX */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, STEREO_MSB},
+ NODRIFT, NOCALLBACK, 1, 1,
+ {
+ SRC_P, TDM_SERIAL_PORT_PROT, {{
+ (MCBSP3_DMA_RX *
+ ATC_SIZE),
+ dmem_mm_ext_in,
+ dmem_mm_ext_in_size,
+ (2*SCHED_LOOP_48kHz)
+ } }
+ },
+ {0, 0}, {0}, "TDM_IN"},
+ /* SCHD_DBG_PORT */ {
+ OMAP_ABE_PORT_ACTIVITY_IDLE, {48000, MONO_MSB},
+ NODRIFT, NOCALLBACK, 1, 1,
+ {
+ SRC_P, DMAREQ_PORT_PROT, {{
+ (CBPr_DMA_RTX7 *
+ ATC_SIZE),
+ dmem_mm_trace,
+ dmem_mm_trace_size,
+ (2*SCHED_LOOP_48kHz),
+ ABE_DMASTATUS_RAW,
+ (1 << 4)
+ } }
+ }, {CIRCULAR_BUFFER_PERIPHERAL_R__7, 24},
+ {FEAT_SEQ, FEAT_CTL, FEAT_GAINS, 0}, "SCHD_DBG"},
+};
+/*
+ * AESS/ATC destination and source address translation (except McASPs)
+ * from the original 64-bit word addresses
+ */
+const u32 abe_atc_dstid[ABE_ATC_DESC_SIZE >> 3] = {
+ /* DMA_0 DMIC PDM_DL PDM_UL McB1TX McB1RX McB2TX McB2RX 0 .. 7 */
+ 0, 0, 12, 0, 1, 0, 2, 0,
+ /* McB3TX McB3RX SLIMT0 SLIMT1 SLIMT2 SLIMT3 SLIMT4 SLIMT5 8 .. 15 */
+ 3, 0, 4, 5, 6, 7, 8, 9,
+ /* SLIMT6 SLIMT7 SLIMR0 SLIMR1 SLIMR2 SLIMR3 SLIMR4 SLIMR5 16 .. 23 */
+ 10, 11, 0, 0, 0, 0, 0, 0,
+ /* SLIMR6 SLIMR7 McASP1X ----- ----- McASP1R ----- ----- 24 .. 31 */
+ 0, 0, 14, 0, 0, 0, 0, 0,
+ /* CBPrT0 CBPrT1 CBPrT2 CBPrT3 CBPrT4 CBPrT5 CBPrT6 CBPrT7 32 .. 39 */
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ /* CBP_T0 CBP_T1 CBP_T2 CBP_T3 CBP_T4 CBP_T5 CBP_T6 CBP_T7 40 .. 47 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* CBP_T8 CBP_T9 CBP_T10 CBP_T11 CBP_T12 CBP_T13 CBP_T14
+ CBP_T15 48 .. 63 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+const u32 abe_atc_srcid[ABE_ATC_DESC_SIZE >> 3] = {
+ /* DMA_0 DMIC PDM_DL PDM_UL McB1TX McB1RX McB2TX McB2RX 0 .. 7 */
+ 0, 12, 0, 13, 0, 1, 0, 2,
+ /* McB3TX McB3RX SLIMT0 SLIMT1 SLIMT2 SLIMT3 SLIMT4 SLIMT5 8 .. 15 */
+ 0, 3, 0, 0, 0, 0, 0, 0,
+ /* SLIMT6 SLIMT7 SLIMR0 SLIMR1 SLIMR2 SLIMR3 SLIMR4 SLIMR5 16 .. 23 */
+ 0, 0, 4, 5, 6, 7, 8, 9,
+ /* SLIMR6 SLIMR7 McASP1X ----- ----- McASP1R ----- ----- 24 .. 31 */
+ 10, 11, 0, 0, 0, 14, 0, 0,
+ /* CBPrT0 CBPrT1 CBPrT2 CBPrT3 CBPrT4 CBPrT5 CBPrT6 CBPrT7 32 .. 39 */
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ /* CBP_T0 CBP_T1 CBP_T2 CBP_T3 CBP_T4 CBP_T5 CBP_T6 CBP_T7 40 .. 47 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* CBP_T8 CBP_T9 CBP_T10 CBP_T11 CBP_T12 CBP_T13 CBP_T14
+ CBP_T15 48 .. 63 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+/*
+ * Preset default routing configurations.
+ * These are given as implementation EXAMPLES; the programmer uses
+ * "abe_set_router_configuration" with its own tables (see the sketch
+ * after this table).
+ */
+const abe_router_t abe_router_ul_table_preset[NBROUTE_CONFIG][NBROUTE_UL] = {
+ /* VOICE UPLINK WITH PHOENIX MICROPHONES - UPROUTE_CONFIG_AMIC */
+ {
+ /* 0 .. 9 = MM_UL */
+ DMIC1_L_labelID, DMIC1_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+ MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID, AMIC_L_labelID,
+ AMIC_L_labelID,
+ ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ AMIC_L_labelID, AMIC_L_labelID,
+ /* 12 .. 13 = VX_UL */
+ AMIC_L_labelID, AMIC_R_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+ /* VOICE UPLINK WITH THE FIRST DMIC PAIR - UPROUTE_CONFIG_DMIC1 */
+ {
+ /* 0 .. 9 = MM_UL */
+ DMIC2_L_labelID, DMIC2_R_labelID, DMIC3_L_labelID, DMIC3_R_labelID,
+ DMIC1_L_labelID, DMIC1_R_labelID, ZERO_labelID, ZERO_labelID,
+ ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ DMIC1_L_labelID, DMIC1_R_labelID,
+ /* 12 .. 13 = VX_UL */
+ DMIC1_L_labelID, DMIC1_R_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+ /* VOICE UPLINK WITH THE SECOND DMIC PAIR - UPROUTE_CONFIG_DMIC2 */
+ {
+ /* 0 .. 9 = MM_UL */
+ DMIC3_L_labelID, DMIC3_R_labelID, DMIC1_L_labelID, DMIC1_R_labelID,
+ DMIC2_L_labelID, DMIC2_R_labelID, ZERO_labelID, ZERO_labelID,
+ ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ DMIC2_L_labelID, DMIC2_R_labelID,
+ /* 12 .. 13 = VX_UL */
+ DMIC2_L_labelID, DMIC2_R_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+ /* VOICE UPLINK WITH THE LAST DMIC PAIR - UPROUTE_CONFIG_DMIC3 */
+ {
+ /* 0 .. 9 = MM_UL */
+ AMIC_L_labelID, AMIC_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+ DMIC3_L_labelID, DMIC3_R_labelID, ZERO_labelID, ZERO_labelID,
+ ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ DMIC3_L_labelID, DMIC3_R_labelID,
+ /* 12 .. 13 = VX_UL */
+ DMIC3_L_labelID, DMIC3_R_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+ /* VOICE UPLINK WITH THE BT - UPROUTE_CONFIG_BT */
+ {
+ /* 0 .. 9 = MM_UL */
+ BT_UL_L_labelID, BT_UL_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+ DMIC3_L_labelID, DMIC3_R_labelID, DMIC1_L_labelID, DMIC1_R_labelID,
+ ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ AMIC_L_labelID, AMIC_R_labelID,
+ /* 12 .. 13 = VX_UL */
+ BT_UL_L_labelID, BT_UL_R_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+ /* VOICE UPLINK WITH THE BT - UPROUTE_ECHO_MMUL2 */
+ {
+ /* 0 .. 9 = MM_UL */
+ MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID, BT_UL_L_labelID,
+ BT_UL_R_labelID, AMIC_L_labelID, AMIC_R_labelID,
+ ZERO_labelID, ZERO_labelID, ZERO_labelID, ZERO_labelID,
+ /* 10 .. 11 = MM_UL2 */
+ EchoRef_L_labelID, EchoRef_R_labelID,
+ /* 12 .. 13 = VX_UL */
+ AMIC_L_labelID, AMIC_L_labelID,
+ /* 14 .. 15 = RESERVED */
+ ZERO_labelID, ZERO_labelID,
+ },
+};
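As the comment before the presets notes, a caller can build its own 16-slot
uplink table with the same layout -- slots 0..9 feed MM_UL, 10..11 MM_UL2,
12..13 VX_UL, 14..15 are reserved. A sketch using only label IDs already used
above (the table name is hypothetical; the exact abe_set_router_configuration
prototype is outside this hunk, so no call is shown):

/* illustrative sketch, not part of this patch */
static const abe_router_t my_ul_route[NBROUTE_UL] = {
        /* 0 .. 9 = MM_UL: record the two AMIC channels, zero the rest */
        AMIC_L_labelID, AMIC_R_labelID, ZERO_labelID, ZERO_labelID,
        ZERO_labelID, ZERO_labelID, ZERO_labelID, ZERO_labelID,
        ZERO_labelID, ZERO_labelID,
        /* 10 .. 11 = MM_UL2 */
        AMIC_L_labelID, AMIC_R_labelID,
        /* 12 .. 13 = VX_UL */
        AMIC_L_labelID, AMIC_R_labelID,
        /* 14 .. 15 = RESERVED */
        ZERO_labelID, ZERO_labelID,
};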
+/* all default routing configurations */
+abe_router_t abe_router_ul_table[NBROUTE_CONFIG_MAX][NBROUTE_UL];
+
+const abe_sequence_t seq_null = {
+ NOMASK, {CL_M1, 0, {0, 0, 0, 0}, 0}, {CL_M1, 0, {0, 0, 0, 0}, 0}
+};
+/* table of new subroutines called in the sequence */
+abe_subroutine2 abe_all_subsubroutine[MAXNBSUBROUTINE];
+/* number of parameters per calls */
+u32 abe_all_subsubroutine_nparam[MAXNBSUBROUTINE];
+/* index of the subroutine */
+u32 abe_subroutine_id[MAXNBSUBROUTINE];
+/* parameters of the subroutine (if any) */
+u32 *abe_all_subroutine_params[MAXNBSUBROUTINE];
+u32 abe_subroutine_write_pointer;
+/* table of all sequences */
+abe_sequence_t abe_all_sequence[MAXNBSEQUENCE];
+u32 abe_sequence_write_pointer;
+/* current number of pending sequences (avoids scanning the table) */
+u32 abe_nb_pending_sequences;
+/* pending sequences due to resource collision */
+u32 abe_pending_sequences[MAXNBSEQUENCE];
+/* mask of unshareable resources among other sequences */
+u32 abe_global_sequence_mask;
+/* table of active sequences */
+abe_seq_t abe_active_sequence[MAXACTIVESEQUENCE][MAXSEQUENCESTEPS];
+/* index of the plugged subroutine doing ping-pong cache-flush DMEM accesses */
+u32 abe_irq_pingpong_player_id;
+EXPORT_SYMBOL(abe_irq_pingpong_player_id);
+/* index of the plugged subroutine doing acoustics protection adaptation */
+u32 abe_irq_aps_adaptation_id;
+/* base addresses of the ping pong buffers in bytes addresses */
+u32 abe_base_address_pingpong[MAX_PINGPONG_BUFFERS];
+/* size of each ping/pong buffers */
+u32 abe_size_pingpong;
+/* number of ping/pong buffer being used */
+u32 abe_nb_pingpong;
+/*
+ * MAIN PORT SELECTION
+ */
+const u32 abe_port_priority[LAST_PORT_ID - 1] = {
+ OMAP_ABE_PDM_DL_PORT,
+ OMAP_ABE_PDM_UL_PORT,
+ OMAP_ABE_MM_EXT_OUT_PORT,
+ OMAP_ABE_MM_EXT_IN_PORT,
+ OMAP_ABE_DMIC_PORT,
+ OMAP_ABE_MM_UL_PORT,
+ OMAP_ABE_MM_UL2_PORT,
+ OMAP_ABE_MM_DL_PORT,
+ OMAP_ABE_TONES_DL_PORT,
+ OMAP_ABE_VX_UL_PORT,
+ OMAP_ABE_VX_DL_PORT,
+ OMAP_ABE_BT_VX_DL_PORT,
+ OMAP_ABE_BT_VX_UL_PORT,
+ OMAP_ABE_VIB_DL_PORT,
+};
diff --git a/sound/soc/omap/abe/abe_dbg.c b/sound/soc/omap/abe/abe_dbg.c
new file mode 100644
index 0000000..d1b160f
--- /dev/null
+++ b/sound/soc/omap/abe/abe_dbg.c
@@ -0,0 +1,201 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dbg.h"
+#include "abe.h"
+#include "abe_mem.h"
+
+/**
+ * omap_abe_dbg_reset
+ * @dbg: pointer to the ABE debug handle
+ *
+ * Called to reset the Audio Back End debug global data.
+ * This ensures that the ABE debug trace pointer is reset correctly.
+ */
+int omap_abe_dbg_reset(struct omap_abe_dbg *dbg)
+{
+ dbg->activity_log_write_pointer = 0;
+ dbg->mask = 0;
+
+ return 0;
+}
+
+/**
+ * omap_abe_connect_debug_trace
+ * @abe: pointer to the ABE handle
+ * @dma2: pointer to the DMEM trace buffer descriptor to fill
+ *
+ * Returns the address and size of the real-time debug trace buffer;
+ * its content may vary from one firmware release to another.
+ */
+int omap_abe_connect_debug_trace(struct omap_abe *abe,
+ struct omap_abe_dma *dma2)
+{
+ _log(ABE_ID_CONNECT_DEBUG_TRACE, 0, 0, 0);
+
+	/* return the base address of the ping buffer in L3 and L4 spaces */
+ (*dma2).data = (void *)(OMAP_ABE_D_DEBUG_FIFO_ADDR +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).l3_dmem = (void *)(OMAP_ABE_D_DEBUG_FIFO_ADDR +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).l4_dmem = (void *)(OMAP_ABE_D_DEBUG_FIFO_ADDR +
+ ABE_DEFAULT_BASE_ADDRESS_L4 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).iter = (OMAP_ABE_D_DEBUG_FIFO_SIZE + OMAP_ABE_D_DEBUG_FIFO_HAL_SIZE)>>2;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_connect_debug_trace);
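A short usage sketch for the trace-buffer query above; the wrapper name is
hypothetical and only fields filled in by the function are read:

/* illustrative sketch, not part of this patch */
static void map_trace_buffer(struct omap_abe *abe)
{
        struct omap_abe_dma dma;

        omap_abe_connect_debug_trace(abe, &dma);
        /* dma.l3_dmem / dma.l4_dmem now point at the DMEM debug FIFO as
         * seen from the L3 and L4 interconnects; dma.iter is its length
         * in 32-bit words */
}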
+
+/**
+ * omap_abe_set_debug_trace
+ * @dbg: pointer to the ABE debug handle
+ * @debug: debug log level
+ *
+ * Set the debug level for the ABE trace. This level controls how much
+ * information is put into the ABE trace buffer. This buffer can contain
+ * both AESS firmware and MPU traces.
+ */
+int omap_abe_set_debug_trace(struct omap_abe_dbg *dbg, int debug)
+{
+ _log(ABE_ID_SET_DEBUG_TRACE, 0, 0, 0);
+
+ dbg->mask = debug;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_set_debug_trace);
+
+/**
+ * omap_abe_dbg_log - Log an ABE trace entry inside the circular buffer
+ * @abe: pointer to the ABE handle
+ * @x: data to be logged
+ * @y: data to be logged
+ * @z: data to be logged
+ * @t: data to be logged
+ *
+ * Global state used:
+ * abe->dbg.activity_log : circular buffer holding the data
+ * abe->dbg.activity_log_write_pointer : circular write pointer
+ *
+ * Saves the data into the trace log.
+ */
+void omap_abe_dbg_log(struct omap_abe *abe, u32 x, u32 y, u32 z, u32 t)
+{
+ u32 time_stamp, data;
+ struct omap_abe_dbg *dbg = &abe->dbg;
+
+ if (dbg->activity_log_write_pointer >=
+ (OMAP_ABE_D_DEBUG_HAL_TASK_SIZE - 2))
+ dbg->activity_log_write_pointer = 0;
+
+	/* copy into the DMEM trace buffer, the Cortex-A9 local buffer and a
+	   small 7-word circular buffer of the DMA trace, ending with the
+	   0x55555555 tag for the last word */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, OMAP_ABE_D_LOOPCOUNTER_ADDR,
+ (u32 *) &time_stamp, sizeof(time_stamp));
+ dbg->activity_log[dbg->activity_log_write_pointer] = time_stamp;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_DEBUG_HAL_TASK_ADDR +
+ (dbg->activity_log_write_pointer << 2),
+ (u32 *) &time_stamp, sizeof(time_stamp));
+ dbg->activity_log_write_pointer++;
+
+ data = ((x & MAX_UINT8) << 24) | ((y & MAX_UINT8) << 16) |
+ ((z & MAX_UINT8) << 8)
+ | (t & MAX_UINT8);
+ dbg->activity_log[dbg->activity_log_write_pointer] = data;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_DEBUG_HAL_TASK_ADDR +
+ (dbg->activity_log_write_pointer << 2),
+ (u32 *) &data, sizeof(data));
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_DEBUG_FIFO_HAL_ADDR +
+ ((dbg->activity_log_write_pointer << 2) &
+ (OMAP_ABE_D_DEBUG_FIFO_HAL_SIZE - 1)), (u32 *) &data,
+ sizeof(data));
+
+ data = ABE_DBG_MAGIC_NUMBER;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_DEBUG_FIFO_HAL_ADDR +
+ (((dbg->activity_log_write_pointer + 1) << 2) &
+ (OMAP_ABE_D_DEBUG_FIFO_HAL_SIZE - 1)),
+ (u32 *) &data, sizeof(data));
+ dbg->activity_log_write_pointer++;
+
+ if (dbg->activity_log_write_pointer >= OMAP_ABE_D_DEBUG_HAL_TASK_SIZE)
+ dbg->activity_log_write_pointer = 0;
+}
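For reference, the entry layout written by the logger above, as a worked
example rather than new code:

/*
 * Each log entry is two 32-bit words: the firmware loop counter used as a
 * time stamp, then the four byte-wide arguments packed MSB-first,
 *
 *      data = (x << 24) | (y << 16) | (z << 8) | t;   (each masked to 8 bits)
 *
 * so omap_abe_dbg_log(abe, 0x12, 0x34, 0x56, 0x78) stores 0x12345678
 * right after the time stamp.
 */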
+
+/**
+ * omap_abe_dbg_error - Log an ABE error
+ * @abe: pointer to the ABE handle
+ * @level: level of error
+ * @error: error ID to log
+ *
+ * Log the ABE errors.
+ */
+void omap_abe_dbg_error(struct omap_abe *abe, int level, int error)
+{
+ omap_abe_dbg_log(abe, error, MAX_UINT8, MAX_UINT8, MAX_UINT8);
+}
diff --git a/sound/soc/omap/abe/abe_dbg.h b/sound/soc/omap/abe/abe_dbg.h
new file mode 100644
index 0000000..8639806
--- /dev/null
+++ b/sound/soc/omap/abe/abe_dbg.h
@@ -0,0 +1,231 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_DBG_H_
+#define _ABE_DBG_H_
+
+#include <linux/mutex.h>
+
+#include "abe_typ.h"
+#include "abe_dm_addr.h"
+
+/*
+ * Debug trace format
+ * TIME 2 bytes from ABE : 4kHz period of the FW scheduler
+ * SUBID 1 byte : HAL API index
+ * From 0 to 16 bytes : parameters of the subroutine
+ * every 32 dumps a tag (0x55555555) is pushed onto the debug trace
+ */
+#define dbg_bitfield_offset 8
+#define dbg_api_calls 0
+#define dbg_mapi (1L << (dbg_api_calls + dbg_bitfield_offset))
+#define dbg_external_data_access 1
+#define dbg_mdata (1L << (dbg_external_data_access + dbg_bitfield_offset))
+#define dbg_err_codes 2
+#define dbg_merr (1L << (dbg_api_calls + dbg_bitfield_offset))
+#define ABE_DBG_MAGIC_NUMBER 0x55555555
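A worked expansion of one trace ID built from the bit-field definitions above:

/*
 * dbg_mapi               = 1L << (0 + 8)  = 0x100
 * ABE_ID_PLUG_SUBROUTINE = 10 + dbg_mapi  = 0x10A
 *
 * The low byte (0x0A) is the HAL API index carried in the SUBID field of
 * the trace format described above; the upper bits mark the trace class
 * (API call, external data access or error code).
 */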
+/*
+ * IDs used for traces
+ */
+#define ABE_ID_RESET_HAL (1 + dbg_mapi)
+#define ABE_ID_LOAD_FW (2 + dbg_mapi)
+#define ABE_ID_DEFAULT_CONFIGURATION (3 + dbg_mapi)
+#define ABE_ID_IRQ_PROCESSING (4 + dbg_mapi)
+#define ABE_ID_EVENT_GENERATOR_SWITCH (5 + dbg_mapi)
+#define ABE_ID_READ_HARDWARE_CONFIGURATION (6 + dbg_mapi)
+#define ABE_ID_READ_LOWEST_OPP (7 + dbg_mapi)
+#define ABE_ID_WRITE_GAIN (8 + dbg_mapi)
+#define ABE_ID_SET_ASRC_DRIFT_CONTROL (9 + dbg_mapi)
+#define ABE_ID_PLUG_SUBROUTINE (10 + dbg_mapi)
+#define ABE_ID_UNPLUG_SUBROUTINE (11 + dbg_mapi)
+#define ABE_ID_PLUG_SEQUENCE (12 + dbg_mapi)
+#define ABE_ID_LAUNCH_SEQUENCE (13 + dbg_mapi)
+#define ABE_ID_LAUNCH_SEQUENCE_param (14 + dbg_mapi)
+#define ABE_ID_CONNECT_IRQ_PING_PONG_PORT (15 + dbg_mapi)
+#define ABE_ID_READ_ANALOG_GAIN_DL (16 + dbg_mapi)
+#define ABE_ID_READ_ANALOG_GAIN_UL (17 + dbg_mapi)
+#define ABE_ID_ENABLE_DYN_UL_GAIN (18 + dbg_mapi)
+#define ABE_ID_DISABLE_DYN_UL_GAIN (19 + dbg_mapi)
+#define ABE_ID_ENABLE_DYN_EXTENSION (20 + dbg_mapi)
+#define ABE_ID_DISABLE_DYN_EXTENSION (21 + dbg_mapi)
+#define ABE_ID_NOTIFY_ANALOG_GAIN_CHANGED (22 + dbg_mapi)
+#define ABE_ID_RESET_PORT (23 + dbg_mapi)
+#define ABE_ID_READ_REMAINING_DATA (24 + dbg_mapi)
+#define ABE_ID_DISABLE_DATA_TRANSFER (25 + dbg_mapi)
+#define ABE_ID_ENABLE_DATA_TRANSFER (26 + dbg_mapi)
+#define ABE_ID_READ_GLOBAL_COUNTER (27 + dbg_mapi)
+#define ABE_ID_SET_DMIC_FILTER (28 + dbg_mapi)
+#define ABE_ID_SET_OPP_PROCESSING (29 + dbg_mapi)
+#define ABE_ID_SET_PING_PONG_BUFFER (30 + dbg_mapi)
+#define ABE_ID_READ_PORT_ADDRESS (31 + dbg_mapi)
+#define ABE_ID_LOAD_FW_param (32 + dbg_mapi)
+#define ABE_ID_WRITE_HEADSET_OFFSET (33 + dbg_mapi)
+#define ABE_ID_READ_GAIN_RANGES (34 + dbg_mapi)
+#define ABE_ID_WRITE_EQUALIZER (35 + dbg_mapi)
+#define ABE_ID_WRITE_ASRC (36 + dbg_mapi)
+#define ABE_ID_WRITE_APS (37 + dbg_mapi)
+#define ABE_ID_WRITE_MIXER (38 + dbg_mapi)
+#define ABE_ID_WRITE_EANC (39 + dbg_mapi)
+#define ABE_ID_WRITE_ROUTER (40 + dbg_mapi)
+#define ABE_ID_READ_PORT_GAIN (41 + dbg_mapi)
+#define ABE_ID_ASRC (42 + dbg_mapi)
+#define ABE_ID_READ_APS (43 + dbg_mapi)
+#define ABE_ID_READ_APS_energy (44 + dbg_mapi)
+#define ABE_ID_READ_MIXER (45 + dbg_mapi)
+#define ABE_READ_EANC (46 + dbg_mapi)
+#define ABE_ID_READ_ROUTER (47 + dbg_mapi)
+#define ABE_ID_READ_DEBUG_TRACE (48 + dbg_mapi)
+#define ABE_ID_SET_SEQUENCE_TIME_ACCURACY (49 + dbg_mapi)
+#define ABE_ID_SET_DEBUG_PINS (50 + dbg_mapi)
+#define ABE_ID_SELECT_MAIN_PORT (51 + dbg_mapi)
+#define ABE_ID_WRITE_EVENT_GENERATOR (52 + dbg_mapi)
+#define ABE_ID_READ_USE_CASE_OPP (53 + dbg_mapi)
+#define ABE_ID_SELECT_DATA_SOURCE (54 + dbg_mapi)
+#define ABE_ID_READ_NEXT_PING_PONG_BUFFER (55 + dbg_mapi)
+#define ABE_ID_INIT_PING_PONG_BUFFER (56 + dbg_mapi)
+#define ABE_ID_CONNECT_CBPR_DMAREQ_PORT (57 + dbg_mapi)
+#define ABE_ID_CONNECT_DMAREQ_PORT (58 + dbg_mapi)
+#define ABE_ID_CONNECT_DMAREQ_PING_PONG_PORT (59 + dbg_mapi)
+#define ABE_ID_CONNECT_SERIAL_PORT (60 + dbg_mapi)
+#define ABE_ID_CONNECT_SLIMBUS_PORT (61 + dbg_mapi)
+#define ABE_ID_READ_GAIN (62 + dbg_mapi)
+#define ABE_ID_SET_ROUTER_CONFIGURATION (63 + dbg_mapi)
+#define ABE_ID_CONNECT_DEBUG_TRACE (64 + dbg_mapi)
+#define ABE_ID_SET_DEBUG_TRACE (65 + dbg_mapi)
+#define ABE_ID_REMOTE_DEBUGGER_INTERFACE (66 + dbg_mapi)
+#define ABE_ID_ENABLE_TEST_PATTERN (67 + dbg_mapi)
+#define ABE_ID_CONNECT_TDM_PORT (68 + dbg_mapi)
+/*
+ * IDs used for error codes
+ */
+#define NOERR 0
+#define ABE_SET_MEMORY_CONFIG_ERR (1 + dbg_merr)
+#define ABE_BLOCK_COPY_ERR (2 + dbg_merr)
+#define ABE_SEQTOOLONG (3 + dbg_merr)
+#define ABE_BADSAMPFORMAT (4 + dbg_merr)
+#define ABE_SET_ATC_MEMORY_CONFIG_ERR (5 + dbg_merr)
+#define ABE_PROTOCOL_ERROR (6 + dbg_merr)
+#define ABE_PARAMETER_ERROR (7 + dbg_merr)
+/* port programmed while still running */
+#define ABE_PORT_REPROGRAMMING (8 + dbg_merr)
+#define ABE_READ_USE_CASE_OPP_ERR (9 + dbg_merr)
+#define ABE_PARAMETER_OVERFLOW (10 + dbg_merr)
+#define ABE_FW_FIFO_WRITE_PTR_ERR (11 + dbg_merr)
+
+/*
+ * Error source masks
+ */
+#define OMAP_ABE_ERR_LIB (1 << 1)
+#define OMAP_ABE_ERR_API (1 << 2)
+#define OMAP_ABE_ERR_INI (1 << 3)
+#define OMAP_ABE_ERR_SEQ (1 << 4)
+#define OMAP_ABE_ERR_DBG (1 << 5)
+#define OMAP_ABE_ERR_EXT (1 << 6)
+
+struct omap_abe_dbg {
+ /* Debug Data */
+ u32 activity_log[OMAP_ABE_D_DEBUG_HAL_TASK_SIZE];
+ u32 activity_log_write_pointer;
+ u32 mask;
+};
+
+struct omap_abe_dma {
+ /* OCP L3 pointer to the first address of the */
+ void *data;
+ /* destination buffer (either DMA or Ping-Pong read/write pointers). */
+ /* address L3 when addressing the DMEM buffer instead of CBPr */
+ void *l3_dmem;
+ /* address L3 translated to L4 the ARM memory space */
+ void *l4_dmem;
+ /* number of iterations for the DMA data moves. */
+ u32 iter;
+};
+
+struct omap_abe {
+ void __iomem *io_base[5];
+ u32 firmware_version_number;
+ u16 MultiFrame[25][8];
+ u32 compensated_mixer_gain;
+ u8 muted_gains_indicator[MAX_NBGAIN_CMEM];
+ u32 desired_gains_decibel[MAX_NBGAIN_CMEM];
+ u32 muted_gains_decibel[MAX_NBGAIN_CMEM];
+ u32 desired_gains_linear[MAX_NBGAIN_CMEM];
+ u32 desired_ramp_delay_ms[MAX_NBGAIN_CMEM];
+ int pp_buf_id;
+ int pp_buf_id_next;
+ int pp_buf_addr[4];
+ int pp_first_irq;
+ struct mutex mutex;
+ u32 warm_boot;
+
+ u32 irq_dbg_read_ptr;
+ u32 dbg_param;
+
+ struct omap_abe_dbg dbg;
+};
+
+/**
+ * omap_abe_dbg_reset
+ * @dbg: pointer to the ABE debug handle
+ *
+ * Called to reset the Audio Back End debug global data.
+ * This ensures that the ABE debug trace pointer is reset correctly.
+ */
+int omap_abe_dbg_reset(struct omap_abe_dbg *dbg);
+
+#endif /* _ABE_DBG_H_ */
diff --git a/sound/soc/omap/abe/abe_def.h b/sound/soc/omap/abe/abe_def.h
new file mode 100644
index 0000000..0051bab
--- /dev/null
+++ b/sound/soc/omap/abe/abe_def.h
@@ -0,0 +1,308 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_DEF_H_
+#define _ABE_DEF_H_
+/*
+ * HARDWARE AND PERIPHERAL DEFINITIONS
+ */
+/* MM_DL */
+#define ABE_CBPR0_IDX 0
+/* VX_DL */
+#define ABE_CBPR1_IDX 1
+/* VX_UL */
+#define ABE_CBPR2_IDX 2
+/* MM_UL */
+#define ABE_CBPR3_IDX 3
+/* MM_UL2 */
+#define ABE_CBPR4_IDX 4
+/* TONES */
+#define ABE_CBPR5_IDX 5
+/* VIB */
+#define ABE_CBPR6_IDX 6
+/* DEBUG/CTL */
+#define ABE_CBPR7_IDX 7
+#define CIRCULAR_BUFFER_PERIPHERAL_R__0 (0x100 + ABE_CBPR0_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__1 (0x100 + ABE_CBPR1_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__2 (0x100 + ABE_CBPR2_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__3 (0x100 + ABE_CBPR3_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__4 (0x100 + ABE_CBPR4_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__5 (0x100 + ABE_CBPR5_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__6 (0x100 + ABE_CBPR6_IDX*4)
+#define CIRCULAR_BUFFER_PERIPHERAL_R__7 (0x100 + ABE_CBPR7_IDX*4)
+#define PING_PONG_WITH_MCU_IRQ 1
+#define PING_PONG_WITH_DSP_IRQ 2
+/* ID used for LIB memory copy subroutines */
+#define COPY_FROM_ABE_TO_HOST 1
+#define COPY_FROM_HOST_TO_ABE 2
+/*
+ * INTERNAL DEFINITIONS
+ */
+#define ABE_FIRMWARE_MAX_SIZE 26629
+/* 24 Q6.26 coefficients */
+#define NBEQ1 25
+/* 2x12 Q6.26 coefficients */
+#define NBEQ2 13
+/* TBD APS first set of parameters */
+#define NBAPS1 10
+/* TBD APS second set of parameters */
+#define NBAPS2 10
+/* Mixer used for sending tones to the uplink voice path */
+#define NBMIX_AUDIO_UL 2
+/* Main downlink mixer */
+#define NBMIX_DL1 4
+/* Handsfree downlink mixer */
+#define NBMIX_DL2 4
+/* Side-tone mixer */
+#define NBMIX_SDT 2
+/* Echo reference mixer */
+#define NBMIX_ECHO 2
+/* Voice record mixer */
+#define NBMIX_VXREC 4
+/* unsigned version of (-1) */
+#define CC_M1 0xFF
+#define CS_M1 0xFFFF
+#define CL_M1 0xFFFFFFFFL
+/*
+ Mixer ID Input port ID Comments
+ DL1_MIXER 0 MMDL path
+ 1 MMUL2 path
+ 2 VXDL path
+ 3 TONES path
+ SDT_MIXER 0 Uplink path
+ 1 Downlink path
+ ECHO_MIXER 0 DL1_MIXER path
+ 1 DL2_MIXER path
+ AUDUL_MIXER 0 TONES_DL path
+ 1 Uplink path
+ 2 MM_DL path
+ VXREC_MIXER 0 TONES_DL path
+ 1 VX_DL path
+ 2 MM_DL path
+ 3 VX_UL path
+*/
+#define MIX_VXUL_INPUT_MM_DL 0
+#define MIX_VXUL_INPUT_TONES 1
+#define MIX_VXUL_INPUT_VX_UL 2
+#define MIX_VXUL_INPUT_VX_DL 3
+#define MIX_DL1_INPUT_MM_DL 0
+#define MIX_DL1_INPUT_MM_UL2 1
+#define MIX_DL1_INPUT_VX_DL 2
+#define MIX_DL1_INPUT_TONES 3
+#define MIX_DL2_INPUT_MM_DL 0
+#define MIX_DL2_INPUT_MM_UL2 1
+#define MIX_DL2_INPUT_VX_DL 2
+#define MIX_DL2_INPUT_TONES 3
+#define MIX_SDT_INPUT_UP_MIXER 0
+#define MIX_SDT_INPUT_DL1_MIXER 1
+#define MIX_AUDUL_INPUT_MM_DL 0
+#define MIX_AUDUL_INPUT_TONES 1
+#define MIX_AUDUL_INPUT_UPLINK 2
+#define MIX_AUDUL_INPUT_VX_DL 3
+#define MIX_VXREC_INPUT_MM_DL 0
+#define MIX_VXREC_INPUT_TONES 1
+#define MIX_VXREC_INPUT_VX_UL 2
+#define MIX_VXREC_INPUT_VX_DL 3
+#define MIX_ECHO_DL1 0
+#define MIX_ECHO_DL2 1
+/* nb of samples to route */
+#define NBROUTE_UL 16
+/* 10 routing tables max */
+#define NBROUTE_CONFIG_MAX 10
+/* 6 pre-computed routing tables */
+#define NBROUTE_CONFIG 6
+/* AMIC on VX_UL */
+#define UPROUTE_CONFIG_AMIC 0
+/* DMIC first pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC1 1
+/* DMIC second pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC2 2
+/* DMIC last pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC3 3
+/* BT_UL on VX_UL */
+#define UPROUTE_CONFIG_BT 4
+/* ECHO_REF on MM_UL2 */
+#define UPROUTE_ECHO_MMUL2 5
+/* call-back indexes */
+#define MAXCALLBACK 100
+/* subroutines */
+#define MAXNBSUBROUTINE 100
+/* time-controlled sequences */
+#define MAXNBSEQUENCE 20
+/* maximum simultaneous active sequences */
+#define MAXACTIVESEQUENCE 20
+/* max number of steps in the sequences */
+#define MAXSEQUENCESTEPS 2
+/* max number of feature associated to a port */
+#define MAXFEATUREPORT 12
+#define SUB_0_PARAM 0
+/* number of parameters per sequence calls */
+#define SUB_1_PARAM 1
+#define SUB_2_PARAM 2
+#define SUB_3_PARAM 3
+#define SUB_4_PARAM 4
+/* active sequence mask = 0 means the line is free */
+#define FREE_LINE 0
+/* no ask for collision protection */
+#define NOMASK (1 << 0)
+/* do not allow a PDM OFF during the execution of this sequence */
+#define MASK_PDM_OFF (1 << 1)
+/* do not allow a PDM ON during the execution of this sequence */
+#define MASK_PDM_ON (1 << 2)
+/* explicit name of the feature */
+#define NBCHARFEATURENAME 16
+/* explicit name of the port */
+#define NBCHARPORTNAME 16
+/* sink / input port from the host point of view (or AESS for DMIC/McPDM/...) */
+#define SNK_P ABE_ATC_DIRECTION_IN
+/* source / output port */
+#define SRC_P ABE_ATC_DIRECTION_OUT
+/* no ASRC applied */
+#define NODRIFT 0
+/* for abe_set_asrc_drift_control */
+#define FORCED_DRIFT_CONTROL 1
+/* for abe_set_asrc_drift_control */
+#define ADPATIVE_DRIFT_CONTROL 2
+/* number of task/slot depending on the OPP value */
+#define DOPPMODE32_OPP100 (0x00000010)
+#define DOPPMODE32_OPP50 (0x0000000C)
+#define DOPPMODE32_OPP25 (0x0000004)
+/*
+ * ABE CONST AREA FOR PARAMETERS TRANSLATION
+ */
+#define GAIN_MAXIMUM 3000L
+#define GAIN_24dB 2400L
+#define GAIN_18dB 1800L
+#define GAIN_12dB 1200L
+#define GAIN_6dB 600L
+/* default gain = 1 */
+#define GAIN_0dB 0L
+#define GAIN_M6dB -600L
+#define GAIN_M7dB -700L
+#define GAIN_M12dB -1200L
+#define GAIN_M18dB -1800L
+#define GAIN_M24dB -2400L
+#define GAIN_M30dB -3000L
+#define GAIN_M40dB -4000L
+#define GAIN_M50dB -5000L
+/* muted gain = -120 decibels */
+#define MUTE_GAIN -12000L
+#define GAIN_TOOLOW -13000L
+#define GAIN_MUTE MUTE_GAIN
+#define RAMP_MINLENGTH 0L
+/* ramp_t is in milliseconds */
+#define RAMP_0MS 0L
+#define RAMP_1MS 1L
+#define RAMP_2MS 2L
+#define RAMP_5MS 5L
+#define RAMP_10MS 10L
+#define RAMP_20MS 20L
+#define RAMP_50MS 50L
+#define RAMP_100MS 100L
+#define RAMP_200MS 200L
+#define RAMP_500MS 500L
+#define RAMP_1000MS 1000L
+#define RAMP_MAXLENGTH 10000L
+/* for abe_translate_gain_format */
+#define LINABE_TO_DECIBELS 1
+#define DECIBELS_TO_LINABE 2
+/* for abe_translate_ramp_format */
+#define IIRABE_TO_MICROS 1
+#define MICROS_TO_IIABE 2
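The GAIN_* values above are expressed in hundredths of a dB (millibels), so
the linear amplitude ratio is 10^(millibel / 2000). A user-space sketch of
that conversion; the helper below is hypothetical and unrelated to the
firmware's fixed-point abe_translate_gain_format path mentioned above:

/* illustrative user-space sketch, not part of this patch */
#include <math.h>

static double abe_millibel_to_linear(long millibel)
{
        return pow(10.0, (double)millibel / 2000.0);
}

/* GAIN_0dB (0) -> 1.0, GAIN_M6dB (-600) -> ~0.501, MUTE_GAIN (-12000) -> 1e-6 */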
+/*
+ * ABE CONST AREA FOR PERIPHERAL TUNING
+ */
+/* port idled IDLE_P */
+#define OMAP_ABE_PORT_ACTIVITY_IDLE 1
+/* port initialized, ready to be activated */
+#define OMAP_ABE_PORT_INITIALIZED 3
+/* port activated RUN_P */
+#define OMAP_ABE_PORT_ACTIVITY_RUNNING 2
+#define NOCALLBACK 0
+#define NOPARAMETER 0
+/* number of ATC accesses per AMIC DMA request; all the FIFOs are enabled */
+#define MCPDM_UL_ITER 4
+/* All the McPDM FIFOs are enabled simultaneously */
+#define MCPDM_DL_ITER 24
+/* All the DMIC FIFOs are enabled simultaneously */
+#define DMIC_ITER 12
+/* TBD later if needed */
+#define MAX_PINGPONG_BUFFERS 2
+/*
+ * Indexes to the subroutines
+ */
+#define SUB_WRITE_MIXER 1
+#define SUB_WRITE_PORT_GAIN 2
+/* OLD WAY */
+#define c_feat_init_eq 1
+#define c_feat_read_eq1 2
+#define c_write_eq1 3
+#define c_feat_read_eq2 4
+#define c_write_eq2 5
+#define c_feat_read_eq3 6
+#define c_write_eq3 7
+/* max number of gain to be controlled by HAL */
+#define MAX_NBGAIN_CMEM 36
+/*
+ * MACROS
+ */
+#define maximum(a, b) (((a) < (b)) ? (b) : (a))
+#define minimum(a, b) (((a) > (b)) ? (b) : (a))
+#define absolute(a) (((a) > 0) ? (a) : ((-1)*(a)))
+#define HAL_VERSIONS 9
+#endif/* _ABE_DEF_H_ */
diff --git a/sound/soc/omap/abe/abe_define.h b/sound/soc/omap/abe/abe_define.h
new file mode 100644
index 0000000..412fea8
--- /dev/null
+++ b/sound/soc/omap/abe/abe_define.h
@@ -0,0 +1,120 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _ABE_DEFINE_H_
+#define _ABE_DEFINE_H_
+#define ATC_DESCRIPTOR_NUMBER 64
+#define PROCESSING_SLOTS 25
+#define TASK_POOL_LENGTH 136
+#define MCU_IRQ 0x24
+#define MCU_IRQ_SHIFT2 0x90
+#define DMA_REQ_SHIFT2 0x210
+#define DSP_IRQ 0x4c
+#define IRQtag_APS 0x000a
+#define IRQtag_COUNT 0x000c
+#define IRQtag_PP 0x000d
+#define DMAreq_7 0x0080
+#define IRQ_FIFO_LENGTH 16
+#define SDT_EQ_ORDER 4
+#define DL_EQ_ORDER 12
+#define MIC_FILTER_ORDER 4
+#define GAINS_WITH_RAMP1 14
+#define GAINS_WITH_RAMP2 22
+#define GAINS_WITH_RAMP_TOTAL 36
+#define ASRC_MEMLENGTH 40
+#define ASRC_UL_VX_FIR_L 19
+#define ASRC_DL_VX_FIR_L 19
+#define ASRC_MM_EXT_IN_FIR_L 18
+#define ASRC_margin 2
+#define ASRC_N_8k 2
+#define ASRC_N_16k 4
+#define ASRC_N_48k 12
+#define VIBRA_N 5
+#define VIBRA1_IIR_MEMSIZE 11
+#define SAMP_LOOP_96K 24
+#define SAMP_LOOP_48K 12
+#define SAMP_LOOP_48KM1 11
+#define SAMP_LOOP_48KM2 10
+#define SAMP_LOOP_16K 4
+#define SAMP_LOOP_8K 2
+#define INPUT_SCALE_SHIFTM2 5156
+#define SATURATION 8420
+#define SATURATION_7FFF 8416
+#define OUTPUT_SCALE_SHIFTM2 5160
+#define NTAPS_SRC_44P1 24
+#define NTAPS_SRC_44P1_M4 96
+#define NTAPS_SRC_44P1_THR 60
+#define NTAPS_SRC_44P1_THRM4 240
+#define DRIFT_COUNTER_44P1M1 443
+#define NB_OF_PHASES_SRC44P1 12
+#define NB_OF_PHASES_SRC44P1M1 11
+#define SRC44P1_BUFFER_SIZE 96
+#define SRC44P1_BUFFER_SIZE_M4 384
+#define SRC44P1_INIT_RPTR 60
+#define MUTE_SCALING 5164
+#define ABE_PMEM 1
+#define ABE_CMEM 2
+#define ABE_SMEM 3
+#define ABE_DMEM 4
+#define ABE_ATC 5
+#define ASRC_BT_UL_FIR_L 19
+#define ASRC_BT_DL_FIR_L 19
+#define SRC44P1_COEF_ADDR 1466
+#define NTAPS_P_SRC_44P1_M4 192
+#define MAX_SMEM_CHECK 32
+#define SATURATION_EQ 9780
+#endif /* _ABE_DEFINE_H_ */
diff --git a/sound/soc/omap/abe/abe_dm_addr.h b/sound/soc/omap/abe/abe_dm_addr.h
new file mode 100644
index 0000000..3abf5a5
--- /dev/null
+++ b/sound/soc/omap/abe/abe_dm_addr.h
@@ -0,0 +1,243 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#define OMAP_ABE_D_ATCDESCRIPTORS_ADDR 0x0
+#define OMAP_ABE_D_ATCDESCRIPTORS_SIZE 0x200
+#define OMAP_ABE_STACK_ADDR 0x200
+#define OMAP_ABE_STACK_SIZE 0x70
+#define OMAP_ABE_D_VERSION_ADDR 0x270
+#define OMAP_ABE_D_VERSION_SIZE 0x4
+#define OMAP_ABE_D_IODESCR_ADDR 0x274
+#define OMAP_ABE_D_IODESCR_SIZE 0x280
+#define OMAP_ABE_D_ZERO_ADDR 0x4F4
+#define OMAP_ABE_D_ZERO_SIZE 0x4
+#define OMAP_ABE_DBG_TRACE1_ADDR 0x4F8
+#define OMAP_ABE_DBG_TRACE1_SIZE 0x1
+#define OMAP_ABE_DBG_TRACE2_ADDR 0x4F9
+#define OMAP_ABE_DBG_TRACE2_SIZE 0x1
+#define OMAP_ABE_DBG_TRACE3_ADDR 0x4FA
+#define OMAP_ABE_DBG_TRACE3_SIZE 0x1
+#define OMAP_ABE_D_MULTIFRAME_ADDR 0x4FC
+#define OMAP_ABE_D_MULTIFRAME_SIZE 0x190
+#define OMAP_ABE_D_IDLETASK_ADDR 0x68C
+#define OMAP_ABE_D_IDLETASK_SIZE 0x2
+#define OMAP_ABE_D_TYPELENGTHCHECK_ADDR 0x68E
+#define OMAP_ABE_D_TYPELENGTHCHECK_SIZE 0x2
+#define OMAP_ABE_D_MAXTASKBYTESINSLOT_ADDR 0x690
+#define OMAP_ABE_D_MAXTASKBYTESINSLOT_SIZE 0x2
+#define OMAP_ABE_D_REWINDTASKBYTES_ADDR 0x692
+#define OMAP_ABE_D_REWINDTASKBYTES_SIZE 0x2
+#define OMAP_ABE_D_PCURRENTTASK_ADDR 0x694
+#define OMAP_ABE_D_PCURRENTTASK_SIZE 0x2
+#define OMAP_ABE_D_PFASTLOOPBACK_ADDR 0x696
+#define OMAP_ABE_D_PFASTLOOPBACK_SIZE 0x2
+#define OMAP_ABE_D_PNEXTFASTLOOPBACK_ADDR 0x698
+#define OMAP_ABE_D_PNEXTFASTLOOPBACK_SIZE 0x4
+#define OMAP_ABE_D_PPCURRENTTASK_ADDR 0x69C
+#define OMAP_ABE_D_PPCURRENTTASK_SIZE 0x2
+#define OMAP_ABE_D_SLOTCOUNTER_ADDR 0x6A0
+#define OMAP_ABE_D_SLOTCOUNTER_SIZE 0x2
+#define OMAP_ABE_D_LOOPCOUNTER_ADDR 0x6A4
+#define OMAP_ABE_D_LOOPCOUNTER_SIZE 0x4
+#define OMAP_ABE_D_REWINDFLAG_ADDR 0x6A8
+#define OMAP_ABE_D_REWINDFLAG_SIZE 0x2
+#define OMAP_ABE_D_SLOT23_CTRL_ADDR 0x6AC
+#define OMAP_ABE_D_SLOT23_CTRL_SIZE 0x4
+#define OMAP_ABE_D_MCUIRQFIFO_ADDR 0x6B0
+#define OMAP_ABE_D_MCUIRQFIFO_SIZE 0x40
+#define OMAP_ABE_D_PINGPONGDESC_ADDR 0x6F0
+#define OMAP_ABE_D_PINGPONGDESC_SIZE 0x20
+#define OMAP_ABE_D_PP_MCU_IRQ_ADDR 0x710
+#define OMAP_ABE_D_PP_MCU_IRQ_SIZE 0x2
+#define OMAP_ABE_D_SRC44P1_MMDL_STRUCT_ADDR 0x714
+#define OMAP_ABE_D_SRC44P1_MMDL_STRUCT_SIZE 0x12
+#define OMAP_ABE_D_SRC44P1_TONES_STRUCT_ADDR 0x728
+#define OMAP_ABE_D_SRC44P1_TONES_STRUCT_SIZE 0x12
+#define OMAP_ABE_D_CTRLPORTFIFO_ADDR 0x740
+#define OMAP_ABE_D_CTRLPORTFIFO_SIZE 0x10
+#define OMAP_ABE_D_IDLE_STATE_ADDR 0x750
+#define OMAP_ABE_D_IDLE_STATE_SIZE 0x4
+#define OMAP_ABE_D_STOP_REQUEST_ADDR 0x754
+#define OMAP_ABE_D_STOP_REQUEST_SIZE 0x4
+#define OMAP_ABE_D_REF0_ADDR 0x758
+#define OMAP_ABE_D_REF0_SIZE 0x2
+#define OMAP_ABE_D_DEBUGREGISTER_ADDR 0x75C
+#define OMAP_ABE_D_DEBUGREGISTER_SIZE 0x8C
+#define OMAP_ABE_D_GCOUNT_ADDR 0x7E8
+#define OMAP_ABE_D_GCOUNT_SIZE 0x2
+#define OMAP_ABE_D_FASTCOUNTER_ADDR 0x7EC
+#define OMAP_ABE_D_FASTCOUNTER_SIZE 0x4
+#define OMAP_ABE_D_SLOWCOUNTER_ADDR 0x7F0
+#define OMAP_ABE_D_SLOWCOUNTER_SIZE 0x4
+#define OMAP_ABE_D_AUPLINKROUTING_ADDR 0x7F4
+#define OMAP_ABE_D_AUPLINKROUTING_SIZE 0x20
+#define OMAP_ABE_D_VIRTAUDIOLOOP_ADDR 0x814
+#define OMAP_ABE_D_VIRTAUDIOLOOP_SIZE 0x4
+#define OMAP_ABE_D_ASRCVARS_DL_VX_ADDR 0x818
+#define OMAP_ABE_D_ASRCVARS_DL_VX_SIZE 0x20
+#define OMAP_ABE_D_ASRCVARS_UL_VX_ADDR 0x838
+#define OMAP_ABE_D_ASRCVARS_UL_VX_SIZE 0x20
+#define OMAP_ABE_D_COEFADDRESSES_VX_ADDR 0x858
+#define OMAP_ABE_D_COEFADDRESSES_VX_SIZE 0x20
+#define OMAP_ABE_D_ASRCVARS_MM_EXT_IN_ADDR 0x878
+#define OMAP_ABE_D_ASRCVARS_MM_EXT_IN_SIZE 0x20
+#define OMAP_ABE_D_COEFADDRESSES_MM_ADDR 0x898
+#define OMAP_ABE_D_COEFADDRESSES_MM_SIZE 0x20
+#define OMAP_ABE_D_TRACEBUFADR_ADDR 0x8B8
+#define OMAP_ABE_D_TRACEBUFADR_SIZE 0x2
+#define OMAP_ABE_D_TRACEBUFOFFSET_ADDR 0x8BA
+#define OMAP_ABE_D_TRACEBUFOFFSET_SIZE 0x2
+#define OMAP_ABE_D_TRACEBUFLENGTH_ADDR 0x8BC
+#define OMAP_ABE_D_TRACEBUFLENGTH_SIZE 0x2
+#define OMAP_ABE_D_MAXTASKBYTESINSLOT_SAVED_ADDR 0x8C0
+#define OMAP_ABE_D_MAXTASKBYTESINSLOT_SAVED_SIZE 0x4
+#define OMAP_ABE_D_PEMPTY_ADDR 0x8C4
+#define OMAP_ABE_D_PEMPTY_SIZE 0x50
+#define OMAP_ABE_D_ECHO_REF_48_16_WRAP_ADDR 0x914
+#define OMAP_ABE_D_ECHO_REF_48_16_WRAP_SIZE 0x8
+#define OMAP_ABE_D_ECHO_REF_48_8_WRAP_ADDR 0x91C
+#define OMAP_ABE_D_ECHO_REF_48_8_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_UL_16_48_WRAP_ADDR 0x924
+#define OMAP_ABE_D_BT_UL_16_48_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_UL_8_48_WRAP_ADDR 0x92C
+#define OMAP_ABE_D_BT_UL_8_48_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_DL_48_16_WRAP_ADDR 0x934
+#define OMAP_ABE_D_BT_DL_48_16_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_DL_48_8_WRAP_ADDR 0x93C
+#define OMAP_ABE_D_BT_DL_48_8_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_DL_16_48_WRAP_ADDR 0x944
+#define OMAP_ABE_D_VX_DL_16_48_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_DL_8_48_WRAP_ADDR 0x94C
+#define OMAP_ABE_D_VX_DL_8_48_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_UL_48_16_WRAP_ADDR 0x954
+#define OMAP_ABE_D_VX_UL_48_16_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_UL_48_8_WRAP_ADDR 0x95C
+#define OMAP_ABE_D_VX_UL_48_8_WRAP_SIZE 0x8
+#define OMAP_ABE_D_ASRCVARS_BT_UL_ADDR 0x964
+#define OMAP_ABE_D_ASRCVARS_BT_UL_SIZE 0x20
+#define OMAP_ABE_D_ASRCVARS_BT_DL_ADDR 0x984
+#define OMAP_ABE_D_ASRCVARS_BT_DL_SIZE 0x20
+#define OMAP_ABE_D_BT_DL_48_8_OPP100_WRAP_ADDR 0x9A4
+#define OMAP_ABE_D_BT_DL_48_8_OPP100_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_DL_48_16_OPP100_WRAP_ADDR 0x9AC
+#define OMAP_ABE_D_BT_DL_48_16_OPP100_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_DL_8_48_FIR_WRAP_ADDR 0x9B4
+#define OMAP_ABE_D_VX_DL_8_48_FIR_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_UL_8_48_FIR_WRAP_ADDR 0x9BC
+#define OMAP_ABE_D_BT_UL_8_48_FIR_WRAP_SIZE 0x8
+#define OMAP_ABE_D_TASKSLIST_ADDR 0x9C4
+#define OMAP_ABE_D_TASKSLIST_SIZE 0x880
+#define OMAP_ABE_D_HW_TEST_ADDR 0x1244
+#define OMAP_ABE_D_HW_TEST_SIZE 0x28
+#define OMAP_ABE_D_TRACEBUFADR_HAL_ADDR 0x126C
+#define OMAP_ABE_D_TRACEBUFADR_HAL_SIZE 0x4
+#define OMAP_ABE_D_CHECK_LIST_SMEM_ADDR 0x1270
+#define OMAP_ABE_D_CHECK_LIST_SMEM_SIZE 0x80
+#define OMAP_ABE_D_CHECK_LIST_LEFT_IDX_ADDR 0x12F0
+#define OMAP_ABE_D_CHECK_LIST_LEFT_IDX_SIZE 0x2
+#define OMAP_ABE_D_CHECK_LIST_RIGHT_IDX_ADDR 0x12F2
+#define OMAP_ABE_D_CHECK_LIST_RIGHT_IDX_SIZE 0x2
+#define OMAP_ABE_D_BT_DL_48_8_FIR_WRAP_ADDR 0x12F4
+#define OMAP_ABE_D_BT_DL_48_8_FIR_WRAP_SIZE 0x8
+#define OMAP_ABE_D_BT_DL_48_8_FIR_OPP100_WRAP_ADDR 0x12FC
+#define OMAP_ABE_D_BT_DL_48_8_FIR_OPP100_WRAP_SIZE 0x8
+#define OMAP_ABE_D_VX_UL_48_8_FIR_WRAP_ADDR 0x1304
+#define OMAP_ABE_D_VX_UL_48_8_FIR_WRAP_SIZE 0x8
+#define OMAP_ABE_D_DEBUG_FW_TASK_ADDR 0x1400
+#define OMAP_ABE_D_DEBUG_FW_TASK_SIZE 0x100
+#define OMAP_ABE_D_DEBUG_FIFO_ADDR 0x1500
+#define OMAP_ABE_D_DEBUG_FIFO_SIZE 0x60
+#define OMAP_ABE_D_DEBUG_FIFO_HAL_ADDR 0x1560
+#define OMAP_ABE_D_DEBUG_FIFO_HAL_SIZE 0x20
+#define OMAP_ABE_D_FWMEMINIT_ADDR 0x1580
+#define OMAP_ABE_D_FWMEMINIT_SIZE 0x3C0
+#define OMAP_ABE_D_FWMEMINITDESCR_ADDR 0x1940
+#define OMAP_ABE_D_FWMEMINITDESCR_SIZE 0x10
+#define OMAP_ABE_D_BT_DL_FIFO_ADDR 0x1C00
+#define OMAP_ABE_D_BT_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_BT_UL_FIFO_ADDR 0x1E00
+#define OMAP_ABE_D_BT_UL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MM_EXT_OUT_FIFO_ADDR 0x2000
+#define OMAP_ABE_D_MM_EXT_OUT_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MM_EXT_IN_FIFO_ADDR 0x2200
+#define OMAP_ABE_D_MM_EXT_IN_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MM_UL2_FIFO_ADDR 0x2400
+#define OMAP_ABE_D_MM_UL2_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_DMIC_UL_FIFO_ADDR 0x2600
+#define OMAP_ABE_D_DMIC_UL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MM_UL_FIFO_ADDR 0x2800
+#define OMAP_ABE_D_MM_UL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MM_DL_FIFO_ADDR 0x2A00
+#define OMAP_ABE_D_MM_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_TONES_DL_FIFO_ADDR 0x2C00
+#define OMAP_ABE_D_TONES_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_VIB_DL_FIFO_ADDR 0x2E00
+#define OMAP_ABE_D_VIB_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_DEBUG_HAL_TASK_ADDR 0x3000
+#define OMAP_ABE_D_DEBUG_HAL_TASK_SIZE 0x800
+#define OMAP_ABE_D_MCPDM_DL_FIFO_ADDR 0x3800
+#define OMAP_ABE_D_MCPDM_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_MCPDM_UL_FIFO_ADDR 0x3A00
+#define OMAP_ABE_D_MCPDM_UL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_VX_UL_FIFO_ADDR 0x3C00
+#define OMAP_ABE_D_VX_UL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_VX_DL_FIFO_ADDR 0x3E00
+#define OMAP_ABE_D_VX_DL_FIFO_SIZE 0x1E0
+#define OMAP_ABE_D_PING_ADDR 0x4000
+#define OMAP_ABE_D_PING_SIZE 0x6000
+#define OMAP_ABE_D_PONG_ADDR 0xA000
+#define OMAP_ABE_D_PONG_SIZE 0x6000
diff --git a/sound/soc/omap/abe/abe_ext.h b/sound/soc/omap/abe/abe_ext.h
new file mode 100644
index 0000000..8fb1aac
--- /dev/null
+++ b/sound/soc/omap/abe/abe_ext.h
@@ -0,0 +1,242 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_EXT_H_
+#define _ABE_EXT_H_
+
+/*
+ * OS DEPENDENT MMU CONFIGURATION
+ */
+#define ABE_PMEM_BASE_OFFSET_MPU 0xe0000
+#define ABE_CMEM_BASE_OFFSET_MPU 0xa0000
+#define ABE_SMEM_BASE_OFFSET_MPU 0xc0000
+#define ABE_DMEM_BASE_OFFSET_MPU 0x80000
+#define ABE_ATC_BASE_OFFSET_MPU 0xf1000
+/* default base address for io_base */
+#define ABE_DEFAULT_BASE_ADDRESS_L3 0x49000000L
+#define ABE_DEFAULT_BASE_ADDRESS_L4 0x40100000L
+#define ABE_DEFAULT_BASE_ADDRESS_DEFAULT ABE_DEFAULT_BASE_ADDRESS_L3
+/*
+ * HARDWARE AND PERIPHERAL DEFINITIONS
+ */
+/* PMEM SIZE in bytes (1024 64-bit words = 2048 32-bit words x 4 bytes) */
+#define ABE_PMEM_SIZE 8192
+/* CMEM SIZE in bytes (2048 32-bit coefficients x 4 bytes) */
+#define ABE_CMEM_SIZE 8192
+/* SMEM SIZE in bytes (3072 stereo samples = 6144 32-bit words x 4 bytes) */
+#define ABE_SMEM_SIZE 24576
+/* DMEM SIZE in bytes */
+#define ABE_DMEM_SIZE 65536L
+/* ATC REGISTERS SIZE in bytes */
+#define ABE_ATC_DESC_SIZE 512
+/* holds the MCU Irq signal */
+#define ABE_MCU_IRQSTATUS_RAW 0x24
+/* status : clear the IRQ */
+#define ABE_MCU_IRQSTATUS 0x28
+/* holds the DSP Irq signal */
+#define ABE_DSP_IRQSTATUS_RAW 0x4C
+/* holds the DMA req lines to the sDMA */
+#define ABE_DMASTATUS_RAW 0x84
+#define EVENT_GENERATOR_COUNTER 0x68
+/* PLL output/desired sampling rate = (32768 * 6000)/96000 */
+#define EVENT_GENERATOR_COUNTER_DEFAULT (2048-1)
+/* PLL output/desired sampling rate = (32768 * 6000)/88200 */
+#define EVENT_GENERATOR_COUNTER_44100 (2228-1)
+/* start / stop the EVENT generator */
+#define EVENT_GENERATOR_START 0x6C
+#define EVENT_GENERATOR_ON 1
+#define EVENT_GENERATOR_OFF 0
+/* selection of the EVENT generator source */
+#define EVENT_SOURCE_SELECTION 0x70
+#define EVENT_SOURCE_DMA 0
+#define EVENT_SOURCE_COUNTER 1
+/* selection of the ABE DMA req line from ATC */
+#define AUDIO_ENGINE_SCHEDULER 0x74
+#define ABE_ATC_DMIC_DMA_REQ 1
+#define ABE_ATC_MCPDMDL_DMA_REQ 2
+#define ABE_ATC_MCPDMUL_DMA_REQ 3
+/* Direction=0 means input from ABE point of view */
+#define ABE_ATC_DIRECTION_IN 0
+/* Direction=1 means output from ABE point of view */
+#define ABE_ATC_DIRECTION_OUT 1
+/*
+ * DMA requests
+ */
+/* Internal connection that does not cross the ABE boundary */
+#define External_DMA_0 0
+/*Transmit request digital microphone */
+#define DMIC_DMA_REQ 1
+/*Multichannel PDM downlink */
+#define McPDM_DMA_DL 2
+/*Multichannel PDM uplink */
+#define McPDM_DMA_UP 3
+/*MCBSP module 1 - transmit request */
+#define MCBSP1_DMA_TX 4
+/*MCBSP module 1 - receive request */
+#define MCBSP1_DMA_RX 5
+/*MCBSP module 2 - transmit request */
+#define MCBSP2_DMA_TX 6
+/*MCBSP module 2 - receive request */
+#define MCBSP2_DMA_RX 7
+/*MCBSP module 3 - transmit request */
+#define MCBSP3_DMA_TX 8
+/*MCBSP module 3 - receive request */
+#define MCBSP3_DMA_RX 9
+/*SLIMBUS module 1 - transmit request channel 0 */
+#define SLIMBUS1_DMA_TX0 10
+/*SLIMBUS module 1 - transmit request channel 1 */
+#define SLIMBUS1_DMA_TX1 11
+/*SLIMBUS module 1 - transmit request channel 2 */
+#define SLIMBUS1_DMA_TX2 12
+/*SLIMBUS module 1 - transmit request channel 3 */
+#define SLIMBUS1_DMA_TX3 13
+/*SLIMBUS module 1 - transmit request channel 4 */
+#define SLIMBUS1_DMA_TX4 14
+/*SLIMBUS module 1 - transmit request channel 5 */
+#define SLIMBUS1_DMA_TX5 15
+/*SLIMBUS module 1 - transmit request channel 6 */
+#define SLIMBUS1_DMA_TX6 16
+/*SLIMBUS module 1 - transmit request channel 7 */
+#define SLIMBUS1_DMA_TX7 17
+/*SLIMBUS module 1 - receive request channel 0 */
+#define SLIMBUS1_DMA_RX0 18
+/*SLIMBUS module 1 - receive request channel 1 */
+#define SLIMBUS1_DMA_RX1 19
+/*SLIMBUS module 1 - receive request channel 2 */
+#define SLIMBUS1_DMA_RX2 20
+/*SLIMBUS module 1 - receive request channel 3 */
+#define SLIMBUS1_DMA_RX3 21
+/*SLIMBUS module 1 - receive request channel 4 */
+#define SLIMBUS1_DMA_RX4 22
+/*SLIMBUS module 1 - receive request channel 5 */
+#define SLIMBUS1_DMA_RX5 23
+/*SLIMBUS module 1 - receive request channel 6 */
+#define SLIMBUS1_DMA_RX6 24
+/*SLIMBUS module 1 - receive request channel 7 */
+#define SLIMBUS1_DMA_RX7 25
+/*McASP - Data transmit DMA request line */
+#define McASP1_AXEVT 26
+/*McASP - Data receive DMA request line */
+#define McASP1_AREVT 29
+/*DUMMY FIFO @@@ */
+#define _DUMMY_FIFO_ 30
+/*DMA of the Circular buffer peripheral 0 */
+#define CBPr_DMA_RTX0 32
+/*DMA of the Circular buffer peripheral 1 */
+#define CBPr_DMA_RTX1 33
+/*DMA of the Circular buffer peripheral 2 */
+#define CBPr_DMA_RTX2 34
+/*DMA of the Circular buffer peripheral 3 */
+#define CBPr_DMA_RTX3 35
+/*DMA of the Circular buffer peripheral 4 */
+#define CBPr_DMA_RTX4 36
+/*DMA of the Circular buffer peripheral 5 */
+#define CBPr_DMA_RTX5 37
+/*DMA of the Circular buffer peripheral 6 */
+#define CBPr_DMA_RTX6 38
+/*DMA of the Circular buffer peripheral 7 */
+#define CBPr_DMA_RTX7 39
+/*
+ * ATC DESCRIPTORS - DESTINATIONS
+ */
+#define DEST_DMEM_access 0x00
+#define DEST_MCBSP1_TX 0x01
+#define DEST_MCBSP2_TX 0x02
+#define DEST_MCBSP3_TX 0x03
+#define DEST_SLIMBUS1_TX0 0x04
+#define DEST_SLIMBUS1_TX1 0x05
+#define DEST_SLIMBUS1_TX2 0x06
+#define DEST_SLIMBUS1_TX3 0x07
+#define DEST_SLIMBUS1_TX4 0x08
+#define DEST_SLIMBUS1_TX5 0x09
+#define DEST_SLIMBUS1_TX6 0x0A
+#define DEST_SLIMBUS1_TX7 0x0B
+#define DEST_MCPDM_DL 0x0C
+#define DEST_MCASP_TX0 0x0D
+#define DEST_MCASP_TX1 0x0E
+#define DEST_MCASP_TX2 0x0F
+#define DEST_MCASP_TX3 0x10
+#define DEST_EXTPORT0 0x11
+#define DEST_EXTPORT1 0x12
+#define DEST_EXTPORT2 0x13
+#define DEST_EXTPORT3 0x14
+#define DEST_MCPDM_ON 0x15
+#define DEST_CBP_CBPr 0x3F
+/*
+ * ATC DESCRIPTORS - SOURCES
+ */
+#define SRC_DMEM_access 0x0
+#define SRC_MCBSP1_RX 0x01
+#define SRC_MCBSP2_RX 0x02
+#define SRC_MCBSP3_RX 0x03
+#define SRC_SLIMBUS1_RX0 0x04
+#define SRC_SLIMBUS1_RX1 0x05
+#define SRC_SLIMBUS1_RX2 0x06
+#define SRC_SLIMBUS1_RX3 0x07
+#define SRC_SLIMBUS1_RX4 0x08
+#define SRC_SLIMBUS1_RX5 0x09
+#define SRC_SLIMBUS1_RX6 0x0A
+#define SRC_SLIMBUS1_RX7 0x0B
+#define SRC_DMIC_UP 0x0C
+#define SRC_MCPDM_UP 0x0D
+#define SRC_MCASP_RX0 0x0E
+#define SRC_MCASP_RX1 0x0F
+#define SRC_MCASP_RX2 0x10
+#define SRC_MCASP_RX3 0x11
+#define SRC_CBP_CBPr 0x3F
+#endif /* _ABE_EXT_H_ */
diff --git a/sound/soc/omap/abe/abe_firmware.c b/sound/soc/omap/abe/abe_firmware.c
new file mode 100644
index 0000000..1676f3c
--- /dev/null
+++ b/sound/soc/omap/abe/abe_firmware.c
@@ -0,0 +1,14355 @@
+0xabeabe00,
+0x00000000,
+0x0000d2a4,
+0x00000d8c,
+0x00000001,
+0x00009495,
+0x00000006,
+0x20314c44,
+0x61757145,
+0x657a696c,
+0x00000072,
+0x00000000,
+0x00000005,
+0x00000019,
+0x74616c46,
+0x73657220,
+0x736e6f70,
+0x00000065,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x322d2073,
+0x00426430,
+0x00000000,
+0x7a684b34,
+0x46504c20,
+0x30202020,
+0x00004264,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x20324c44,
+0x7466654c,
+0x75714520,
+0x7a696c61,
+0x00007265,
+0x00000005,
+0x00000019,
+0x74616c46,
+0x73657220,
+0x736e6f70,
+0x00000065,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x322d2073,
+0x00426430,
+0x00000000,
+0x48303534,
+0x6948207a,
+0x702d6867,
+0x00737361,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x20324c44,
+0x68676952,
+0x71452074,
+0x696c6175,
+0x0072657a,
+0x00000005,
+0x00000019,
+0x74616c46,
+0x73657220,
+0x736e6f70,
+0x00000065,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x322d2073,
+0x00426430,
+0x00000000,
+0x48303534,
+0x6948207a,
+0x702d6867,
+0x00737361,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x65646953,
+0x656e6f74,
+0x75714520,
+0x7a696c61,
+0x00007265,
+0x00000004,
+0x00000009,
+0x74616c46,
+0x73657220,
+0x736e6f70,
+0x00000065,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426438,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x43494d41,
+0x75714520,
+0x7a696c61,
+0x00007265,
+0x00000000,
+0x00000003,
+0x00000013,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426438,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x43494d44,
+0x75714520,
+0x7a696c61,
+0x00007265,
+0x00000000,
+0x00000003,
+0x00000013,
+0x68676948,
+0x7361702d,
+0x64302073,
+0x00000042,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426432,
+0x00000000,
+0x68676948,
+0x7361702d,
+0x312d2073,
+0x00426438,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8cbb51,
+0x000ace72,
+0xfff53192,
+0x007344b1,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffc65da8,
+0x00567385,
+0xffa98c7d,
+0x0039a258,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffe8f244,
+0x00452938,
+0xffbad6c8,
+0x00170dbc,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0002e630,
+0x0008b290,
+0x0008b290,
+0x0002e630,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0058ae58,
+0xfffa666a,
+0x0007da1e,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8cbb51,
+0x000ace72,
+0xfff53192,
+0x007344b1,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffc65da8,
+0x00567385,
+0xffa98c7d,
+0x0039a258,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffe8f244,
+0x00452938,
+0xffbad6c8,
+0x00170dbc,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x007ac72d,
+0xfff8538e,
+0x007ac72d,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8a3b19,
+0x0007aac2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8cbb51,
+0x000ace72,
+0xfff53192,
+0x007344b1,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffc65da8,
+0x00567385,
+0xffa98c7d,
+0x0039a258,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xffe8f244,
+0x00452938,
+0xffbad6c8,
+0x00170dbc,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x007ac72d,
+0xfff8538e,
+0x007ac72d,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8a3b19,
+0x0007aac2,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0xff8cbb51,
+0x000ace72,
+0xfff53192,
+0x007344b1,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0xffc65da8,
+0x00567385,
+0xffa98c7d,
+0x0039a258,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0x00000000,
+0xffe8f244,
+0x00452938,
+0xffbad6c8,
+0x00170dbc,
+0x00000000,
+0x0067cd91,
+0xfff596e6,
+0x000b29a2,
+0xffc1248b,
+0xfffd1080,
+0xfffaca4c,
+0xfffab048,
+0xfffdb0ac,
+0x00024f54,
+0x00054fb8,
+0x000535b4,
+0x0002ef80,
+0x003edb7b,
+0x001d92ec,
+0xff962b59,
+0x000bd422,
+0xffe48132,
+0x002dbdc2,
+0xffc7a94a,
+0x0033fbe6,
+0xffdd3502,
+0x000fea26,
+0xfff0490f,
+0xffd10817,
+0xffaca4df,
+0xffab0493,
+0xffdb0acb,
+0x0024f537,
+0x0054fb6f,
+0x00535b23,
+0x002ef7eb,
+0x000fb6f3,
+0x001d930c,
+0xff962afd,
+0x000bd42a,
+0xffe48122,
+0x002dbdda,
+0xffc7a932,
+0x0033fbf6,
+0xffdd34fa,
+0x000fea26,
+0xfff82487,
+0xffe8840b,
+0xffd6526f,
+0xffd5824b,
+0xffed8567,
+0x00127a9b,
+0x002a7db7,
+0x0029ad93,
+0x00177bf7,
+0x0007db7b,
+0x001d930c,
+0xff962afd,
+0x000bd42a,
+0xffe48122,
+0x002dbdda,
+0xffc7a932,
+0x0033fbf6,
+0xffdd34fa,
+0x000fea26,
+0xffc1248b,
+0xfffd1080,
+0xfffaca4c,
+0xfffab048,
+0xfffdb0ac,
+0x00024f54,
+0x00054fb8,
+0x000535b4,
+0x0002ef80,
+0x003edb7b,
+0x001d92ec,
+0xff962b59,
+0x000bd422,
+0xffe48132,
+0x002dbdc2,
+0xffc7a94a,
+0x0033fbe6,
+0xffdd3502,
+0x000fea26,
+0xfff0490f,
+0xffd10817,
+0xffaca4df,
+0xffab0493,
+0xffdb0acb,
+0x0024f537,
+0x0054fb6f,
+0x00535b23,
+0x002ef7eb,
+0x000fb6f3,
+0x001d930c,
+0xff962afd,
+0x000bd42a,
+0xffe48122,
+0x002dbdda,
+0xffc7a932,
+0x0033fbf6,
+0xffdd34fa,
+0x000fea26,
+0xfff82487,
+0xffe8840b,
+0xffd6526f,
+0xffd5824b,
+0xffed8567,
+0x00127a9b,
+0x002a7db7,
+0x0029ad93,
+0x00177bf7,
+0x0007db7b,
+0x001d930c,
+0xff962afd,
+0x000bd42a,
+0xffe48122,
+0x002dbdda,
+0xffc7a932,
+0x0033fbf6,
+0xffdd34fa,
+0x000fea26,
+0x00009495,
+0x00002000,
+0x00001d80,
+0x00004000,
+0x00005510,
+0x1600200f,
+0x0a000960,
+0x08200000,
+0x08200000,
+0x07800000,
+0x160075ce,
+0x014000e0,
+0x014000e1,
+0x014000e2,
+0x014000e3,
+0x014000e4,
+0x014000e5,
+0x014000e6,
+0x014000e7,
+0x014000e8,
+0x014000e9,
+0x014000ea,
+0x014000eb,
+0x014000ec,
+0x014000ed,
+0x014000ef,
+0x014000ef,
+0x144000e4,
+0x9e000000,
+0x0a200e30,
+0x9e000040,
+0x0a200e30,
+0x9e000080,
+0x0a200e30,
+0x9e0000c0,
+0x0a200e30,
+0x9e080000,
+0x0a200e30,
+0x9e080100,
+0x0a200e30,
+0x9e080200,
+0x0a200e30,
+0x9e080300,
+0x0a200e30,
+0x9e080400,
+0x0a200e30,
+0x9e080500,
+0x0a200e30,
+0x9e080600,
+0x0a200e30,
+0x9e080700,
+0x0a200e30,
+0x9c050800,
+0x0a200e30,
+0x16000010,
+0x16000001,
+0x17000102,
+0x01400042,
+0x17800103,
+0x01400043,
+0x98020000,
+0x160003c6,
+0x07800000,
+0x07800000,
+0x9c03b660,
+0x0a0003f0,
+0x9d0c8118,
+0x07800000,
+0x9c0c07b0,
+0x9f16001a,
+0x9f12021a,
+0x9f12031a,
+0x9f12051a,
+0x9f092020,
+0x9f082030,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x988003d0,
+0x07800000,
+0x9d0c8118,
+0x08200000,
+0x160003c6,
+0x07800000,
+0x07800000,
+0x9c03b660,
+0x0a000540,
+0x9d0c8158,
+0x07800000,
+0x9c0c07b0,
+0x9f16001a,
+0x9f12021a,
+0x9f12031a,
+0x9f12051a,
+0x9f040040,
+0x9c0c07b0,
+0x9f03fc10,
+0x07800000,
+0x9f092060,
+0x9f082070,
+0x07800000,
+0x98800520,
+0x07800000,
+0x9d0c8158,
+0x08200000,
+0x160003c6,
+0x07800000,
+0x07800000,
+0x9c03b660,
+0x0a0006b0,
+0x9d0c8118,
+0x07800000,
+0x9c0c07b0,
+0x9f15001a,
+0x9f11041a,
+0x9f092020,
+0x9f082030,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x98800690,
+0x07800000,
+0x9d0c8118,
+0x08200000,
+0x400002c0,
+0x048002ff,
+0x000000c5,
+0x000004c6,
+0x9c028000,
+0x400006c7,
+0x12000155,
+0x013ffefe,
+0xc00008c4,
+0x1e080000,
+0x020005de,
+0x00000ac3,
+0xdc02b160,
+0x04c3ff2d,
+0xdc01ba70,
+0x128002dd,
+0xdc02a440,
+0x048fffdd,
+0x9c061830,
+0x0b200000,
+0x003ffefe,
+0x000002c4,
+0x400004c5,
+0x048ffeff,
+0x000006c6,
+0x000008c7,
+0x9d02a040,
+0x9d02a950,
+0x9d01b260,
+0x9d02bc70,
+0x08200000,
+0x16006906,
+0x00000068,
+0x16008c05,
+0x01000058,
+0x160069ca,
+0x000000a9,
+0x16008c06,
+0x00000068,
+0x0400089b,
+0x4000009c,
+0x1600694e,
+0x410000ec,
+0x0600000c,
+0x160130cd,
+0x0a800a60,
+0x0a200770,
+0x04800299,
+0x410000a9,
+0x05c00b90,
+0x4ac009f0,
+0x04a01085,
+0x16006a04,
+0x40000047,
+0x16006a8e,
+0x04200599,
+0x400000e1,
+0x04800177,
+0x010000a9,
+0x41000047,
+0x04a00111,
+0x410000e1,
+0x06000001,
+0x4aa00c40,
+0x16006a4d,
+0x400000d6,
+0x16004fc9,
+0x400002d7,
+0x04800166,
+0x410000a9,
+0x04900077,
+0x010000d6,
+0x010002d7,
+0x16006906,
+0x00000068,
+0x16008c05,
+0x01000058,
+0x1600c005,
+0x16007541,
+0x16000002,
+0x40000011,
+0x16007500,
+0x9e0e0550,
+0xdd140530,
+0x160ffff4,
+0x41000002,
+0x06000001,
+0x08400000,
+0x01000004,
+0x9d140550,
+0x0a8009a0,
+0x0a000c40,
+0x048006ff,
+0x013ffafb,
+0x013ffcfc,
+0x413ffefe,
+0x04a0020b,
+0x004002bc,
+0x0600000c,
+0x160130cd,
+0x0a800de0,
+0x0a200770,
+0x0a000d80,
+0x003ffefe,
+0x003ffcfc,
+0x003ffafb,
+0x048ffaff,
+0x08200000,
+0x07800000,
+0x01400040,
+0x01400041,
+0x01400042,
+0x01400043,
+0x08200000,
+0x160004a4,
+0x160004b5,
+0x160004c6,
+0x16000007,
+0x9c032040,
+0x9c032950,
+0x9c033260,
+0x9e0f0070,
+0x9e0f0170,
+0x9e0f0270,
+0x9d032040,
+0x9d032950,
+0x9d033260,
+0x08200000,
+0x9f158048,
+0x9c0c07b0,
+0x9f092020,
+0x9f082030,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x07800000,
+0x07800000,
+0x9d088118,
+0x98800f70,
+0x08200000,
+0x9f158048,
+0x9f040040,
+0x9c0c07b0,
+0x9f03fc10,
+0x07800000,
+0x9f092020,
+0x9f082030,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x07800000,
+0x07800000,
+0x9d188148,
+0x98801030,
+0x08200000,
+0x9f158048,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x9c0c07b0,
+0x9f092020,
+0x9f082030,
+0x07800000,
+0x9d188148,
+0x9d188108,
+0x98801120,
+0x08200000,
+0x9f158048,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x9c0c07b0,
+0x9f092020,
+0x9f082030,
+0x07800000,
+0x9d1e8148,
+0x9d1e8108,
+0x988011e0,
+0x08200000,
+0x9f158018,
+0x9f040010,
+0x9c0c07b0,
+0x9f03fc10,
+0x9f092020,
+0x9f082030,
+0x9c0c07b0,
+0x9f092060,
+0x9f082070,
+0x9d1e8108,
+0x988012a0,
+0x08200000,
+0x9c080048,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8118,
+0x98801360,
+0x08200000,
+0x9c180028,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8108,
+0x988013d0,
+0x08200000,
+0x9c180068,
+0x9c180028,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8148,
+0x98801440,
+0x08200000,
+0x9c1e0048,
+0x9c1e0008,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8148,
+0x988014c0,
+0x08200000,
+0x9c1e0008,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8108,
+0x98801540,
+0x08200000,
+0x160004a4,
+0x160004b5,
+0x160004c6,
+0x160000bd,
+0x9c032340,
+0x9c032c50,
+0x9c033560,
+0x9c180028,
+0x9c180068,
+0x9f1d0010,
+0x9c1800a8,
+0x9c1800e8,
+0x9f1d00b0,
+0x07800000,
+0x9d0c8318,
+0x9d0c84b8,
+0x9c180028,
+0x9c180068,
+0x9f1d0010,
+0x07800000,
+0x07800000,
+0x9d0c8518,
+0x98801620,
+0x9d032340,
+0x9d032c50,
+0x9d033560,
+0x08200000,
+0x160003c2,
+0x16000504,
+0x16000515,
+0x16000526,
+0x9c011720,
+0x9c03a440,
+0x9c03ad50,
+0x9c03b660,
+0x160000bd,
+0x9f158418,
+0x9c0c02b0,
+0x9f091020,
+0x9f081030,
+0x9c0c02b0,
+0x9f091060,
+0x9f081070,
+0x07800000,
+0x9d180108,
+0x9d180148,
+0x9f158518,
+0x9c0c02b0,
+0x9f091020,
+0x9f081030,
+0x9c0c02b0,
+0x9f091060,
+0x9f081070,
+0x07800000,
+0x9d180108,
+0x9d180148,
+0x9c0c0618,
+0x07800000,
+0x07800000,
+0x9d180108,
+0x9d180148,
+0x988017f0,
+0x9d032440,
+0x9d032d50,
+0x9d033660,
+0x08200000,
+0x1600000d,
+0x9e0f00d0,
+0x00800e0d,
+0x9f158038,
+0x07800000,
+0x04a002dd,
+0x9d188108,
+0x9f158038,
+0x07800000,
+0x98801a30,
+0x9d188108,
+0x08200000,
+0x9e088100,
+0x07800000,
+0x07800000,
+0x12800277,
+0x04c0ff77,
+0x04a00174,
+0x12800266,
+0x04c0ff66,
+0x04000645,
+0x060ffff4,
+0x17000454,
+0x12000244,
+0x9e0f0140,
+0x07800000,
+0x07800000,
+0x9c0c0118,
+0x07800000,
+0x07800000,
+0x9d0c8118,
+0x98801bb0,
+0x08200000,
+0x08200000,
+0x08200000,
+0x08200000,
+0x9c038600,
+0x07800000,
+0x07800000,
+0x9c180770,
+0xdc100348,
+0x160fff05,
+0x9f000810,
+0x9f118412,
+0x9f001010,
+0x9f002810,
+0x9c0c00b8,
+0x160ffd80,
+0x9d0c8410,
+0x9f1d8012,
+0x9f001810,
+0x9f0400d0,
+0x9c0c0210,
+0x16000204,
+0xdd0e00b0,
+0x16000005,
+0x9f1d80b2,
+0x9f0000b0,
+0x9f0020b0,
+0x9f0400d0,
+0x05800560,
+0x0a801dd0,
+0x9c0c0510,
+0x0a001de0,
+0x9c0c0618,
+0x16000014,
+0x9d0c81e8,
+0x9d0c8148,
+0x0a801e50,
+0x9c0c05b0,
+0x9c0c0510,
+0x0a001e70,
+0x9c0c06b8,
+0x9c0c0618,
+0x07800000,
+0x9d0c81e8,
+0x9d0c8148,
+0x98801c50,
+0x9d180750,
+0x08200000,
+0x9d019220,
+0x048002ff,
+0x14400004,
+0x413ffefe,
+0x16000040,
+0x9c010910,
+0x0a203df0,
+0x14400040,
+0x9c030810,
+0x16000171,
+0x9c009f30,
+0x9c019220,
+0x0a2038f0,
+0x003ffefe,
+0x9c009830,
+0x048ffeff,
+0x08200000,
+0x08200000,
+0x08200000,
+0x08200000,
+0x08200000,
+0x40000024,
+0x048002ff,
+0x41000224,
+0x16000005,
+0x413ffefe,
+0x04000400,
+0x9e0f0150,
+0x01000025,
+0x0a202390,
+0x403ffefe,
+0x16000007,
+0x9e0f0170,
+0x048ffeff,
+0x08200000,
+0x40000024,
+0x048002ff,
+0x41000224,
+0x16000005,
+0x413ffefe,
+0x16000016,
+0x41800dc6,
+0x04000400,
+0x9e0f0150,
+0x01000025,
+0x0a202cd0,
+0x403ffefe,
+0x16000007,
+0x9e0f0170,
+0x048ffeff,
+0x08200000,
+0x048002ff,
+0x413ffefe,
+0x16000005,
+0x01000025,
+0x0a202390,
+0x40000024,
+0x16000005,
+0x403ffefe,
+0x04200454,
+0x41000224,
+0x048ffeff,
+0x08200000,
+0x048002ff,
+0x413ffefe,
+0x16000005,
+0x01000025,
+0x01800dc5,
+0x0a202cd0,
+0x40000024,
+0x16000005,
+0x403ffefe,
+0x04200454,
+0x41000224,
+0x048ffeff,
+0x08200000,
+0x048008ff,
+0x413ff8f8,
+0x1440000d,
+0x9c038e10,
+0x413ffaf9,
+0x04a001dd,
+0x413ffcfa,
+0x16000001,
+0x413ffefb,
+0x160000f0,
+0x9c100400,
+0x9c100480,
+0x9c1d06c4,
+0x9f085030,
+0x9c180674,
+0x9c180650,
+0x058001a0,
+0x0aa02890,
+0x04800144,
+0x04400044,
+0x05800040,
+0x0aa025d0,
+0x05800160,
+0x0ac02570,
+0x9e090000,
+0x07800000,
+0x07800000,
+0x9e0d0500,
+0x9d040508,
+0x0a002760,
+0x9d040008,
+0x9e090000,
+0x07800000,
+0x9d040008,
+0x9e0d0500,
+0x0a002760,
+0x9d040008,
+0x9e090000,
+0x07800000,
+0x07800000,
+0x9e0d0500,
+0x1280010a,
+0x048001a9,
+0x05800940,
+0x0aa02760,
+0x05800160,
+0x40000628,
+0x160ffff9,
+0x0ac026f0,
+0x05800180,
+0x0ae02760,
+0x160ffff6,
+0x160ffff7,
+0x0a002730,
+0x05800810,
+0x0ae02760,
+0x16000016,
+0x16000007,
+0x9d044690,
+0x04a00144,
+0x9d180674,
+0x05800160,
+0x9d180654,
+0x0ac027d0,
+0x0420040a,
+0x04a001ab,
+0x4a002800,
+0x044000bb,
+0x0480014b,
+0x044000bb,
+0x1440004a,
+0x120001aa,
+0x42000a38,
+0x120001bb,
+0x42000b39,
+0x12000288,
+0x12000299,
+0x9e0e8280,
+0xca0029a0,
+0x1e0e8390,
+0xdd040604,
+0x05800160,
+0x0ac02940,
+0x9d040008,
+0x9e090000,
+0x07800000,
+0x05800040,
+0x9e0d0500,
+0x0aa029a0,
+0x9d040508,
+0x0a0029a0,
+0x9e090000,
+0x05800040,
+0x9d040008,
+0x9e0d0500,
+0x0a8029a0,
+0x9d040508,
+0x9c1d06c4,
+0xdc1d0644,
+0x1f0400b0,
+0x9c100700,
+0xdc1d06c4,
+0x1f040010,
+0x9d108480,
+0x9f0940b0,
+0x9d108700,
+0x00000cc9,
+0x06000008,
+0x0aa02ba0,
+0xdc1d0684,
+0x14400005,
+0xdc1d0604,
+0x160fff8a,
+0x04a00255,
+0xdd108480,
+0x16000017,
+0xdd108700,
+0x160ffff8,
+0x05800540,
+0x0aa02b60,
+0x05800160,
+0x0ac02b50,
+0x01000027,
+0x0a002b60,
+0x01000028,
+0x9e088000,
+0xa0054dba,
+0xa005c81a,
+0x0a002c30,
+0x9e088000,
+0xa0054dba,
+0xa005c81a,
+0x160fffaa,
+0x9f1f80b0,
+0x9f1e0010,
+0x9f040020,
+0x9f040070,
+0x9f020810,
+0x9d0446a0,
+0x9e0f0070,
+0x9d0c8118,
+0x98802430,
+0x003ffefb,
+0x003ffcfa,
+0x003ffaf9,
+0x003ff8f8,
+0x048ff8ff,
+0x08200000,
+0x048008ff,
+0x413ff8f8,
+0x1440000d,
+0x9c038e10,
+0x413ffaf9,
+0x04a001dd,
+0x413ffcfa,
+0x16000001,
+0x413ffefb,
+0x04a00100,
+0x9c100400,
+0x9c100480,
+0x9c1d06c4,
+0x9f085030,
+0x9c180674,
+0x9c180650,
+0x058001a0,
+0x4aa03200,
+0x160000f7,
+0x04800144,
+0x04400744,
+0x05800740,
+0x0aa02f20,
+0x05800160,
+0x0ac02ec0,
+0x9e090000,
+0x07800000,
+0x07800000,
+0x9e0d0500,
+0x9d040508,
+0x0a0030c0,
+0x9d040008,
+0x9e090000,
+0x07800000,
+0x9d040008,
+0x9e0d0500,
+0x0a0030c0,
+0x9d040008,
+0x9e090000,
+0x160000f7,
+0x07800000,
+0x9e0d0500,
+0x1280017a,
+0x048001a9,
+0x05800940,
+0x0aa030c0,
+0x05800160,
+0x00000ec8,
+0x40000688,
+0x160ffff9,
+0x0ac03050,
+0x05800810,
+0x0ae030c0,
+0x160ffff6,
+0x160ffff7,
+0x0a003090,
+0x05800180,
+0x0ae030c0,
+0x16000016,
+0x16000007,
+0x9d044690,
+0x04a00144,
+0x9d180674,
+0x05800160,
+0x9d180654,
+0x4ac03140,
+0x160000f7,
+0x0420047a,
+0x04a001ab,
+0x4a003170,
+0x044007bb,
+0x0480014b,
+0x044007bb,
+0x1440004a,
+0x120001aa,
+0x42000a38,
+0x120001bb,
+0x42000b39,
+0x12000288,
+0x12000299,
+0x9e0e8280,
+0xca003310,
+0x1e0e8390,
+0xdd040604,
+0x05800160,
+0x0ac032b0,
+0x9d040008,
+0x9e090000,
+0x07800000,
+0x060000f4,
+0x9e0d0500,
+0x0aa03310,
+0x9d040508,
+0x0a003310,
+0x9e090000,
+0x060000f4,
+0x9d040008,
+0x9e0d0500,
+0x0a803310,
+0x9d040508,
+0x060000f4,
+0x0aa03590,
+0x9c1d0600,
+0x9f065060,
+0x9f020830,
+0x9f095010,
+0xdc1d0600,
+0x160fffe9,
+0x9f065060,
+0x9f020c30,
+0x0600000a,
+0x0a803590,
+0x9f095010,
+0x00800dcb,
+0x16000028,
+0x0600000a,
+0x0aa03590,
+0x0600000b,
+0x0a803500,
+0x0600000d,
+0x0aa03590,
+0x9d044480,
+0x9d044780,
+0x9c100480,
+0x9c100700,
+0x9d044490,
+0x9d044790,
+0x9d044680,
+0x9d108480,
+0x9d108700,
+0x0a003610,
+0x058000d0,
+0x0aa03590,
+0x9d044490,
+0x9c100700,
+0x9d044790,
+0x9d108480,
+0x9d108700,
+0x9d044480,
+0x9d044780,
+0x9c1d06c4,
+0xdc1d0644,
+0x1f0400b0,
+0x9c100700,
+0x9f040010,
+0x9d108480,
+0x07800000,
+0x9d108700,
+0x9c1d06c4,
+0x07800000,
+0x9f0940b0,
+0x07800000,
+0x00800cc9,
+0x06000008,
+0x0aa037c0,
+0xdc1d0684,
+0x160000f5,
+0xdc1d0604,
+0x160fff8a,
+0x04a00255,
+0xdd108480,
+0x16000017,
+0xdd108700,
+0x160ffff8,
+0x05800540,
+0x0aa03780,
+0x05800160,
+0x0ac03770,
+0x01000027,
+0x0a003780,
+0x01000028,
+0x9e088000,
+0xa0054dba,
+0xa005c81a,
+0x0a003850,
+0x9e088000,
+0xa0054dba,
+0xa005c81a,
+0x160fffaa,
+0x9f1f80b0,
+0x9f1e0010,
+0x9f040020,
+0x9f040070,
+0x9f020810,
+0x9d0446a0,
+0x9e0f0070,
+0x9d0c8118,
+0x98802d70,
+0x003ffefb,
+0x003ffcfa,
+0x003ffaf9,
+0x003ff8f8,
+0x048ff8ff,
+0x08200000,
+0x9c0c0018,
+0x1440001d,
+0x04a001dd,
+0x9d0c8318,
+0x07800000,
+0x9c0c0018,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x98803940,
+0x07800000,
+0xa00602ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x9d0c82b8,
+0x08200000,
+0x9c0c0018,
+0x160000ad,
+0x07800000,
+0x9d0c8318,
+0x07800000,
+0x9c0c0018,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x07800000,
+0x9c0c0018,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c02b8,
+0x98803a70,
+0x9c0c0018,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x07800000,
+0xa00602ba,
+0x07800000,
+0x07800000,
+0x9d0c02b8,
+0x08200000,
+0x9c0c0038,
+0x1440001d,
+0x04a001dd,
+0x9d0c8338,
+0x07800000,
+0xa00602ba,
+0xa006821a,
+0x9c0c0038,
+0x07800000,
+0x9d0c8298,
+0x9d0c8338,
+0x9d0c8198,
+0x98803c50,
+0x07800000,
+0xa00602ba,
+0xa006821a,
+0x07800000,
+0x07800000,
+0x9d0c8298,
+0x9d0c8198,
+0x08200000,
+0xdc0c0018,
+0x04a00201,
+0x04a001dd,
+0xdd040008,
+0x06000001,
+0x04a00111,
+0x0aa03d70,
+0x9d0c8118,
+0x98803d50,
+0x08200000,
+0x9c0c02b0,
+0x9c0c0018,
+0x04a00205,
+0x07800000,
+0x9d0c8118,
+0xdd0c81b8,
+0x06000005,
+0x04a00155,
+0x0aa03e40,
+0x98803e00,
+0x08200000,
+0x9c039e30,
+0x07800000,
+0x9c0c0018,
+0x9f138510,
+0x1600004d,
+0x07800000,
+0x9d0c8318,
+0x07800000,
+0x9c0c0510,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x07800000,
+0x9c0c0018,
+0x9f138510,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x98803f20,
+0x9c0c0510,
+0xa00602ba,
+0x07800000,
+0x9d0c8318,
+0x9d0c81b8,
+0x9d0c02b8,
+0x07800000,
+0xa00602ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x9d0c82b8,
+0x08200000,
+0x9c0c0018,
+0x1440001d,
+0x04a001dd,
+0x9d0c8218,
+0x07800000,
+0x9c0c0018,
+0xa00582ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x98804140,
+0x1440001d,
+0x9d0c8218,
+0x04a001dd,
+0xa00582ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x988041c0,
+0x08200000,
+0x9c0c0018,
+0x1440001d,
+0x04a002dd,
+0xa00582ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x9d0c8218,
+0x07800000,
+0x07800000,
+0x9c0c0018,
+0x07800000,
+0x07800000,
+0x9d0c8218,
+0x9d0c81b8,
+0x988042c0,
+0x9c0c0018,
+0x1440001d,
+0x04a002dd,
+0xa00582ba,
+0x07800000,
+0x07800000,
+0x9d0c81b8,
+0x9d0c8218,
+0x07800000,
+0x07800000,
+0x9c0c0018,
+0x07800000,
+0x07800000,
+0x9d0c8218,
+0x9d0c81b8,
+0x988043c0,
+0x08200000,
+0x9f160028,
+0x9f168298,
+0x04a001dd,
+0x07800000,
+0x9d0c8128,
+0x07800000,
+0x9f160028,
+0x9f168298,
+0x98804470,
+0x9d0c8128,
+0x08200000,
+0x9f160020,
+0x9f168098,
+0x9c0c03b0,
+0x9f092020,
+0x9f082030,
+0x9c0c03b0,
+0x9f092060,
+0x9f082070,
+0x07800000,
+0x07800000,
+0x9d0c8108,
+0x9d0c8258,
+0x988044e0,
+0x08200000,
+0x9f160020,
+0x9f168098,
+0x9c0c02b0,
+0x9f092020,
+0x9f082030,
+0x9c0c02b0,
+0x9f092060,
+0x9f082070,
+0x07800000,
+0x07800000,
+0x9d0c8118,
+0x988045c0,
+0x08200000,
+0x9d008810,
+0x1280020d,
+0x07800000,
+0x9c038810,
+0x9e0e0620,
+0x04a001dd,
+0x9f16801a,
+0x9f12011a,
+0x9f039810,
+0x9f026810,
+0x9f118610,
+0x9f1680ba,
+0x9f1201ba,
+0x9f0398b0,
+0x9f0268b0,
+0x9f1186b0,
+0x9d0c8718,
+0x9d108248,
+0x9d108208,
+0x9d0c87b8,
+0x9d1082c8,
+0x9d108288,
+0x988046f0,
+0x08200000,
+0x00000003,
+0x00000205,
+0x1440001d,
+0x9c039830,
+0x9c03aa50,
+0x07800000,
+0x9c0c0018,
+0x9c0c02b8,
+0x07800000,
+0x07800000,
+0x9d0c8128,
+0x98804870,
+0x08200000,
+0x00001007,
+0x048002ff,
+0x013ffefe,
+0x06000007,
+0x00801605,
+0x16013523,
+0x0a804a30,
+0x12000155,
+0x0200035e,
+0x0b200000,
+0x00800405,
+0x16013523,
+0x12000155,
+0x0200035e,
+0x0b200000,
+0x00801705,
+0x16013523,
+0x12000155,
+0x0200035e,
+0x0b200000,
+0x003ffefe,
+0x048ffeff,
+0x07800000,
+0x08200000,
+0x00800b03,
+0x16000181,
+0x048004ff,
+0x40800a0d,
+0x04000101,
+0x00000212,
+0x00000017,
+0x9e0e0420,
+0x00000416,
+0xdc180404,
+0x06000003,
+0x9c180480,
+0x0aa04ee0,
+0x9c052b20,
+0x9c042820,
+0x9c023970,
+0x40800e04,
+0x16000005,
+0x40800503,
+0x0600000d,
+0x9d01b060,
+0x4a804c40,
+0x0400033d,
+0x04200427,
+0x04200d77,
+0x05800750,
+0x0ae04c40,
+0x16000006,
+0x16000145,
+0x0a004ec0,
+0x160fffd6,
+0x05800420,
+0x0ae04dc0,
+0x160fffe6,
+0x04000344,
+0x05800420,
+0x0ae04eb0,
+0x160ffff6,
+0x04000344,
+0x05800420,
+0x0ae04eb0,
+0x16000006,
+0x04000344,
+0x05800420,
+0x0ae04eb0,
+0x16000016,
+0x04000344,
+0x05800420,
+0x0ae04eb0,
+0x16000026,
+0x04000344,
+0x05800420,
+0x0ae04eb0,
+0x16000036,
+0x013ffcf6,
+0x12000132,
+0x04000233,
+0x9e088300,
+0x40800e02,
+0x16000005,
+0x04000233,
+0x12000233,
+0x04200377,
+0x05800570,
+0x16001e02,
+0x17800523,
+0x04000377,
+0x9e0f0070,
+0x003ffcf6,
+0x00800715,
+0x01000606,
+0x0a0052f0,
+0x9c042b20,
+0x9c052920,
+0x9c023870,
+0x07800000,
+0x07800000,
+0x9d00b360,
+0x16000004,
+0x16000005,
+0x160fffb6,
+0x00800503,
+0x05800420,
+0x0ae051f0,
+0x160fffc6,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x160fffd6,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x160fffe6,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x160ffff6,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000006,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000016,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000026,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000036,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000046,
+0x04000344,
+0x05800420,
+0x0ae052d0,
+0x16000056,
+0x013ffcf6,
+0x12000232,
+0x04000233,
+0x9e088300,
+0x16000005,
+0x12000233,
+0x04000377,
+0x04a1e077,
+0x05800570,
+0x16001e02,
+0x17800523,
+0x04000377,
+0x9e0f0170,
+0x003ffcf6,
+0x01000606,
+0x00800715,
+0x16013526,
+0x413ffefe,
+0x12000155,
+0x00000202,
+0x00800d04,
+0x0200056e,
+0x16020e05,
+0x16014246,
+0x16014287,
+0x0400042d,
+0x04a001dd,
+0x9e0e0750,
+0x9e0e0260,
+0x9e0e0370,
+0x0b200000,
+0x00000806,
+0x003ffefe,
+0x0000021d,
+0x9e0e0560,
+0x40800b05,
+0x048ffcff,
+0x408007d7,
+0x06000005,
+0x40800f02,
+0x04c07f77,
+0x4a805540,
+0x04500273,
+0x00800a02,
+0x9e088100,
+0x00000011,
+0x418007d3,
+0x16000003,
+0x12800277,
+0x018003d7,
+0x9d140530,
+0x9d038810,
+0x08200000,
+0x00800a02,
+0x9e088000,
+0x00000011,
+0x418007d3,
+0x16000003,
+0x12800277,
+0x018000d7,
+0x9d140530,
+0x9d038910,
+0x08200000,
+0x00001807,
+0x00801e02,
+0x16000003,
+0x9c01b970,
+0x06000082,
+0x17000233,
+0x9e088100,
+0x07800000,
+0x12000c33,
+0x04c3ff66,
+0x04500366,
+0x07800000,
+0x16000003,
+0x9e0c8100,
+0x16007f46,
+0x00000064,
+0x1600003d,
+0x04a00122,
+0x9c03a040,
+0x04800266,
+0x9e0f0130,
+0x04800433,
+0x06000002,
+0x9c0c0038,
+0x9c0c0078,
+0x9c0c00b8,
+0x9d0c810c,
+0x9d0c815c,
+0x9d0c81ac,
+0x98805750,
+0x0aa056d0,
+0x9e0f0120,
+0x08200000,
+0x00001004,
+0x0080070d,
+0x06000004,
+0x00800203,
+0x0b800000,
+0x40800905,
+0x048002ff,
+0x40001604,
+0x040003dd,
+0x413ffefe,
+0x06000004,
+0x9c03a950,
+0x4aa05920,
+0x144000d2,
+0x0a205be0,
+0x40001604,
+0x1440002d,
+0x06000004,
+0x0a805a60,
+0x05800d40,
+0x0ae05960,
+0x0a205a90,
+0x0a005a10,
+0x042004d2,
+0x1440004d,
+0x0a205a90,
+0x0a205be0,
+0x06000002,
+0x0a805a10,
+0x1440002d,
+0x00001604,
+0x05800d40,
+0x0ac05a60,
+0x0a205a90,
+0x003ffefe,
+0x40800905,
+0x048ffeff,
+0x9d03a950,
+0x08200000,
+0x04a0012d,
+0x0a201a90,
+0x0a005a10,
+0x16014246,
+0x40800605,
+0x048002ff,
+0x413ffefe,
+0x144000d3,
+0x1601352e,
+0x9e0e0260,
+0x12000155,
+0x420005ee,
+0x04a001dd,
+0x0b200000,
+0x9e088000,
+0x403ffefe,
+0x16000006,
+0x40001605,
+0x048ffeff,
+0x9e0e8040,
+0x9e0f0060,
+0x04200355,
+0x01001605,
+0x08200000,
+0x40800b0d,
+0x16000246,
+0x40000403,
+0x04c001d7,
+0x06000007,
+0x4a805cd0,
+0x16000017,
+0x00001e04,
+0x06000004,
+0x0a805df0,
+0x40001c05,
+0x048001dd,
+0x01001604,
+0x01001405,
+0x0a005d40,
+0x00001a04,
+0x06000004,
+0x0a805df0,
+0x40001805,
+0x048001dd,
+0x01001604,
+0x01001405,
+0x9e0e8050,
+0x41800b0d,
+0x16000005,
+0x40800a04,
+0x05c00630,
+0x0a805de0,
+0x12000233,
+0x9e0e0530,
+0x9d140550,
+0x0a005df0,
+0x01800017,
+0x08200000,
+0x048008ff,
+0x413ff8f8,
+0x1440001d,
+0x013ffaf9,
+0x413ffcfa,
+0x16006a49,
+0x413ffefb,
+0x16015002,
+0x00000095,
+0x00000296,
+0x9c018201,
+0x01000025,
+0x41400226,
+0x16002743,
+0x04800122,
+0x9e088200,
+0x9e090300,
+0x07800000,
+0x12800277,
+0x128002bb,
+0x01c00127,
+0x01c0012b,
+0x9c018201,
+0x98805ef0,
+0x04800633,
+0x1440001d,
+0x00000034,
+0x04802833,
+0x01c00124,
+0x98805fa0,
+0x16002102,
+0x9e0e0220,
+0x16000806,
+0x16007eca,
+0x16007f0b,
+0x9d140270,
+0x000000a8,
+0x000000b9,
+0x04a00188,
+0x04a00199,
+0x16000005,
+0x16000006,
+0x06000008,
+0x0aa060e0,
+0x16000015,
+0x000002a8,
+0x06000009,
+0x0aa06120,
+0x16000016,
+0x000002b9,
+0x16007102,
+0x410000a8,
+0x12000166,
+0x410000b9,
+0x04500655,
+0x40800021,
+0x16006a4d,
+0x41800027,
+0x06000004,
+0x0aa06210,
+0x06000005,
+0x0aa06290,
+0x06000001,
+0x0aa06320,
+0x0a006410,
+0x160000a8,
+0x400000d6,
+0x12000c88,
+0x04500487,
+0x07800000,
+0x06000005,
+0x9d180078,
+0x0a806300,
+0x160000c8,
+0x400000d6,
+0x12000c88,
+0x04500587,
+0x07800000,
+0x07800000,
+0x9d180078,
+0x06000001,
+0x0a806390,
+0x160000d8,
+0x400000d6,
+0x12000c88,
+0x04500187,
+0x07800000,
+0x07800000,
+0x9d180078,
+0x16000903,
+0x16000fb9,
+0x16000016,
+0x9e0e0530,
+0x16000007,
+0x9d03c890,
+0x07800000,
+0x9d140570,
+0x16006ac8,
+0x40000280,
+0x16000013,
+0x00000084,
+0x06000000,
+0x40000049,
+0x16006a82,
+0x4a8064b0,
+0x16000005,
+0x04200959,
+0x05800590,
+0x41000045,
+0x160ffff6,
+0x17000353,
+0x17800363,
+0x04800233,
+0x01000023,
+0x16015002,
+0x01004a23,
+0x403ffefb,
+0x16002202,
+0x9e0e0220,
+0x403ffcfa,
+0x16000806,
+0x003ffaf9,
+0x003ff8f8,
+0x9d140270,
+0x048ff8ff,
+0x08200000,
+0x048008ff,
+0x413ff8f8,
+0x16015002,
+0x013ffaf9,
+0x013ffcfa,
+0x413ffefb,
+0x04803322,
+0x16008385,
+0x16000e0d,
+0x00000454,
+0x00000856,
+0x9c0768d0,
+0x01c00124,
+0x01c00126,
+0x40000087,
+0x16003129,
+0x0000028d,
+0x40000c54,
+0x12000299,
+0x00000e56,
+0x01c00127,
+0x0180012d,
+0x9e0e0490,
+0x41400224,
+0x16003138,
+0x41400226,
+0x12000288,
+0x9c100480,
+0x9f03e0b0,
+0x9e0e0580,
+0x9e010080,
+0xdc1005c0,
+0x1600060d,
+0x9f03e0b0,
+0x01400229,
+0x9e0080c0,
+0x0140022a,
+0x9c01ead0,
+0x01400225,
+0x41400226,
+0x16000633,
+0x9e090200,
+0x04800122,
+0x9c029c30,
+0x128002bb,
+0x01c0012b,
+0x160005bd,
+0x9e088400,
+0x07800000,
+0x9c01ead0,
+0x12800277,
+0x01c00127,
+0x9e090200,
+0x16000023,
+0x1600005d,
+0x128002bb,
+0x9c029c30,
+0x01c0012b,
+0x04800122,
+0x9e088400,
+0x9c01ead0,
+0x07800000,
+0x12800277,
+0x9e090200,
+0x07800000,
+0x01c00127,
+0x128002bb,
+0x01c0012b,
+0x04800222,
+0x16008185,
+0x16000e2d,
+0x00000454,
+0x00000856,
+0x9c0768d0,
+0x01c00124,
+0x01c00126,
+0x40000087,
+0x16003149,
+0x0000028d,
+0x40000c54,
+0x12000299,
+0x00000e56,
+0x01c00127,
+0x0180012d,
+0x9e0e0490,
+0x41400224,
+0x16003158,
+0x41400226,
+0x12000288,
+0x9c100480,
+0x9f03e0b0,
+0x9e0e0580,
+0x9e010080,
+0xdc1005c0,
+0x1600009d,
+0x9f03e0b0,
+0x01400229,
+0x9e0080c0,
+0x0140022a,
+0x9c01ead0,
+0x01400225,
+0x16000563,
+0x01400226,
+0x9e090200,
+0x9c029c30,
+0x07800000,
+0x128002bb,
+0x9e088400,
+0x01c0022b,
+0x07800000,
+0x12800277,
+0x01c00227,
+0x003ffefb,
+0x003ffcfa,
+0x003ffaf9,
+0x003ff8f8,
+0x048ff8ff,
+0x08200000,
+0x16000023,
+0x16012f01,
+0x16000bf5,
+0x00000010,
+0x04800200,
+0x9c01aa50,
+0x04c07f00,
+0x16012704,
+0x01000010,
+0x04000400,
+0x00000005,
+0x07800000,
+0x07800000,
+0x9c00a850,
+0x07800000,
+0x07800000,
+0x9e088000,
+0x9f038490,
+0x07800000,
+0x12800266,
+0x04c0ff6d,
+0x9c0c0018,
+0x9f0b0010,
+0x9f092080,
+0x98806e90,
+0x9c0c0330,
+0x07800000,
+0x07800000,
+0x9f0920e0,
+0x07800000,
+0x07800000,
+0x06000004,
+0x0a8070e0,
+0x00000010,
+0x04800200,
+0x9c01aa50,
+0x04c07f00,
+0x16012704,
+0x04000400,
+0x00000005,
+0x07800000,
+0x07800000,
+0x9c00a850,
+0x07800000,
+0x07800000,
+0x9e088000,
+0x9f038490,
+0x07800000,
+0x12800266,
+0x04c0ff6d,
+0x9f038410,
+0x07800000,
+0x07800000,
+0x9c0c00b0,
+0x07800000,
+0x07800000,
+0x9d0c8038,
+0x98807090,
+0x00000010,
+0x04800200,
+0x04c07f00,
+0x07800000,
+0x01000010,
+0x04a00133,
+0x06000003,
+0x0aa06d50,
+0x08200000,
+0x16000023,
+0x16012f21,
+0x16000bf5,
+0x00000010,
+0x04800200,
+0x9c01aa50,
+0x04c07f00,
+0x16012704,
+0x01000010,
+0x04000400,
+0x00000005,
+0x07800000,
+0x07800000,
+0x9c00a850,
+0x07800000,
+0x07800000,
+0x9e088000,
+0x9f038490,
+0x07800000,
+0x12800266,
+0x04c0ff6d,
+0x9c0c0018,
+0x9f0b0010,
+0x9f092090,
+0x988072c0,
+0x9c0c0330,
+0x07800000,
+0x07800000,
+0x9f0920e0,
+0x07800000,
+0x07800000,
+0x06000004,
+0x0a807510,
+0x00000010,
+0x04800200,
+0x9c01aa50,
+0x04c07f00,
+0x16012704,
+0x04000400,
+0x00000005,
+0x07800000,
+0x07800000,
+0x9c00a850,
+0x07800000,
+0x07800000,
+0x9e088000,
+0x9f038490,
+0x07800000,
+0x12800266,
+0x04c0ff6d,
+0x9f038410,
+0x07800000,
+0x07800000,
+0x9c0c00b0,
+0x07800000,
+0x07800000,
+0x9d0c8098,
+0x988074c0,
+0x00000010,
+0x04800200,
+0x04c07f00,
+0x07800000,
+0x01000010,
+0x04a00133,
+0x06000003,
+0x0aa07180,
+0x08200000,
+0x00000004,
+0x00000405,
+0x00000806,
+0x00000c07,
+0x05c00540,
+0x0b800000,
+0x9e0e8040,
+0x9c180034,
+0x07800000,
+0x07800000,
+0x06000033,
+0x9e0e8220,
+0x0aa076c0,
+0x9c1d0004,
+0x9c1d0044,
+0x07800000,
+0x9d0c0210,
+0x0a007770,
+0x06000023,
+0x0aa07720,
+0x9c1d0004,
+0x9d040004,
+0x9d100200,
+0x0a007770,
+0x06000043,
+0x0aa07770,
+0x9c180024,
+0x9d040004,
+0x9d180200,
+0x04800c44,
+0x05c00740,
+0x17800644,
+0x01000004,
+0x0a0075e0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x07800000,
+0x07800000,
+0x07800000,
+0x08400000,
+0x0a000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00151000,
+0x00201000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00000000,
+0x00001011,
+0x00001011,
+0x00021031,
+0x00041051,
+0x00061071,
+0x00081091,
+0x000a10b1,
+0x000c10d1,
+0x00001001,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00001011,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001041,
+0x00001000,
+0x00000000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00000000,
+0x00000000,
+0x00001001,
+0x00001001,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001001,
+0x00000001,
+0x00001000,
+0x00000858,
+0x00001000,
+0x00001001,
+0x00000001,
+0x00001000,
+0x00000858,
+0x00151000,
+0x00000858,
+0x00000858,
+0x00151000,
+0x00001000,
+0x00001001,
+0x00000001,
+0x00001000,
+0x00000898,
+0x00001000,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001001,
+0x000010c1,
+0x00001000,
+0x000010c1,
+0x00001001,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001061,
+0x00001031,
+0x00001061,
+0x00001021,
+0x00001061,
+0x00001031,
+0x00001061,
+0x00001021,
+0x00001061,
+0x00001031,
+0x00001061,
+0x00001021,
+0x00001061,
+0x00001031,
+0x00001061,
+0x00001021,
+0x00001061,
+0x00001021,
+0x00001061,
+0x00001031,
+0x00151000,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001071,
+0x000000aa,
+0x00001071,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001000,
+0x0000004d,
+0x000000a8,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x003c1000,
+0x00001000,
+0x003c1000,
+0x00001001,
+0x00001000,
+0x0000004d,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001001,
+0x00001001,
+0x00001000,
+0x00001051,
+0x00001000,
+0x00001001,
+0x00001051,
+0x00001001,
+0x000000c6,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001001,
+0x00001001,
+0x0000fff9,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00001001,
+0x00001091,
+0x00001000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00001091,
+0x00001091,
+0x00001091,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00001001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00001001,
+0x00000000,
+0x00011001,
+0x00001000,
+0x00151000,
+0x00001001,
+0x00000001,
+0x00001000,
+0x00000858,
+0x00000858,
+0x00001000,
+0x00151000,
+0x00151000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001000,
+0x00001001,
+0x00000001,
+0x00001000,
+0x00000858,
+0x00000858,
+0x00000000,
+0x00000000,
+0x00001001,
+0x00000000,
+0x00000000,
+0x00001000,
+0x00000000,
+0x00000000,
+0x00001001,
+0x00000000,
+0x00001001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000008,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00700001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00100001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00fc4793,
+0x000a8b43,
+0x00e7110b,
+0x003411d3,
+0x009d1cfb,
+0x0002b6a8,
+0x00fb8ca8,
+0x0006dac8,
+0x00f610e8,
+0x000d8628,
+0x00eed498,
+0x0013f0e8,
+0x00eb9d08,
+0x000f02e8,
+0x00056260,
+0x00bf42f0,
+0x006a9a5d,
+0x005ed280,
+0x00023c94,
+0x00fb2e83,
+0x000c8ac3,
+0x00e408e3,
+0x0037ceab,
+0x0099fe5b,
+0x0002b430,
+0x00fbb618,
+0x000661c0,
+0x00f71850,
+0x000b9710,
+0x00f220d0,
+0x000eb9d8,
+0x00f37700,
+0x0003c304,
+0x00142c90,
+0x00b09170,
+0x0066c791,
+0x006d23e0,
+0x000208b8,
+0x00fa9443,
+0x000d8413,
+0x00e2e293,
+0x00386703,
+0x009badeb,
+0x000296d8,
+0x00fc0144,
+0x0005c418,
+0x00f83d98,
+0x0009a2c0,
+0x00f53c30,
+0x000a1678,
+0x00fa10a0,
+0x00fadbf0,
+0x001ef5c0,
+0x00a7ab80,
+0x0061b101,
+0x00782010,
+0x000464a8,
+0x00fa1fab,
+0x000e2353,
+0x00e28193,
+0x003770b3,
+0x00a04a0b,
+0x000264f0,
+0x00fc6e8c,
+0x0004f108,
+0x00f9b218,
+0x00073f50,
+0x00f8ea80,
+0x0004be58,
+0x005c1863,
+0x00f141c8,
+0x002a0528,
+0x009fa4a8,
+0x005b6439,
+0x0042ccf5,
+0x0005d8f0,
+0x00f9f08b,
+0x000e2e23,
+0x00e33ce3,
+0x00348833,
+0x00a80de3,
+0x00022008,
+0x00fcf654,
+0x0003fc0c,
+0x00fb4d68,
+0x0004b6b0,
+0x00fcaf74,
+0x00dcf75b,
+0x000875d0,
+0x00e88c00,
+0x00334af8,
+0x009a9170,
+0x0053f3c9,
+0x00498425,
+0x0007d850,
+0x00fa0193,
+0x000db28b,
+0x00e4f083,
+0x0030005b,
+0x00b24bfb,
+0x007356ab,
+0x00fd8f38,
+0x0002f50c,
+0x00fcf624,
+0x00022f7c,
+0x0014e623,
+0x00fa8240,
+0x000ec210,
+0x00e12888,
+0x003a6068,
+0x00989408,
+0x004b98d9,
+0x004fdc61,
+0x000aadb0,
+0x00fa47fb,
+0x000cc843,
+0x00e76cb3,
+0x002a3303,
+0x00be6543,
+0x005c437b,
+0x008ca913,
+0x007977d3,
+0x00a7922b,
+0x00ef1dc3,
+0x0003bf5c,
+0x00f604a8,
+0x00143b08,
+0x00db27e8,
+0x003f50c0,
+0x009968b8,
+0x00427b21,
+0x0055cb8d,
+0x000e5198,
+0x00faba2b,
+0x000b83a3,
+0x00ea894b,
+0x00236a23,
+0x00cbda13,
+0x004398b3,
+0x00b6e5bb,
+0x003575db,
+0x000eee5b,
+0x00fd6ac0,
+0x0006e2f8,
+0x00f20c30,
+0x0018d298,
+0x00d68e40,
+0x00423000,
+0x009cd148,
+0x00716ee8,
+0x005b537d,
+0x0012b270,
+0x00fb4feb,
+0x0009f65b,
+0x00ee2313,
+0x001be763,
+0x00da372b,
+0x002a1533,
+0x00e15813,
+0x00f2fbd3,
+0x007127a3,
+0x00fb4778,
+0x0009ae08,
+0x00eeab88,
+0x001c7728,
+0x00d365f8,
+0x004306e0,
+0x00a2a0d0,
+0x005cd070,
+0x00606a75,
+0x0017d078,
+0x00fc00bb,
+0x0008330b,
+0x00f2141b,
+0x0013f29b,
+0x00e8fcc3,
+0x00108f73,
+0x000aabc3,
+0x00b409ab,
+0x00032d94,
+0x00f961f0,
+0x000c0d48,
+0x00ebf900,
+0x001f1310,
+0x00d1bd70,
+0x0041db98,
+0x00aaa7f0,
+0x004768e0,
+0x0064f695,
+0x001db640,
+0x00fcc40b,
+0x00064cab,
+0x00f635fb,
+0x000bd423,
+0x00f7ab83,
+0x00f7da9b,
+0x00319613,
+0x00fdea24,
+0x00046be0,
+0x00f7c838,
+0x000def88,
+0x00ea0778,
+0x002095b8,
+0x00d19bf0,
+0x003ebd10,
+0x00b4aec0,
+0x00318e40,
+0x0068d739,
+0x002477b8,
+0x00fd9103,
+0x000456bb,
+0x00fa6163,
+0x0003d51b,
+0x0005c47b,
+0x00e0c55b,
+0x0054da23,
+0x00fd2110,
+0x000575e0,
+0x00f68680,
+0x000f46d0,
+0x00e8e4e8,
+0x0020f5c0,
+0x00d2fe70,
+0x0039c7a8,
+0x00c06ee8,
+0x001ba3e0,
+0x006be82d,
+0x002c29d8,
+0x00fe5ef3,
+0x000264b3,
+0x00fe6f83,
+0x00fc3c73,
+0x0012cf0b,
+0x00cc0f3b,
+0x00735b43,
+0x00fc7b10,
+0x00064398,
+0x00f5a608,
+0x001009d8,
+0x00e89830,
+0x002033b8,
+0x00d5d488,
+0x003327e0,
+0x00cd9208,
+0x000617a0,
+0x006e0619,
+0x0034dcb8,
+0x00ff2633,
+0x0000878b,
+0x00023efb,
+0x00f54613,
+0x001e683b,
+0x00ba501b,
+0x00023104,
+0x00fbfc98,
+0x0006cfe8,
+0x00f52c00,
+0x001034f8,
+0x00e92088,
+0x001e5960,
+0x00da0390,
+0x002b1358,
+0x00dbbf58,
+0x00f14c40,
+0x006f129d,
+0x003ea030,
+0x00ffdfeb,
+0x00feceb3,
+0x0005b193,
+0x00ef271b,
+0x00283a63,
+0x00ac06cb,
+0x00027b84,
+0x00fba8d8,
+0x00071778,
+0x00f51a98,
+0x000fc900,
+0x00ea7720,
+0x001b7880,
+0x00df6710,
+0x0021ca50,
+0x00ea95d0,
+0x00dda780,
+0x006ef2a5,
+0x00497dd8,
+0x00000003,
+0x00fd4a6b,
+0x0008a803,
+0x00ea151b,
+0x002ff333,
+0x00a1a463,
+0x0002aad0,
+0x00fb81d8,
+0x00071978,
+0x00f56fc8,
+0x000ecde0,
+0x00ec89f0,
+0x0017b2e0,
+0x00e5c408,
+0x0017aaa0,
+0x00f99118,
+0x00cbaf78,
+0x006d93e5,
+0x005561e0,
+0x00000020,
+0x003fffe0,
+0x00000020,
+0x003fffe0,
+0x00069cf3,
+0x00eda3d3,
+0x00284343,
+0x00b2821b,
+0x000228bc,
+0x00fc4508,
+0x0006a660,
+0x00f19bc0,
+0x007f26c5,
+0x00107398,
+0x00f8cbc0,
+0x0003fef8,
+0x00fdafb8,
+0x00539323,
+0x00d40cc3,
+0x0014740b,
+0x00f855f3,
+0x0001b16b,
+0x000c0373,
+0x00ddec9b,
+0x004b880b,
+0x00fdb6d0,
+0x00041728,
+0x00f8ede8,
+0x000c8b38,
+0x00e57730,
+0x007ca649,
+0x0022b568,
+0x00f146b8,
+0x00081d20,
+0x00fb4da0,
+0x0002a8ac,
+0x00a5ff43,
+0x002a4a93,
+0x00efdbfb,
+0x0003c6eb,
+0x00101e33,
+0x00d13c0b,
+0x0068d11b,
+0x00fcce9c,
+0x0005bc08,
+0x00f61488,
+0x001185c8,
+0x00dbb070,
+0x00788dfd,
+0x00367598,
+0x00e9b5b8,
+0x000c31f8,
+0x00f8f1a0,
+0x00040178,
+0x00fdde98,
+0x0040ab93,
+0x00e6e21b,
+0x0006309b,
+0x0012ea3b,
+0x00c7c91b,
+0x007f746f,
+0x00fc1754,
+0x00070c10,
+0x00f3cc50,
+0x00157878,
+0x00d45408,
+0x0072f70d,
+0x004b56d0,
+0x00e26390,
+0x001012a8,
+0x00f6b500,
+0x00054a20,
+0x00fd2bf0,
+0x0056a233,
+0x00ddc993,
+0x0008d62b,
+0x00147503,
+0x00c1a1c3,
+0x00023c70,
+0x00fb9490,
+0x0007fff0,
+0x00f221f8,
+0x00185148,
+0x00cf5d50,
+0x006c0395,
+0x0060f058,
+0x00db9f08,
+0x00139340,
+0x00f4b188,
+0x00067398,
+0x00fc8844,
+0x006b2573,
+0x00d501e3,
+0x000b96fb,
+0x0014d9d3,
+0x00beae3b,
+0x00025f04,
+0x00fb4790,
+0x00089470,
+0x00f11b68,
+0x001a0988,
+0x00ccb738,
+0x0063ddc5,
+0x0076d0d4,
+0x00d5b880,
+0x001688a8,
+0x00f30060,
+0x00076f18,
+0x00fbfbf8,
+0x007d228f,
+0x00cd04b3,
+0x000e4b13,
+0x00143ecb,
+0x00beb5b3,
+0x000266a0,
+0x00fb2f48,
+0x0008ca48,
+0x00f0b7e8,
+0x001aa578,
+0x00cc3da8,
+0x005ab67d,
+0x004640a1,
+0x00d0ff50,
+0x0018ca30,
+0x00f1b920,
+0x00082ea8,
+0x00fb8f00,
+0x00022e24,
+0x00c650c3,
+0x0010c49b,
+0x0012d1ab,
+0x00c1642b,
+0x0002555c,
+0x00fb48b0,
+0x0008a5d0,
+0x00f0f0a0,
+0x001a3350,
+0x00cdbf38,
+0x0050c415,
+0x0050c415,
+0x00cdbf38,
+0x001a3350,
+0x00f0f0a0,
+0x0008a5d0,
+0x00fb48b0,
+0x0002555c,
+0x00c1642b,
+0x0012d1ab,
+0x0010c49b,
+0x00c650c3,
+0x00022e24,
+0x00fb8f00,
+0x00082ea8,
+0x00f1b920,
+0x0018ca30,
+0x00d0ff50,
+0x004640a1,
+0x005ab67d,
+0x00cc3da8,
+0x001aa578,
+0x00f0b7e8,
+0x0008ca48,
+0x00fb2f48,
+0x000266a0,
+0x00beb5b3,
+0x00143ecb,
+0x000e4b13,
+0x00cd04b3,
+0x007d228f,
+0x00fbfbf8,
+0x00076f18,
+0x00f30060,
+0x001688a8,
+0x00d5b880,
+0x0076d0d4,
+0x0063ddc5,
+0x00ccb738,
+0x001a0988,
+0x00f11b68,
+0x00089470,
+0x00fb4790,
+0x00025f04,
+0x00beae3b,
+0x0014d9d3,
+0x000b96fb,
+0x00d501e3,
+0x006b2573,
+0x00fc8844,
+0x00067398,
+0x00f4b188,
+0x00139340,
+0x00db9f08,
+0x0060f058,
+0x006c0395,
+0x00cf5d50,
+0x00185148,
+0x00f221f8,
+0x0007fff0,
+0x00fb9490,
+0x00023c70,
+0x00c1a1c3,
+0x00147503,
+0x0008d62b,
+0x00ddc993,
+0x0056a233,
+0x00fd2bf0,
+0x00054a20,
+0x00f6b500,
+0x001012a8,
+0x00e26390,
+0x004b56d0,
+0x0072f70d,
+0x00d45408,
+0x00157878,
+0x00f3cc50,
+0x00070c10,
+0x00fc1754,
+0x007f746f,
+0x00c7c91b,
+0x0012ea3b,
+0x0006309b,
+0x00e6e21b,
+0x0040ab93,
+0x00fdde98,
+0x00040178,
+0x00f8f1a0,
+0x000c31f8,
+0x00e9b5b8,
+0x00367598,
+0x00788dfd,
+0x00dbb070,
+0x001185c8,
+0x00f61488,
+0x0005bc08,
+0x00fcce9c,
+0x0068d11b,
+0x00d13c0b,
+0x00101e33,
+0x0003c6eb,
+0x00efdbfb,
+0x002a4a93,
+0x00a5ff43,
+0x0002a8ac,
+0x00fb4da0,
+0x00081d20,
+0x00f146b8,
+0x0022b568,
+0x007ca649,
+0x00e57730,
+0x000c8b38,
+0x00f8ede8,
+0x00041728,
+0x00fdb6d0,
+0x004b880b,
+0x00ddec9b,
+0x000c0373,
+0x0001b16b,
+0x00f855f3,
+0x0014740b,
+0x00d40cc3,
+0x00539323,
+0x00fdafb8,
+0x0003fef8,
+0x00f8cbc0,
+0x00107398,
+0x007f26c5,
+0x00f19bc0,
+0x0006a660,
+0x00fc4508,
+0x000228bc,
+0x00b2821b,
+0x00284343,
+0x00eda3d3,
+0x00069cf3,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00040002,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000003,
+0x00000020,
+0x003fffe0,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x004b54e7,
+0x002866b7,
+0x0002526c,
+0x005d3e43,
+0x0002526c,
+0x002866b7,
+0x004b54e7,
+0x0090828c,
+0x00097262,
+0x00e8875a,
+0x0021f9ea,
+0x00e1aff2,
+0x000feece,
+0x000387fc,
+0x0079341b,
+0x0006f740,
+0x00045eec,
+0x0006f740,
+0x0079341b,
+0x000387fc,
+0x0090828c,
+0x00097262,
+0x00e8875a,
+0x0021f9ea,
+0x00e1aff2,
+0x000feece,
+0x007de295,
+0x00f821da,
+0x007de295,
+0x008431e5,
+0x0007dde2,
+0x0012f747,
+0x00c81d03,
+0x005f165b,
+0x0094600b,
+0x005f165b,
+0x00c81d03,
+0x0012f747,
+0x00aa0ab1,
+0x001057be,
+0x00d5b832,
+0x003b8bf6,
+0x00cfd2ca,
+0x00154066,
+0x0071cb9f,
+0x00fac2b8,
+0x0008ea18,
+0x00f5e900,
+0x0008ea18,
+0x00fac2b8,
+0x0071cb9f,
+0x00aa0ab1,
+0x001057be,
+0x00d5b832,
+0x003b8bf6,
+0x00cfd2ca,
+0x00154066,
+0x008e3ba9,
+0x000aaa6a,
+0x00f5559a,
+0x0071c459,
+0x00651dd9,
+0x00f5c6b6,
+0x000b0ede,
+0x00000020,
+0x003fffe0,
+0x00269ec3,
+0x000d0ff4,
+0x00051eba,
+0x00640001,
+0x0002f290,
+0x00fdd340,
+0x0002a810,
+0x0002a810,
+0x00fdd340,
+0x0002f290,
+0x0045a895,
+0x00f4a186,
+0x0018a312,
+0x00e445b2,
+0x0010419e,
+0x000b68e0,
+0x0021f7f0,
+0x0044471c,
+0x005c5e48,
+0x005c5e48,
+0x0044471c,
+0x0021f7f0,
+0x000b68e0,
+0x0020ff38,
+0x00b24b3d,
+0x00062d86,
+0x00f4f6ea,
+0x000d3f5a,
+0x00f2ea1a,
+0x00075f92,
+0x00c1248b,
+0x00fd1080,
+0x00faca4c,
+0x00fab048,
+0x00fdb0ac,
+0x00024f54,
+0x00054fb8,
+0x000535b4,
+0x0002ef80,
+0x003edb7b,
+0x001d92ec,
+0x00962b59,
+0x000bd422,
+0x00e48132,
+0x002dbdc2,
+0x00c7a94a,
+0x0033fbe6,
+0x00dd3502,
+0x000fea26,
+0x00c1248b,
+0x00fd1080,
+0x00faca4c,
+0x00fab048,
+0x00fdb0ac,
+0x00024f54,
+0x00054fb8,
+0x000535b4,
+0x0002ef80,
+0x003edb7b,
+0x001d92ec,
+0x00962b59,
+0x000bd422,
+0x00e48132,
+0x002dbdc2,
+0x00c7a94a,
+0x0033fbe6,
+0x00dd3502,
+0x000fea26,
+0x00400000,
+0x00100002,
+0x007f0001,
+0x00040002,
+0x00040002,
+0x00000020,
+0x003fffe0,
+0x00000020,
+0x003fffe0,
+0x0000e95b,
+0x00045e13,
+0x00e38bd3,
+0x0049a0eb,
+0x0086a523,
+0x00021a60,
+0x00b3b22b,
+0x00c008cb,
+0x0003ee58,
+0x00f98190,
+0x0007bebc,
+0x00f927cc,
+0x000369f8,
+0x000210a0,
+0x00f7d700,
+0x000cf630,
+0x00f0b350,
+0x000f00d8,
+0x00f45884,
+0x0004930c,
+0x000cca50,
+0x00d5b1a0,
+0x0036ee50,
+0x00c50200,
+0x00c1a4a3,
+0x005fc0f5,
+0x004f99c8,
+0x00026924,
+0x00ff83fb,
+0x0008de8b,
+0x00de91f3,
+0x0044c043,
+0x00a56883,
+0x0040eb8b,
+0x001e74a3,
+0x00fd21b0,
+0x000554e4,
+0x00f94c68,
+0x000647f4,
+0x00fc5390,
+0x00c48fa3,
+0x00067e44,
+0x00f4b078,
+0x000daa00,
+0x00f31630,
+0x00095c18,
+0x00fd153c,
+0x00f92d4c,
+0x00187260,
+0x00d118b0,
+0x002e0ce0,
+0x00dfc588,
+0x00d88710,
+0x005ade01,
+0x0069ab18,
+0x000581a8,
+0x00ff0f2b,
+0x000a2003,
+0x00e0f9b3,
+0x00341ca3,
+0x00d0769b,
+0x00f72c73,
+0x0079f1eb,
+0x00fbed28,
+0x0005aab8,
+0x00fa5660,
+0x0003b918,
+0x00028fb3,
+0x00fb129c,
+0x00098f30,
+0x00f3bff8,
+0x000bc1d8,
+0x00f7e048,
+0x000228e8,
+0x0005dda0,
+0x00f00dc0,
+0x001eb940,
+0x00d459f0,
+0x001db068,
+0x00fcb1e0,
+0x00bb70e0,
+0x00500125,
+0x00423c25,
+0x000cb760,
+0x00ff3a63,
+0x0008dcf3,
+0x00e89f33,
+0x001cf00b,
+0x00fdff03,
+0x00b7a13b,
+0x0002d9ac,
+0x00fb9038,
+0x0004ee8c,
+0x00fc6088,
+0x0025bba3,
+0x000396e8,
+0x00f82434,
+0x000abf70,
+0x00f51e3c,
+0x0007b174,
+0x0083c45b,
+0x00fadcd0,
+0x000d0158,
+0x00eaba28,
+0x001ef828,
+0x00de61d8,
+0x00091358,
+0x00172bd8,
+0x00aaa5f8,
+0x007f9968,
+0x004ead35,
+0x0016d358,
+0x00ff9df3,
+0x00063423,
+0x00f2bc93,
+0x0004db3b,
+0x002510d3,
+0x008d0163,
+0x00032dec,
+0x00fc0c14,
+0x00035394,
+0x00bfd6bb,
+0x00fd7bc4,
+0x000653e0,
+0x00f6c644,
+0x0009e92c,
+0x00f874d4,
+0x00025014,
+0x000466f8,
+0x00f4e050,
+0x00113910,
+0x00e9e9d8,
+0x0019a460,
+0x00ed06b8,
+0x00f3f088,
+0x002b3ea8,
+0x00a75d50,
+0x00573b88,
+0x0058acdd,
+0x00253e48,
+0x00ffe99b,
+0x00034623,
+0x00fca67b,
+0x00f0ade3,
+0x003f433b,
+0x00fdf30c,
+0x0002e438,
+0x00fd3880,
+0x004d26e3,
+0x006be023,
+0x00fafa08,
+0x0007c8c0,
+0x00f72900,
+0x000747f8,
+0x00fd12b4,
+0x00fcac44,
+0x0009b130,
+0x00f14b30,
+0x0011de10,
+0x00ed7168,
+0x00102888,
+0x00fd78fc,
+0x00e1dd38,
+0x003632e8,
+0x00b0f308,
+0x002ad4a0,
+0x005ec5d9,
+0x00385800,
+0x00fed5c7,
+0x0001d26f,
+0x00fa588b,
+0x000daca7,
+0x00e3d6d3,
+0x00339bc7,
+0x00a97e63,
+0x00021db8,
+0x00fcd778,
+0x00049f28,
+0x00f8fde8,
+0x000d23e8,
+0x007d7e59,
+0x00f89bb0,
+0x000259b4,
+0x00d1846b,
+0x0003ff83,
+0x000bf0f7,
+0x00f0e3cf,
+0x000ca4f7,
+0x00f77a0b,
+0x0004c883,
+0x00fdcf83,
+0x0000c0c7,
+0x00ff662f,
+0x0002ff27,
+0x00f6afb3,
+0x0016defb,
+0x00cfc55f,
+0x005b288f,
+0x00fd84f0,
+0x000411f4,
+0x00f99078,
+0x000a2044,
+0x00ef1978,
+0x0024d648,
+0x007aa5a5,
+0x00e7c3e8,
+0x000aa4ec,
+0x00fa6a80,
+0x000304e0,
+0x009a1a6f,
+0x0032b7db,
+0x00e8d6d7,
+0x000963f3,
+0x00fcc6cf,
+0x0000dc7b,
+0x00ffda2f,
+0x00ff1ed3,
+0x0004309f,
+0x00f31b9b,
+0x001faed3,
+0x00bcc34f,
+0x00020190,
+0x00fc755c,
+0x0005e8c8,
+0x00f675cc,
+0x000f6978,
+0x00e54b98,
+0x003efe58,
+0x007512ad,
+0x00daf028,
+0x001174bc,
+0x00f652f4,
+0x000589a8,
+0x00fce2b4,
+0x006b13b7,
+0x00ca4a77,
+0x00188ddf,
+0x00f61837,
+0x000357df,
+0x00ff2823,
+0x00fed6b7,
+0x00054b13,
+0x00efed7f,
+0x0027599b,
+0x00ac68e3,
+0x00028110,
+0x00fb8cf0,
+0x00077d74,
+0x00f3c420,
+0x00141628,
+0x00dc4340,
+0x005ac084,
+0x006d00dd,
+0x00d23fd0,
+0x00167f08,
+0x00f33398,
+0x00077ccc,
+0x00fbb300,
+0x00025c40,
+0x00b2862b,
+0x0024406f,
+0x00f10627,
+0x00052b3f,
+0x00feadf3,
+0x00fe9647,
+0x00062edf,
+0x00ed7c03,
+0x002d1867,
+0x00a04bef,
+0x0002df40,
+0x00fae088,
+0x0008acc0,
+0x00f1b5e8,
+0x0017c370,
+0x00d4b638,
+0x0077237c,
+0x0062c57d,
+0x00cd9f14,
+0x00199c28,
+0x00f12c50,
+0x0008c91c,
+0x00fae638,
+0x0002d380,
+0x00a270f7,
+0x002c1793,
+0x00edb29f,
+0x00065057,
+0x00fe69af,
+0x00fe678b,
+0x0006badb,
+0x00ec1c5f,
+0x00302fa7,
+0x0099df87,
+0x00031180,
+0x00fa8294,
+0x000957b0,
+0x00f07efc,
+0x001a1714,
+0x00cf5714,
+0x00498dbd,
+0x0056cb65,
+0x00cccae8,
+0x001ac64c,
+0x00f04854,
+0x0009661c,
+0x00fa8180,
+0x00030f60,
+0x009a52c3,
+0x00300017,
+0x00ec1ad7,
+0x0006cf1f,
+0x00fe552f,
+0x00fe552f,
+0x0006cf1f,
+0x00ec1ad7,
+0x00300017,
+0x009a52c3,
+0x00030f60,
+0x00fa8180,
+0x0009661c,
+0x00f04854,
+0x001ac64c,
+0x00cccae8,
+0x0056cb65,
+0x00498dbd,
+0x00cf5714,
+0x001a1714,
+0x00f07efc,
+0x000957b0,
+0x00fa8294,
+0x00031180,
+0x0099df87,
+0x00302fa7,
+0x00ec1c5f,
+0x0006badb,
+0x00fe678b,
+0x00fe69af,
+0x00065057,
+0x00edb29f,
+0x002c1793,
+0x00a270f7,
+0x0002d380,
+0x00fae638,
+0x0008c91c,
+0x00f12c50,
+0x00199c28,
+0x00cd9f14,
+0x0062c57d,
+0x0077237c,
+0x00d4b638,
+0x0017c370,
+0x00f1b5e8,
+0x0008acc0,
+0x00fae088,
+0x0002df40,
+0x00a04bef,
+0x002d1867,
+0x00ed7c03,
+0x00062edf,
+0x00fe9647,
+0x00feadf3,
+0x00052b3f,
+0x00f10627,
+0x0024406f,
+0x00b2862b,
+0x00025c40,
+0x00fbb300,
+0x00077ccc,
+0x00f33398,
+0x00167f08,
+0x00d23fd0,
+0x006d00dd,
+0x005ac084,
+0x00dc4340,
+0x00141628,
+0x00f3c420,
+0x00077d74,
+0x00fb8cf0,
+0x00028110,
+0x00ac68e3,
+0x0027599b,
+0x00efed7f,
+0x00054b13,
+0x00fed6b7,
+0x00ff2823,
+0x000357df,
+0x00f61837,
+0x00188ddf,
+0x00ca4a77,
+0x006b13b7,
+0x00fce2b4,
+0x000589a8,
+0x00f652f4,
+0x001174bc,
+0x00daf028,
+0x007512ad,
+0x003efe58,
+0x00e54b98,
+0x000f6978,
+0x00f675cc,
+0x0005e8c8,
+0x00fc755c,
+0x00020190,
+0x00bcc34f,
+0x001faed3,
+0x00f31b9b,
+0x0004309f,
+0x00ff1ed3,
+0x00ffda2f,
+0x0000dc7b,
+0x00fcc6cf,
+0x000963f3,
+0x00e8d6d7,
+0x0032b7db,
+0x009a1a6f,
+0x000304e0,
+0x00fa6a80,
+0x000aa4ec,
+0x00e7c3e8,
+0x007aa5a5,
+0x0024d648,
+0x00ef1978,
+0x000a2044,
+0x00f99078,
+0x000411f4,
+0x00fd84f0,
+0x005b288f,
+0x00cfc55f,
+0x0016defb,
+0x00f6afb3,
+0x0002ff27,
+0x00ff662f,
+0x0000c0c7,
+0x00fdcf83,
+0x0004c883,
+0x00f77a0b,
+0x000ca4f7,
+0x00f0e3cf,
+0x000bf0f7,
+0x0003ff83,
+0x00d1846b,
+0x000259b4,
+0x00f89bb0,
+0x007d7e59,
+0x000d23e8,
+0x00f8fde8,
+0x00049f28,
+0x00fcd778,
+0x00021db8,
+0x00a97e63,
+0x00339bc7,
+0x00e3d6d3,
+0x000daca7,
+0x00fa588b,
+0x0001d26f,
+0x00fed5c7,
+0x00000000,
+0x00040002,
+0x00000000,
+0x00040002,
+0x00020002,
+0x00f80002,
+0x001a88b7,
+0x003bed83,
+0x00789f93,
+0x000353c8,
+0x000558c0,
+0x0007faa0,
+0x000b314c,
+0x000edf48,
+0x0012cfec,
+0x0016b8f4,
+0x001a3f88,
+0x001d00ec,
+0x001e9df0,
+0x001ec7e4,
+0x001d4d44,
+0x001a2424,
+0x00157070,
+0x000f8484,
+0x0008db70,
+0x00020cec,
+0x00fbbc5c,
+0x00f684d0,
+0x00f2e4e0,
+0x00f12d44,
+0x00f174d8,
+0x00f3945c,
+0x00f72ab0,
+0x00fba984,
+0x001a22e3,
+0x0004bcfc,
+0x0008115c,
+0x0009fa78,
+0x000a45e0,
+0x0008fffc,
+0x00067070,
+0x00030d88,
+0x00da2f23,
+0x00fc1730,
+0x00f99a54,
+0x00f84c00,
+0x00f851a8,
+0x00f9982c,
+0x00fbd928,
+0x00aa16cf,
+0x0061c6c3,
+0x0003fa04,
+0x00059d78,
+0x0006357c,
+0x0005b618,
+0x0004437c,
+0x000229bc,
+0x00f3803f,
+0x00fd9c14,
+0x00fbf354,
+0x00fb16bc,
+0x00fb2258,
+0x00fc0838,
+0x00fd94b0,
+0x00de4c6f,
+0x0056ddab,
+0x0002e608,
+0x0003d744,
+0x00040c18,
+0x000384e8,
+0x000263b0,
+0x00391a1f,
+0x00d43a5f,
+0x00fdf2ec,
+0x00fd06bc,
+0x00fcb100,
+0x00fcf934,
+0x00fdc9dc,
+0x00bd738f,
+0x00106c83,
+0x005be09f,
+0x00024a04,
+0x0002ae04,
+0x00028fd4,
+0x007f0d8f,
+0x004529ff,
+0x00020d4b,
+0x00c2867f,
+0x00920a0f,
+0x00fde2b0,
+0x00fde758,
+0x0093e747,
+0x00c0cb77,
+0x00f754ab,
+0x002d163f,
+0x00585e1f,
+0x0071ef8b,
+0x00763257,
+0x0065a79f,
+0x00449a33,
+0x001a255f,
+0x00eec953,
+0x00cad5f7,
+0x00b4f72f,
+0x00b1226f,
+0x00c00eeb,
+0x00df46af,
+0x0009c1f7,
+0x0038e64f,
+0x0065b5ef,
+0x000227dc,
+0x00028480,
+0x0002a3a4,
+0x000285b0,
+0x00023398,
+0x006f1f67,
+0x004caed7,
+0x002a2af7,
+0x000b7d53,
+0x00f3677b,
+0x00e349ef,
+0x00db2ccf,
+0x00d9fec3,
+0x00ddf61b,
+0x00e4fe17,
+0x00ed1b4f,
+0x00f068f7,
+0x0084edbd,
+0x007b1245,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00762489,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00009494,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000010,
+0x00000000,
+0x00000000,
+0x000004fc,
+0x00000000,
+0x00000000,
+0x00000018,
+0x000004f4,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00a0000c,
+0x003c00a1,
+0x00000000,
+0x00000000,
+0x000005ba,
+0x00a2000c,
+0x003c00a3,
+0x00000000,
+0x00000000,
+0x000005ba,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0000000a,
+0x7fff7fff,
+0x7fff7fff,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000004,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x003ffff0,
+0x00000000,
+0x00400000,
+0x00000004,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x003ffff0,
+0x00000000,
+0x00400000,
+0x01f501e2,
+0x021b0208,
+0x0241022e,
+0x02670254,
+0x028d027a,
+0x02b302a0,
+0x02d902c6,
+0x02ff02ec,
+0x00000004,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x003ffff0,
+0x00000000,
+0x00400000,
+0x03280316,
+0x034c033a,
+0x0370035e,
+0x03940382,
+0x03b803a6,
+0x03dc03ca,
+0x040003ee,
+0x04240412,
+0x00001400,
+0x000000ff,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0aa40c24,
+0x00000c34,
+0x0a940c04,
+0x00000c14,
+0x0e640bb4,
+0x00000ba4,
+0x0e540b94,
+0x00000b84,
+0x0a840be4,
+0x00000bf4,
+0x0a740bc4,
+0x00000bd4,
+0x0e440b34,
+0x00000b24,
+0x0e340b14,
+0x00000b04,
+0x0a640b64,
+0x00000b74,
+0x0a540b44,
+0x00000b54,
+0x00000004,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x003ffff0,
+0x00000000,
+0x00400000,
+0x00000004,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x003ffff0,
+0x00000000,
+0x00400000,
+0x0a740bc4,
+0x000010f4,
+0x0a840be4,
+0x00001104,
+0x11340b14,
+0x00000000,
+0x11540b94,
+0x00000000,
+0x00560014,
+0x00570003,
+0x005a0058,
+0x00000013,
+0x00560014,
+0x00570006,
+0x00610058,
+0x00000013,
+0x00560019,
+0x00570003,
+0x005a0058,
+0x033c0013,
+0x00560019,
+0x00570006,
+0x00610058,
+0x033c0013,
+0x00640014,
+0x0065010d,
+0x00680066,
+0x00000012,
+0x005b0015,
+0x005c0002,
+0x005f005d,
+0x00000013,
+0x005b0015,
+0x005c0005,
+0x0062005d,
+0x00000013,
+0x005b001a,
+0x005c0002,
+0x005f005d,
+0x038c0013,
+0x005b001a,
+0x005c0005,
+0x0062005d,
+0x038c0013,
+0x01050003,
+0x0001008e,
+0x00900001,
+0x00000000,
+0x01060003,
+0x0001008f,
+0x00910001,
+0x00000000,
+0x01070003,
+0x00010129,
+0x00900001,
+0x00000000,
+0x01070003,
+0x0001012a,
+0x00910001,
+0x00000000,
+0x00ab0003,
+0x0001008e,
+0x00900001,
+0x00000000,
+0x00ab0003,
+0x0001008f,
+0x00910001,
+0x00000000,
+0x0032000b,
+0x00710030,
+0x004e0072,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01100001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01110001,
+0x00000000,
+0x000d000a,
+0x000f000e,
+0x00110010,
+0x00000000,
+0x00280000,
+0x002b002a,
+0x002d002c,
+0x00000000,
+0x00240000,
+0x00790024,
+0x00920074,
+0x00000000,
+0x00030000,
+0x007a0003,
+0x009300ad,
+0x00000000,
+0x00240000,
+0x007b0024,
+0x00940077,
+0x00000000,
+0x00060000,
+0x007c0006,
+0x00950078,
+0x00000000,
+0x00290000,
+0x007d0105,
+0x00920073,
+0x00000000,
+0x008e0000,
+0x007e0060,
+0x00930075,
+0x00000000,
+0x00290000,
+0x007f0106,
+0x00940076,
+0x00000000,
+0x008f0000,
+0x00800063,
+0x00950078,
+0x00000000,
+0x01080000,
+0x00810108,
+0x00920074,
+0x00000000,
+0x010b0000,
+0x0082010b,
+0x00930075,
+0x00000000,
+0x01080000,
+0x00830108,
+0x00940077,
+0x00000000,
+0x010c0000,
+0x0084010c,
+0x00950078,
+0x00000000,
+0x00310000,
+0x00850107,
+0x00920073,
+0x00000000,
+0x01290000,
+0x00860109,
+0x00930075,
+0x00000000,
+0x00310000,
+0x00870107,
+0x00940076,
+0x00000000,
+0x012a0000,
+0x0088010a,
+0x00950078,
+0x00000000,
+0x00270000,
+0x008b00ab,
+0x00920073,
+0x00000000,
+0x008e0000,
+0x008c008d,
+0x00930075,
+0x00000000,
+0x00270000,
+0x008900ab,
+0x00940076,
+0x00000000,
+0x008f0000,
+0x008a008d,
+0x00950078,
+0x00000000,
+0x00310000,
+0x006f002f,
+0x004e0070,
+0x00000000,
+0x00300018,
+0x00980051,
+0x009700d0,
+0x00000000,
+0x002f0018,
+0x00960050,
+0x009700d2,
+0x00000000,
+0x002e0008,
+0x003c0031,
+0x00000017,
+0x00000000,
+0x00220008,
+0x003c0032,
+0x00000018,
+0x00000000,
+0x0137000c,
+0x00010001,
+0x01380001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00db0001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00dc0001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00dd0001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00de0001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00df0001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e00001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e10001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e20001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e30001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e40001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e50001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e60001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e70001,
+0x00000000,
+0x0001000d,
+0x00010001,
+0x00e80001,
+0x00000000,
+0x00fb000e,
+0x00010001,
+0x00fc0001,
+0x00000000,
+0x002f0005,
+0x001c0027,
+0x00000030,
+0x00000000,
+0x002a0005,
+0x001d002e,
+0x00000021,
+0x00000000,
+0x00080006,
+0x00260021,
+0x0000001a,
+0x00000000,
+0x00080006,
+0x00260022,
+0x0000001b,
+0x00000000,
+0x00080007,
+0x00260021,
+0x0000001a,
+0x00000000,
+0x00080007,
+0x00260022,
+0x0000001b,
+0x00000000,
+0x00080006,
+0x006a0069,
+0x0000001e,
+0x00000000,
+0x00080006,
+0x006a0029,
+0x0000001f,
+0x00000000,
+0x00080007,
+0x006a0029,
+0x0000001f,
+0x00000000,
+0x00c10001,
+0x00ca0052,
+0x0000011e,
+0x00000000,
+0x00030004,
+0x000c0024,
+0x00900001,
+0x00000000,
+0x00060004,
+0x000c0024,
+0x00910001,
+0x00000000,
+0x010b0004,
+0x000c0108,
+0x00900001,
+0x00000000,
+0x010c0004,
+0x000c0108,
+0x00910001,
+0x00000000,
+0x00300004,
+0x00fe0051,
+0x00fd0001,
+0x00000000,
+0x002f0004,
+0x00fe0050,
+0x00fd0001,
+0x00000000,
+0x00410002,
+0x00430042,
+0x00000016,
+0x00000000,
+0x00330002,
+0x00350034,
+0x00000013,
+0x00000000,
+0x00360002,
+0x00380037,
+0x00000014,
+0x00000000,
+0x00390002,
+0x003b003a,
+0x00000015,
+0x00000000,
+0x00690002,
+0x006c006b,
+0x00000019,
+0x00000000,
+0x01080002,
+0x0040003f,
+0x00000020,
+0x00000000,
+0x00080002,
+0x00470046,
+0x00000019,
+0x00000000,
+0x00cf0002,
+0x00c900c0,
+0x00000019,
+0x00000000,
+0x010d0002,
+0x010f010e,
+0x00000019,
+0x00000000,
+0x00270002,
+0x00450044,
+0x00000019,
+0x00000000,
+0x00010002,
+0x00010001,
+0x00000019,
+0x00000000,
+0x00010009,
+0x00010028,
+0x00530001,
+0x00000000,
+0x00010009,
+0x00010023,
+0x00540001,
+0x00000000,
+0x00c00011,
+0x00c200c1,
+0x00c700c3,
+0x00000000,
+0x00c80010,
+0x00cb00ca,
+0x00ce00cc,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01120001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01130001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01140001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01150001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01160001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01170001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01180001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01190001,
+0x00000000,
+0x0001000f,
+0x00010001,
+0x00010001,
+0x00000000,
+0x00490012,
+0x00d30041,
+0x00d600d4,
+0x00000000,
+0x004a0012,
+0x00d80033,
+0x00d700d5,
+0x00000000,
+0x004b0012,
+0x00d90036,
+0x00d700d5,
+0x00000000,
+0x004c0012,
+0x00da0039,
+0x00d700d5,
+0x00000000,
+0x011b0016,
+0x011c0001,
+0x011d0001,
+0x00000000,
+0x00010017,
+0x00010001,
+0x00000001,
+0x00000000,
+0x011f0014,
+0x0121010b,
+0x01240122,
+0x00000013,
+0x011f0014,
+0x0121010c,
+0x01250122,
+0x00000013,
+0x011f0019,
+0x0121010b,
+0x01240122,
+0x04040013,
+0x011f0019,
+0x0121010c,
+0x01250122,
+0x04040013,
+0x01260015,
+0x012d012b,
+0x0130012e,
+0x00000013,
+0x01260015,
+0x012d012c,
+0x0131012e,
+0x00000013,
+0x0126001a,
+0x012d012b,
+0x0130012e,
+0x02c40013,
+0x0126001a,
+0x012d012c,
+0x0131012e,
+0x02c40013,
+0x01290000,
+0x00860127,
+0x00930075,
+0x00000000,
+0x012a0000,
+0x00880128,
+0x00950078,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01320001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01330001,
+0x00000000,
+0x0003001b,
+0x01340024,
+0x01350001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01360001,
+0x00000000,
+0x010b001b,
+0x013a0108,
+0x01350001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01390001,
+0x00000000,
+0x00a7001c,
+0x004d0008,
+0x009e009c,
+0x00000000,
+0x00a9001c,
+0x00a80025,
+0x009f009d,
+0x00000000,
+0x00a7001d,
+0x004d0008,
+0x009e009c,
+0x00000000,
+0x00a9001d,
+0x00a80025,
+0x009f009d,
+0x00000000,
+0x00a7001e,
+0x004d0008,
+0x00ac009c,
+0x00000000,
+0x00a7001f,
+0x004d0008,
+0x00ac009c,
+0x00000000,
+0x00010020,
+0x00010001,
+0x00000001,
+0x00000000,
+0x00010021,
+0x00010001,
+0x00000001,
+0x00000000,
+0x00310022,
+0x013c0107,
+0x013d0001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x013e0001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x013f0001,
+0x00000000,
+0x00010013,
+0x00010001,
+0x01400001,
+0x00000000,
+0x00290022,
+0x01410105,
+0x013d0001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00003000,
+0x0041008a,
+0x003300d3,
+0x003600d8,
+0x003900d9,
+0x010500da,
+0x0105007d,
+0x0060007e,
+0x0060007e,
+0x010b007d,
+0x010b0082,
+0x01080081,
+0x01080082,
+0x00030081,
+0x00030134,
+0x00030079,
+0x0024007a,
+0x00240134,
+0x00240079,
+0x0107007a,
+0x0107013c,
+0x01090086,
+0x0109013c,
+0x002a0086,
+0x002f002b,
+0x0050006f,
+0x00300096,
+0x00510071,
+0x00ab0098,
+0x00ab008b,
+0x008d008c,
+0x008d008b,
+0x008d008c,
+0x00000000,
+0x0a7411f4,
+0x00000bd4,
+0x0a7411f4,
+0x000010f4,
+0x0a541234,
+0x00000b54,
+0x0443038f,
+0x03d5044e,
+0x006403df,
+0x004d0038,
+0x0481045c,
+0x03c00469,
+0x048e057f,
+0x00e905e0,
+0x01ed01c1,
+0x00d303a2,
+0x02020220,
+0x065e075a,
+0x022c03ea,
+0x040e0210,
+0x01ff0200,
+0x01fe0201,
+0x071706d4,
+0x00020422,
+0x010300f7,
+0x011e0112,
+0x0136012a,
+0x0154013d,
+0x0144014c,
+0x0176015b,
+0x0003019d,
+0x01bf01be,
+0x055e01c0,
+0x01a904a6,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00001580,
+0x00001580,
+0x00001580,
+0x0000193f,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00046f03,
+0x00000000,
+0x00047202,
+0x00000000,
+0x0006ba03,
+0x00000000,
+0x0004bc05,
+0x00000000,
+0x0004c104,
+0x00000000,
+0x0006b505,
+0x00000000,
+0x0001dc0c,
+0x00000000,
+0x0004c528,
+0x00000000,
+0x00051528,
+0x00000000,
+0x0006ef06,
+0x00000000,
+0x00028318,
+0x00000000,
+0x00019312,
+0x00000000,
+0x0001a512,
+0x00019000,
+0x0001b524,
+0x00019112,
+0x0001a312,
+0x00120024,
+0x0006dc00,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b50e,
+0x0001b80c,
+0x0001c304,
+0x0001b80c,
+0x0001c704,
+0x00000000,
+0x0001cb02,
+0x00000000,
+0x0001cd02,
+0x0001c40c,
+0x0001cf04,
+0x0001c40c,
+0x0001d304,
+0x0001d702,
+0x0001d702,
+0x00000000,
+0x0001e80c,
+0x00000000,
+0x0001f40c,
+0x00000000,
+0x0001d00c,
+0x00000000,
+0x0001c40c,
+0x00000000,
+0x0001b80c,
+0x0001c40c,
+0x0001d00c,
+0x00000000,
+0x0002000c,
+0x00000000,
+0x0002180c,
+0x00000000,
+0x0002240c,
+0x00000000,
+0x0002560c,
+0x00026209,
+0x00026209,
+0x00000000,
+0x0001d909,
+0x000b0009,
+0x00000000,
+0x00000000,
+0x00020c0c,
+0x00000000,
+0x0006bd0c,
+0x00000000,
+0x0006c90c,
+0x00000000,
+0x0006d50c,
+0x00000000,
+0x0006e10c,
+0x00000000,
+0x00029b0c,
+0x00028318,
+0x0002cb0c,
+0x00083900,
+0x0002d70c,
+0x00000000,
+0x0002a70c,
+0x00028300,
+0x0002e30c,
+0x00083900,
+0x0002ef0c,
+0x00000000,
+0x0002b30c,
+0x00028318,
+0x0002fb0c,
+0x00083900,
+0x0003070c,
+0x00083801,
+0x00083901,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00028318,
+0x0003130c,
+0x00083900,
+0x00031f0c,
+0x00000000,
+0x0002bf0c,
+0x00028318,
+0x00032b0c,
+0x00083900,
+0x0003370c,
+0x00028318,
+0x0003430c,
+0x00083900,
+0x00034f0c,
+0x00028318,
+0x00035b0c,
+0x00083900,
+0x0003670c,
+0x00000000,
+0x00037378,
+0x00000000,
+0x0003eb18,
+0x00000000,
+0x00040318,
+0x00000000,
+0x00041b18,
+0x00000000,
+0x00043318,
+0x00000000,
+0x0008c860,
+0x000b0019,
+0x00000000,
+0x000b0009,
+0x00000000,
+0x00000000,
+0x00047418,
+0x00000000,
+0x00048c18,
+0x00000000,
+0x0004a418,
+0x000b080c,
+0x00000000,
+0x000b0808,
+0x00000000,
+0x000b07f4,
+0x00000000,
+0x00000000,
+0x0004c528,
+0x0002ec13,
+0x0002ff13,
+0x0004c528,
+0x00031400,
+0x00031500,
+0x00020600,
+0x00590002,
+0x00038c00,
+0x00000000,
+0x0004ed28,
+0x0002ec13,
+0x0002ff13,
+0x0004ed28,
+0x00031200,
+0x00031300,
+0x00020e00,
+0x005e0002,
+0x00033c00,
+0x00000000,
+0x0004ed28,
+0x00590004,
+0x00038c00,
+0x005e0004,
+0x00033c00,
+0x00000000,
+0x0004ed28,
+0x00000000,
+0x00051528,
+0x00041212,
+0x00042412,
+0x00051528,
+0x00043600,
+0x00043700,
+0x00021e00,
+0x0067000c,
+0x00047c00,
+0x00000000,
+0x00053d0c,
+0x0002180c,
+0x0001b80c,
+0x00000000,
+0x0005490c,
+0x00083900,
+0x0005550c,
+0x00028318,
+0x0005610c,
+0x00083900,
+0x00056d0c,
+0x00059219,
+0x00059219,
+0x00000000,
+0x00046a19,
+0x00057919,
+0x00057919,
+0x00045119,
+0x00043819,
+0x00000000,
+0x0004a20d,
+0x00000000,
+0x0004af0d,
+0x00000000,
+0x0004bc07,
+0x00000000,
+0x0004830d,
+0x00000000,
+0x0004900d,
+0x00000000,
+0x00049d05,
+0x0005cf0d,
+0x0005cf0d,
+0x0005dc07,
+0x0005dc07,
+0x0005e30d,
+0x0005e30d,
+0x0005f005,
+0x0005f005,
+0x0005f50d,
+0x0005f50d,
+0x00060207,
+0x00060207,
+0x0006090d,
+0x0006090d,
+0x00061605,
+0x00061605,
+0x00061b0d,
+0x00061b0d,
+0x00062807,
+0x00062807,
+0x00062f0d,
+0x00062f0d,
+0x00063c05,
+0x00063c05,
+0x0006410d,
+0x0006410d,
+0x00064e07,
+0x00064e07,
+0x0006550d,
+0x0006550d,
+0x00066205,
+0x00066205,
+0x00067b0d,
+0x00067b0d,
+0x00068805,
+0x00068805,
+0x0006670d,
+0x0006670d,
+0x00067407,
+0x00067407,
+0x00000000,
+0x00068d28,
+0x00000000,
+0x0005c902,
+0x00000000,
+0x0005cb04,
+0x00000006,
+0x00000000,
+0x00000003,
+0x00000000,
+0x000b000d,
+0x00000000,
+0x00010007,
+0x00000000,
+0x000b000d,
+0x00000000,
+0x00030005,
+0x00000000,
+0x0005ab0f,
+0x0005ab0f,
+0x0000000f,
+0x00000000,
+0x0005ba0f,
+0x0005ba0f,
+0x00030007,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0005ba01,
+0x00000000,
+0x0005ba01,
+0x00140364,
+0x00000007,
+0x002803b4,
+0x00000007,
+0x00000000,
+0x0006da02,
+0x00000000,
+0x00098802,
+0x00000000,
+0x0006dc02,
+0x00000000,
+0x00098a02,
+0x0006de02,
+0x00098c01,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0008c860,
+0x00000000,
+0x0008c860,
+0x00000000,
+0x00092860,
+0x00000000,
+0x00092860,
+0x00050c01,
+0x00083918,
+0x00000000,
+0x0004630c,
+0x001406f0,
+0x00000007,
+0x00000000,
+0x00075907,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00098d01,
+0x00098d01,
+0x00000000,
+0x00072706,
+0x00028300,
+0x00074518,
+0x00075d0b,
+0x00075d0b,
+0x00000000,
+0x0004c90b,
+0x00019000,
+0x00072d18,
+0x00075d0b,
+0x00075d0b,
+0x00074518,
+0x00072706,
+0x00c4000b,
+0x0000c500,
+0x00000000,
+0x0006f506,
+0x00083900,
+0x0006f506,
+0x00028318,
+0x0006fc18,
+0x0004c504,
+0x0006fb00,
+0x00019000,
+0x00071400,
+0x00023100,
+0x00071512,
+0x00ea00cd,
+0x000026ff,
+0x00000000,
+0x0006ef06,
+0x0006ee00,
+0x0004d40f,
+0x00000000,
+0x00000000,
+0x0006ed00,
+0x0004d40f,
+0x00078013,
+0x00078013,
+0x00000000,
+0x0004e313,
+0x00000000,
+0x0004f613,
+0x00170013,
+0x00000000,
+0x00170013,
+0x00000000,
+0x00079313,
+0x00079313,
+0x0007a613,
+0x0007a613,
+0x0007b913,
+0x0007b913,
+0x00000274,
+0x00000000,
+0x0000029c,
+0x00000000,
+0x000002c4,
+0x00000000,
+0x000002ec,
+0x00000000,
+0x00000314,
+0x00000000,
+0x0000033c,
+0x00000000,
+0x00000364,
+0x00000000,
+0x0000038c,
+0x00000000,
+0x000003b4,
+0x00000000,
+0x000003dc,
+0x00000000,
+0x00000404,
+0x00000000,
+0x0000042c,
+0x00000000,
+0x00000454,
+0x00000000,
+0x0000047c,
+0x00000000,
+0x000004a4,
+0x00000000,
+0x000004cc,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0001ac10,
+0x000f00eb,
+0x00000000,
+0x00000002,
+0x00000000,
+0x00000000,
+0x00019000,
+0x00000000,
+0x00019a0c,
+0x00000000,
+0x0007cc02,
+0x00000000,
+0x0007ce04,
+0x00000000,
+0x0007d206,
+0x00000000,
+0x0007d80c,
+0x00000000,
+0x0007e418,
+0x00000000,
+0x00044b0c,
+0x00000000,
+0x0004570c,
+0x00000000,
+0x0002300c,
+0x00000000,
+0x00023c0c,
+0x00000000,
+0x00024802,
+0x00000000,
+0x00024b04,
+0x00000000,
+0x00025002,
+0x00000000,
+0x00025204,
+0x00000000,
+0x0007fc0c,
+0x00028318,
+0x0008080c,
+0x00083900,
+0x0008140c,
+0x00000914,
+0x00000000,
+0x0000091c,
+0x00000000,
+0x00000924,
+0x00000000,
+0x0000092c,
+0x00000000,
+0x00000934,
+0x00000000,
+0x0000093c,
+0x00000000,
+0x00000944,
+0x00000000,
+0x0000094c,
+0x00000000,
+0x00000954,
+0x00000000,
+0x0000095c,
+0x00000000,
+0x00000000,
+0x00013d00,
+0x00000000,
+0x00056000,
+0x00013d00,
+0x00019000,
+0x00001940,
+0x00000000,
+0x00050c00,
+0x00050c01,
+0x00000000,
+0x00083a28,
+0x00000000,
+0x00083a28,
+0x0002ec13,
+0x0002ff13,
+0x00083a28,
+0x00051000,
+0x00051100,
+0x00025900,
+0x00230002,
+0x0002c401,
+0x00230004,
+0x0002c401,
+0x00000000,
+0x00086228,
+0x00000000,
+0x00086228,
+0x00000000,
+0x00086228,
+0x00000000,
+0x00088a02,
+0x00000000,
+0x00088c04,
+0x00000000,
+0x00024803,
+0x00000000,
+0x00024b05,
+0x0002ec13,
+0x0002ff13,
+0x00086228,
+0x00050e00,
+0x00050f00,
+0x00026100,
+0x002f0002,
+0x00040401,
+0x002f0004,
+0x00040401,
+0x000009a4,
+0x00000000,
+0x000009ac,
+0x00000000,
+0x000512a8,
+0x0008901c,
+0x0006001c,
+0x00000000,
+0x000009b4,
+0x00000000,
+0x00000000,
+0x00100020,
+0x001006f0,
+0x00000007,
+0x000009bc,
+0x00000000,
+0x000512a8,
+0x0008ac1c,
+0x000b0011,
+0x00000000,
+0x0006e079,
+0x00099f79,
+0x00060079,
+0x00000000,
+0x000012f4,
+0x00000000,
+0x000012fc,
+0x00000000,
+0x00001304,
+0x00000000,
+0x0006e079,
+0x000a2979,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000001,
+0x00000010,
+0x00000000,
+0x00000000,
+0x00400000,
+0x00400000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000001,
+0x00000001,
+0x00000001,
+0x00000001,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x0000001b,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000003,
+0x00000000,
+0x00000003,
+0x00000000,
+0x00000003,
+0x00000000,
+0x00000006,
+0x00000000,
+0x00000006,
+0x00000000,
+0x00000006,
+0x00000000,
+0x00000009,
+0x00000000,
+0x00000009,
+0x00000000,
+0x00000009,
+0x00000000,
+0x00000005,
+0x00000000,
+0x00000005,
+0x00000000,
+0x00000005,
+0x00000000,
+0x00000007,
+0x00000000,
+0x00000007,
+0x00000000,
+0x00000007,
+0x00000000,
+0x00000008,
+0x00000000,
+0x00000008,
+0x00000000,
+0x00000008,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00e66666,
+0x00199999,
+0x00199999,
+0x00199999,
+0x00000000,
+0x00000000,
+0x00e66666,
+0x00e66666,
+0x00ffffff,
+0x00ffffff,
+0x00199999,
+0x00199999,
+0x00f33333,
+0x000ccccc,
+0x00f33333,
+0x00f33333,
+0x00199999,
+0x00e66666,
+0x00f33333,
+0x00f33333,
+0x00f33333,
+0x000ccccc,
+0x00199999,
+0x00199999,
+0x000ccccc,
+0x00162b95,
+0x00f33333,
+0x000ccccc,
+0x00e66666,
+0x00000000,
+0x00f33333,
+0x00f33333,
+0x000ccccc,
+0x00e9d46a,
+0x00199999,
+0x00e66666,
+0x000ccccc,
+0x00e9d46a,
+0x00f33333,
+0x00f33333,
+0x00e66666,
+0x00ffffff,
+0x00f33333,
+0x000ccccc,
+0x000ccccc,
+0x00162b95,
+0x00199999,
+0x00199999,
+0x00162b95,
+0x0018ba4a,
+0x000ccccc,
+0x00162b95,
+0x00000000,
+0x00121a18,
+0x00f33333,
+0x000ccccc,
+0x00e9d46a,
+0x0006a032,
+0x00e66666,
+0x00000000,
+0x00e9d46a,
+0x00f95fcd,
+0x00f33333,
+0x00f33333,
+0x00ffffff,
+0x00ede5e7,
+0x000ccccc,
+0x00e9d46a,
+0x00162b95,
+0x00e745b5,
+0x00199999,
+0x00e66666,
+0x00162b95,
+0x00e745b5,
+0x000ccccc,
+0x00e9d46a,
+0x00000000,
+0x00ede5e7,
+0x00f33333,
+0x00f33333,
+0x00e9d46a,
+0x00f95fcf,
+0x00e66666,
+0x00ffffff,
+0x00e9d46a,
+0x0006a032,
+0x00f33333,
+0x000ccccc,
+0x00ffffff,
+0x00121a18,
+0x000ccccc,
+0x00162b95,
+0x00162b95,
+0x0018ba4a,
+0x00199999,
+0x00199999,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x007fffff,
+0x00800000,
+0x001fffff,
+0x00e00000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x000049cc,
+0x000049cc,
+0x004fffff,
+0x00b00000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
+0x00000000,
diff --git a/sound/soc/omap/abe/abe_functionsid.h b/sound/soc/omap/abe/abe_functionsid.h
new file mode 100644
index 0000000..1c02770
--- /dev/null
+++ b/sound/soc/omap/abe/abe_functionsid.h
@@ -0,0 +1,122 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _ABE_FUNCTIONSID_H_
+#define _ABE_FUNCTIONSID_H_
+/*
+ * TASK function ID definitions
+ */
+#define C_ABE_FW_FUNCTION_IIR 0
+#define C_ABE_FW_FUNCTION_monoToStereoPack 1
+#define C_ABE_FW_FUNCTION_stereoToMonoSplit 2
+#define C_ABE_FW_FUNCTION_decimator 3
+#define C_ABE_FW_FUNCTION_OS0Fill 4
+#define C_ABE_FW_FUNCTION_mixer2 5
+#define C_ABE_FW_FUNCTION_mixer4 6
+#define C_ABE_FW_FUNCTION_mixer4_dual_mono 7
+#define C_ABE_FW_FUNCTION_inplaceGain 8
+#define C_ABE_FW_FUNCTION_StreamRouting 9
+#define C_ABE_FW_FUNCTION_gainConverge 10
+#define C_ABE_FW_FUNCTION_dualIir 11
+#define C_ABE_FW_FUNCTION_IO_DL_pp 12
+#define C_ABE_FW_FUNCTION_IO_generic 13
+#define C_ABE_FW_FUNCTION_irq_fifo_debug 14
+#define C_ABE_FW_FUNCTION_synchronize_pointers 15
+#define C_ABE_FW_FUNCTION_VIBRA2 16
+#define C_ABE_FW_FUNCTION_VIBRA1 17
+#define C_ABE_FW_FUNCTION_IIR_SRC_MIC 18
+#define C_ABE_FW_FUNCTION_wrappers 19
+#define C_ABE_FW_FUNCTION_ASRC_DL_wrapper 20
+#define C_ABE_FW_FUNCTION_ASRC_UL_wrapper 21
+#define C_ABE_FW_FUNCTION_mem_init 22
+#define C_ABE_FW_FUNCTION_debug_vx_asrc 23
+#define C_ABE_FW_FUNCTION_IIR_SRC2 24
+#define C_ABE_FW_FUNCTION_ASRC_DL_wrapper_sibling 25
+#define C_ABE_FW_FUNCTION_ASRC_UL_wrapper_sibling 26
+#define C_ABE_FW_FUNCTION_FIR6 27
+#define C_ABE_FW_FUNCTION_SRC44P1 28
+#define C_ABE_FW_FUNCTION_SRC44P1_1211 29
+#define C_ABE_FW_FUNCTION_SRC44P1_PP 30
+#define C_ABE_FW_FUNCTION_SRC44P1_1211_PP 31
+#define C_ABE_FW_FUNCTION_CHECK_IIR_LEFT 32
+#define C_ABE_FW_FUNCTION_CHECK_IIR_RIGHT 33
+#define C_ABE_FW_FUNCTION_FIR12_2 34
+/*
+ * COPY function ID definitions
+ */
+#define NULL_COPY_CFPID 0
+#define S2D_STEREO_16_16_CFPID 1
+#define S2D_MONO_MSB_CFPID 2
+#define S2D_STEREO_MSB_CFPID 3
+#define S2D_STEREO_RSHIFTED_16_CFPID 4
+#define S2D_MONO_RSHIFTED_16_CFPID 5
+#define D2S_STEREO_16_16_CFPID 6
+#define D2S_MONO_MSB_CFPID 7
+#define D2S_MONO_RSHIFTED_16_CFPID 8
+#define D2S_STEREO_RSHIFTED_16_CFPID 9
+#define D2S_STEREO_MSB_CFPID 10
+#define COPY_DMIC_CFPID 11
+#define COPY_MCPDM_DL_CFPID 12
+#define COPY_MM_UL_CFPID 13
+#define SPLIT_SMEM_CFPID 14
+#define MERGE_SMEM_CFPID 15
+#define SPLIT_TDM_CFPID 16
+#define MERGE_TDM_CFPID 17
+#define ROUTE_MM_UL_CFPID 18
+#define IO_IP_CFPID 19
+#define COPY_UNDERFLOW_CFPID 20
+#endif /* _ABE_FUNCTIONSID_H_ */
diff --git a/sound/soc/omap/abe/abe_fw.h b/sound/soc/omap/abe/abe_fw.h
new file mode 100644
index 0000000..a29dbb3
--- /dev/null
+++ b/sound/soc/omap/abe/abe_fw.h
@@ -0,0 +1,214 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_FW_H_
+#define _ABE_FW_H_
+
+#include "abe_cm_addr.h"
+#include "abe_sm_addr.h"
+#include "abe_dm_addr.h"
+#include "abe_typedef.h"
+/*
+ * GLOBAL DEFINITION
+ */
+/* one scheduler loop = 4kHz = 12 samples at 48kHz */
+#define FW_SCHED_LOOP_FREQ 4000
+/* scheduler loop frequency expressed in kHz (4000 Hz -> 4) */
+#define FW_SCHED_LOOP_FREQ_DIV1000 (FW_SCHED_LOOP_FREQ/1000)
+#define EVENT_FREQUENCY 96000
+#define SLOTS_IN_SCHED_LOOP (96000/FW_SCHED_LOOP_FREQ)
+#define SCHED_LOOP_8kHz (8000/FW_SCHED_LOOP_FREQ)
+#define SCHED_LOOP_16kHz (16000/FW_SCHED_LOOP_FREQ)
+#define SCHED_LOOP_24kHz (24000/FW_SCHED_LOOP_FREQ)
+#define SCHED_LOOP_48kHz (48000/FW_SCHED_LOOP_FREQ)
+#define TASKS_IN_SLOT 8
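As a quick check of the arithmetic behind these macros, under the 4 kHz
scheduler loop described in the comments above: SLOTS_IN_SCHED_LOOP =
96000 / 4000 = 24 event slots per loop, SCHED_LOOP_48kHz = 48000 / 4000 = 12
samples per loop (the "12 samples at 48kHz" mentioned above), and
SCHED_LOOP_8kHz, SCHED_LOOP_16kHz and SCHED_LOOP_24kHz evaluate to 2, 4 and 6
samples per loop respectively.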
+/*
+ * DMEM AREA - SCHEDULER
+ */
+#define dmem_mm_trace OMAP_ABE_D_DEBUG_FIFO_ADDR
+#define dmem_mm_trace_size ((OMAP_ABE_D_DEBUG_FIFO_SIZE)/4)
+#define ATC_SIZE 8 /* 8 bytes per descriptor */
+struct omap_abe_atc_desc {
+ unsigned rdpt:7; /* first 32bits word of the descriptor */
+ unsigned reserved0:1;
+ unsigned cbsize:7;
+ unsigned irqdest:1;
+ unsigned cberr:1;
+ unsigned reserved1:5;
+ unsigned cbdir:1;
+ unsigned nw:1;
+ unsigned wrpt:7;
+ unsigned reserved2:1;
+ unsigned badd:12; /* second 32bits word of the descriptor */
+ unsigned iter:7; /* iteration field overlaps 16-bit boundary */
+ unsigned srcid:6;
+ unsigned destid:6;
+ unsigned desen:1;
+};
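The bit-fields in struct omap_abe_atc_desc are meant to fill exactly the two
32-bit words of an ATC descriptor, i.e. ATC_SIZE (8) bytes. Below is a minimal
stand-alone sketch of that layout check, assuming a C11 compiler and the usual
packing of unsigned-int bit-fields into 32-bit storage units; the field list is
copied from the struct above.

struct atc_desc_layout_check {
	/* first 32-bit word: 7+1+7+1+1+5+1+1+7+1 = 32 bits */
	unsigned rdpt:7;
	unsigned reserved0:1;
	unsigned cbsize:7;
	unsigned irqdest:1;
	unsigned cberr:1;
	unsigned reserved1:5;
	unsigned cbdir:1;
	unsigned nw:1;
	unsigned wrpt:7;
	unsigned reserved2:1;
	/* second 32-bit word: 12+7+6+6+1 = 32 bits */
	unsigned badd:12;
	unsigned iter:7;
	unsigned srcid:6;
	unsigned destid:6;
	unsigned desen:1;
};

/* two 32-bit words -> 8 bytes, matching ATC_SIZE */
_Static_assert(sizeof(struct atc_desc_layout_check) == 8,
	       "ATC descriptor does not pack into 8 bytes");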
+/*
+ * Infinite counter incremented on each scheduler period (~250 us)
+ * uint16 dmem_debug_time_stamp
+ */
+#define dmem_debug_time_stamp OMAP_ABE_D_LOOPCOUNTER_ADDR
+/*
+ * ATC BUFFERS + IO TASKS SMEM buffers
+ */
+#define dmem_dmic OMAP_ABE_D_DMIC_UL_FIFO_ADDR
+#define dmem_dmic_size (OMAP_ABE_D_DMIC_UL_FIFO_SIZE/4)
+#define dmem_amic OMAP_ABE_D_MCPDM_UL_FIFO_ADDR
+#define dmem_amic_size (OMAP_ABE_D_MCPDM_UL_FIFO_SIZE/4)
+#define smem_amic AMIC_96_labelID
+#define dmem_mcpdm OMAP_ABE_D_MCPDM_DL_FIFO_ADDR
+#define dmem_mcpdm_size (OMAP_ABE_D_MCPDM_DL_FIFO_SIZE/4)
+#define dmem_mm_ul OMAP_ABE_D_MM_UL_FIFO_ADDR
+#define dmem_mm_ul_size (OMAP_ABE_D_MM_UL_FIFO_SIZE/4)
+/* managed directly by the router */
+#define smem_mm_ul MM_UL_labelID
+#define dmem_mm_ul2 OMAP_ABE_D_MM_UL2_FIFO_ADDR
+#define dmem_mm_ul2_size (OMAP_ABE_D_MM_UL2_FIFO_SIZE/4)
+/* managed directly by the router */
+#define smem_mm_ul2 MM_UL2_labelID
+#define dmem_mm_dl OMAP_ABE_D_MM_DL_FIFO_ADDR
+#define dmem_mm_dl_size (OMAP_ABE_D_MM_DL_FIFO_SIZE/4)
+#define smem_mm_dl MM_DL_labelID
+#define dmem_vx_dl OMAP_ABE_D_VX_DL_FIFO_ADDR
+#define dmem_vx_dl_size (OMAP_ABE_D_VX_DL_FIFO_SIZE/4)
+#define smem_vx_dl IO_VX_DL_ASRC_labelID /* Voice_16k_DL_labelID */
+#define dmem_vx_ul OMAP_ABE_D_VX_UL_FIFO_ADDR
+#define dmem_vx_ul_size (OMAP_ABE_D_VX_UL_FIFO_SIZE/4)
+#define smem_vx_ul Voice_8k_UL_labelID
+#define dmem_tones_dl OMAP_ABE_D_TONES_DL_FIFO_ADDR
+#define dmem_tones_dl_size (OMAP_ABE_D_TONES_DL_FIFO_SIZE/4)
+#define smem_tones_dl Tones_labelID
+#define dmem_vib_dl OMAP_ABE_D_VIB_DL_FIFO_ADDR
+#define dmem_vib_dl_size (OMAP_ABE_D_VIB_DL_FIFO_SIZE/4)
+#define smem_vib IO_VIBRA_DL_labelID
+#define dmem_mm_ext_out OMAP_ABE_D_MM_EXT_OUT_FIFO_ADDR
+#define dmem_mm_ext_out_size (OMAP_ABE_D_MM_EXT_OUT_FIFO_SIZE/4)
+#define smem_mm_ext_out DL1_GAIN_out_labelID
+#define dmem_mm_ext_in OMAP_ABE_D_MM_EXT_IN_FIFO_ADDR
+#define dmem_mm_ext_in_size (OMAP_ABE_D_MM_EXT_IN_FIFO_SIZE/4)
+/*IO_MM_EXT_IN_ASRC_labelID ASRC input buffer, size 40 */
+#define smem_mm_ext_in_opp100 IO_MM_EXT_IN_ASRC_labelID
+/* at OPP 50 without ASRC */
+#define smem_mm_ext_in_opp50 MM_EXT_IN_labelID
+#define dmem_bt_vx_dl OMAP_ABE_D_BT_DL_FIFO_ADDR
+#define dmem_bt_vx_dl_size (OMAP_ABE_D_BT_DL_FIFO_SIZE/4)
+#define smem_bt_vx_dl_opp50 BT_DL_8k_labelID
+/*BT_DL_8k_opp100_labelID ASRC output buffer, size 40 */
+#define smem_bt_vx_dl_opp100 BT_DL_8k_opp100_labelID
+#define dmem_bt_vx_ul OMAP_ABE_D_BT_UL_FIFO_ADDR
+#define dmem_bt_vx_ul_size (OMAP_ABE_D_BT_UL_FIFO_SIZE/4)
+#define smem_bt_vx_ul_opp50 BT_UL_8k_labelID
+/*IO_BT_UL_ASRC_labelID ASRC input buffer, size 40 */
+#define smem_bt_vx_ul_opp100 IO_BT_UL_ASRC_labelID
+/*
+ * SMEM AREA
+ */
+/*
+ * GAIN SMEM on PORT
+ * int32 smem_G0 [18] : desired gain on the ports
+ * format of G0 = desired gain, left-shifted by 6 bits, in linear 24-bit format
+ * int24 stereo G0 [18] = G0
+ * int24 stereo GI [18] : current value of the gain, in the same format as G0
+ * List of smoothed gains:
+ * 6 DMIC 0 1 2 3 4 5
+ * 2 AMIC L R
+ * 4 PORT1/2_RX L R
+ * 2 MM_EXT L R
+ * 2 MM_VX_DL L R
+ * 2 IHF L R
+ * ---------------
+ * 18 = TOTAL
+ */
+/*
+ * COEFFICIENTS AREA
+ */
+/*
+ * delay coefficients used in the IIR-1 filters
+ * int24 cmem_gain_delay_iir1[9 x 2] (a, (1-a))
+ *
+ * 3 for 6 DMIC 0 1 2 3 4 5
+ * 1 for 2 AMIC L R
+ * 2 for 4 PORT1/2_RX L R
+ * 1 for 2 MM_EXT L R
+ * 1 for 2 MM_VX_DL L R
+ * 1 for 2 IHF L R
+ */
+/*
+ * gain controls
+ */
+#define GAIN_LEFT_OFFSET 0
+#define GAIN_RIGHT_OFFSET 1
+/* stereo gains */
+#define dmic1_gains_offset 0
+#define dmic2_gains_offset 2
+#define dmic3_gains_offset 4
+#define amic_gains_offset 6
+#define dl1_gains_offset 8
+#define dl2_gains_offset 10
+#define splitters_gains_offset 12
+#define mixer_dl1_offset 14
+#define mixer_dl2_offset 18
+#define mixer_echo_offset 22
+#define mixer_sdt_offset 24
+#define mixer_vxrec_offset 26
+#define mixer_audul_offset 30
+#define btul_gains_offset 34
+
+#endif /* _ABE_FW_H_ */
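The per-port *_gains_offset values above place each stereo gain pair two slots
apart (the mixer offsets appear to span one slot per mixer input), and
GAIN_LEFT_OFFSET / GAIN_RIGHT_OFFSET select the channel within a stereo pair.
A small illustrative sketch of that indexing follows; the helper name
gain_slot() is invented for the example and is not part of the patch.

/* Hypothetical helper: slot index of one gain channel.
 * base is one of the *_gains_offset values from abe_fw.h,
 * channel is GAIN_LEFT_OFFSET or GAIN_RIGHT_OFFSET.
 */
static inline unsigned int gain_slot(unsigned int base, unsigned int channel)
{
	return base + channel;
}

/*
 * With the defines above:
 *   gain_slot(dl1_gains_offset, GAIN_LEFT_OFFSET)   ->  8
 *   gain_slot(dl1_gains_offset, GAIN_RIGHT_OFFSET)  ->  9
 *   gain_slot(amic_gains_offset, GAIN_RIGHT_OFFSET) ->  7
 */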
diff --git a/sound/soc/omap/abe/abe_gain.c b/sound/soc/omap/abe/abe_gain.c
new file mode 100644
index 0000000..9c148da
--- /dev/null
+++ b/sound/soc/omap/abe/abe_gain.c
@@ -0,0 +1,790 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dbg.h"
+#include "abe.h"
+#include "abe_gain.h"
+#include "abe_mem.h"
+
+/*
+ * ABE CONST AREA FOR PARAMETERS TRANSLATION
+ */
+#define min_mdb (-12000)
+#define max_mdb (3000)
+#define sizeof_db2lin_table (1 + ((max_mdb - min_mdb)/100))
+
+const u32 abe_db2lin_table[sizeof_db2lin_table] = {
+ 0x00000000, /* SMEM coding of -120 dB */
+ 0x00000000, /* SMEM coding of -119 dB */
+ 0x00000000, /* SMEM coding of -118 dB */
+ 0x00000000, /* SMEM coding of -117 dB */
+ 0x00000000, /* SMEM coding of -116 dB */
+ 0x00000000, /* SMEM coding of -115 dB */
+ 0x00000000, /* SMEM coding of -114 dB */
+ 0x00000000, /* SMEM coding of -113 dB */
+ 0x00000000, /* SMEM coding of -112 dB */
+ 0x00000000, /* SMEM coding of -111 dB */
+ 0x00000000, /* SMEM coding of -110 dB */
+ 0x00000000, /* SMEM coding of -109 dB */
+ 0x00000001, /* SMEM coding of -108 dB */
+ 0x00000001, /* SMEM coding of -107 dB */
+ 0x00000001, /* SMEM coding of -106 dB */
+ 0x00000001, /* SMEM coding of -105 dB */
+ 0x00000001, /* SMEM coding of -104 dB */
+ 0x00000001, /* SMEM coding of -103 dB */
+ 0x00000002, /* SMEM coding of -102 dB */
+ 0x00000002, /* SMEM coding of -101 dB */
+ 0x00000002, /* SMEM coding of -100 dB */
+ 0x00000002, /* SMEM coding of -99 dB */
+ 0x00000003, /* SMEM coding of -98 dB */
+ 0x00000003, /* SMEM coding of -97 dB */
+ 0x00000004, /* SMEM coding of -96 dB */
+ 0x00000004, /* SMEM coding of -95 dB */
+ 0x00000005, /* SMEM coding of -94 dB */
+ 0x00000005, /* SMEM coding of -93 dB */
+ 0x00000006, /* SMEM coding of -92 dB */
+ 0x00000007, /* SMEM coding of -91 dB */
+ 0x00000008, /* SMEM coding of -90 dB */
+ 0x00000009, /* SMEM coding of -89 dB */
+ 0x0000000A, /* SMEM coding of -88 dB */
+ 0x0000000B, /* SMEM coding of -87 dB */
+ 0x0000000D, /* SMEM coding of -86 dB */
+ 0x0000000E, /* SMEM coding of -85 dB */
+ 0x00000010, /* SMEM coding of -84 dB */
+ 0x00000012, /* SMEM coding of -83 dB */
+ 0x00000014, /* SMEM coding of -82 dB */
+ 0x00000017, /* SMEM coding of -81 dB */
+ 0x0000001A, /* SMEM coding of -80 dB */
+ 0x0000001D, /* SMEM coding of -79 dB */
+ 0x00000021, /* SMEM coding of -78 dB */
+ 0x00000025, /* SMEM coding of -77 dB */
+ 0x00000029, /* SMEM coding of -76 dB */
+ 0x0000002E, /* SMEM coding of -75 dB */
+ 0x00000034, /* SMEM coding of -74 dB */
+ 0x0000003A, /* SMEM coding of -73 dB */
+ 0x00000041, /* SMEM coding of -72 dB */
+ 0x00000049, /* SMEM coding of -71 dB */
+ 0x00000052, /* SMEM coding of -70 dB */
+ 0x0000005D, /* SMEM coding of -69 dB */
+ 0x00000068, /* SMEM coding of -68 dB */
+ 0x00000075, /* SMEM coding of -67 dB */
+ 0x00000083, /* SMEM coding of -66 dB */
+ 0x00000093, /* SMEM coding of -65 dB */
+ 0x000000A5, /* SMEM coding of -64 dB */
+ 0x000000B9, /* SMEM coding of -63 dB */
+ 0x000000D0, /* SMEM coding of -62 dB */
+ 0x000000E9, /* SMEM coding of -61 dB */
+ 0x00000106, /* SMEM coding of -60 dB */
+ 0x00000126, /* SMEM coding of -59 dB */
+ 0x0000014A, /* SMEM coding of -58 dB */
+ 0x00000172, /* SMEM coding of -57 dB */
+ 0x0000019F, /* SMEM coding of -56 dB */
+ 0x000001D2, /* SMEM coding of -55 dB */
+ 0x0000020B, /* SMEM coding of -54 dB */
+ 0x0000024A, /* SMEM coding of -53 dB */
+ 0x00000292, /* SMEM coding of -52 dB */
+ 0x000002E2, /* SMEM coding of -51 dB */
+ 0x0000033C, /* SMEM coding of -50 dB */
+ 0x000003A2, /* SMEM coding of -49 dB */
+ 0x00000413, /* SMEM coding of -48 dB */
+ 0x00000492, /* SMEM coding of -47 dB */
+ 0x00000521, /* SMEM coding of -46 dB */
+ 0x000005C2, /* SMEM coding of -45 dB */
+ 0x00000676, /* SMEM coding of -44 dB */
+ 0x0000073F, /* SMEM coding of -43 dB */
+ 0x00000822, /* SMEM coding of -42 dB */
+ 0x00000920, /* SMEM coding of -41 dB */
+ 0x00000A3D, /* SMEM coding of -40 dB */
+ 0x00000B7D, /* SMEM coding of -39 dB */
+ 0x00000CE4, /* SMEM coding of -38 dB */
+ 0x00000E76, /* SMEM coding of -37 dB */
+ 0x0000103A, /* SMEM coding of -36 dB */
+ 0x00001235, /* SMEM coding of -35 dB */
+ 0x0000146E, /* SMEM coding of -34 dB */
+ 0x000016EC, /* SMEM coding of -33 dB */
+ 0x000019B8, /* SMEM coding of -32 dB */
+ 0x00001CDC, /* SMEM coding of -31 dB */
+ 0x00002061, /* SMEM coding of -30 dB */
+ 0x00002455, /* SMEM coding of -29 dB */
+ 0x000028C4, /* SMEM coding of -28 dB */
+ 0x00002DBD, /* SMEM coding of -27 dB */
+ 0x00003352, /* SMEM coding of -26 dB */
+ 0x00003995, /* SMEM coding of -25 dB */
+ 0x0000409C, /* SMEM coding of -24 dB */
+ 0x0000487E, /* SMEM coding of -23 dB */
+ 0x00005156, /* SMEM coding of -22 dB */
+ 0x00005B43, /* SMEM coding of -21 dB */
+ 0x00006666, /* SMEM coding of -20 dB */
+ 0x000072E5, /* SMEM coding of -19 dB */
+ 0x000080E9, /* SMEM coding of -18 dB */
+ 0x000090A4, /* SMEM coding of -17 dB */
+ 0x0000A24B, /* SMEM coding of -16 dB */
+ 0x0000B618, /* SMEM coding of -15 dB */
+ 0x0000CC50, /* SMEM coding of -14 dB */
+ 0x0000E53E, /* SMEM coding of -13 dB */
+ 0x00010137, /* SMEM coding of -12 dB */
+ 0x0001209A, /* SMEM coding of -11 dB */
+ 0x000143D1, /* SMEM coding of -10 dB */
+ 0x00016B54, /* SMEM coding of -9 dB */
+ 0x000197A9, /* SMEM coding of -8 dB */
+ 0x0001C967, /* SMEM coding of -7 dB */
+ 0x00020137, /* SMEM coding of -6 dB */
+ 0x00023FD6, /* SMEM coding of -5 dB */
+ 0x00028619, /* SMEM coding of -4 dB */
+ 0x0002D4EF, /* SMEM coding of -3 dB */
+ 0x00032D64, /* SMEM coding of -2 dB */
+ 0x000390A4, /* SMEM coding of -1 dB */
+ 0x00040000, /* SMEM coding of 0 dB */
+ 0x00047CF2, /* SMEM coding of 1 dB */
+ 0x00050923, /* SMEM coding of 2 dB */
+ 0x0005A670, /* SMEM coding of 3 dB */
+ 0x000656EE, /* SMEM coding of 4 dB */
+ 0x00071CF5, /* SMEM coding of 5 dB */
+ 0x0007FB26, /* SMEM coding of 6 dB */
+ 0x0008F473, /* SMEM coding of 7 dB */
+ 0x000A0C2B, /* SMEM coding of 8 dB */
+ 0x000B4606, /* SMEM coding of 9 dB */
+ 0x000CA62C, /* SMEM coding of 10 dB */
+ 0x000E314A, /* SMEM coding of 11 dB */
+ 0x000FEC9E, /* SMEM coding of 12 dB */
+ 0x0011DE0A, /* SMEM coding of 13 dB */
+ 0x00140C28, /* SMEM coding of 14 dB */
+ 0x00167E60, /* SMEM coding of 15 dB */
+ 0x00193D00, /* SMEM coding of 16 dB */
+ 0x001C515D, /* SMEM coding of 17 dB */
+ 0x001FC5EB, /* SMEM coding of 18 dB */
+ 0x0023A668, /* SMEM coding of 19 dB */
+ 0x00280000, /* SMEM coding of 20 dB */
+ 0x002CE178, /* SMEM coding of 21 dB */
+ 0x00325B65, /* SMEM coding of 22 dB */
+ 0x00388062, /* SMEM coding of 23 dB */
+ 0x003F654E, /* SMEM coding of 24 dB */
+ 0x00472194, /* SMEM coding of 25 dB */
+ 0x004FCF7C, /* SMEM coding of 26 dB */
+ 0x00598C81, /* SMEM coding of 27 dB */
+ 0x006479B7, /* SMEM coding of 28 dB */
+ 0x0070BC3D, /* SMEM coding of 29 dB */
+ 0x007E7DB9, /* SMEM coding of 30 dB */
+};
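The table above appears to hold the linear voltage gain 10^(mdB/2000) scaled by
2^18: the 0 dB entry is 0x40000 = 1 << 18, the +20 dB entry is exactly ten
times that, and index i corresponds to min_mdb + 100 * i millibels (151 entries
from -120 dB to +30 dB in 1 dB steps). A rough stand-alone sketch of that
relationship follows, assuming this interpretation; a few entries of the real
table differ from the formula by a handful of LSBs, presumably an artifact of
the tool that generated them.

#include <math.h>
#include <stdio.h>

/* Approximate reconstruction of one abe_db2lin_table entry;
 * mdb is the gain in millibels (1/100 dB), e.g. -2000 for -20 dB.
 */
static unsigned int db2lin_approx(int mdb)
{
	return (unsigned int)lround(pow(10.0, mdb / 2000.0) * (1 << 18));
}

int main(void)
{
	/* expected: 0x6666 (-20 dB), 0x40000 (0 dB), 0x280000 (+20 dB) */
	printf("%#x %#x %#x\n",
	       db2lin_approx(-2000), db2lin_approx(0), db2lin_approx(2000));
	return 0;
}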
+
+const u32 abe_1_alpha_iir[64] = {
+ 0x040002, 0x040002, 0x040002, 0x040002, /* 0 */
+ 0x50E955, 0x48CA65, 0x40E321, 0x72BE78, /* 1 [ms] */
+ 0x64BA68, 0x57DF14, 0x4C3D60, 0x41D690, /* 2 */
+ 0x38A084, 0x308974, 0x297B00, 0x235C7C, /* 4 */
+ 0x1E14B0, 0x198AF0, 0x15A800, 0x125660, /* 8 */
+ 0x0F82A0, 0x0D1B5C, 0x0B113C, 0x0956CC, /* 16 */
+ 0x07E054, 0x06A3B8, 0x059844, 0x04B680, /* 32 */
+ 0x03F80C, 0x035774, 0x02D018, 0x025E0C, /* 64 */
+ 0x7F8057, 0x6B482F, 0x5A4297, 0x4BEECB, /* 128 */
+ 0x3FE00B, 0x35BAA7, 0x2D3143, 0x2602AF, /* 256 */
+ 0x1FF803, 0x1AE2FB, 0x169C9F, 0x13042B, /* 512 */
+ 0x0FFE03, 0x0D72E7, 0x0B4F4F, 0x0982CB, /* 1.024 [s] */
+ 0x07FF83, 0x06B9CF, 0x05A7E7, 0x04C193, /* 2.048 */
+ 0x03FFE3, 0x035CFF, 0x02D403, 0x0260D7, /* 4.096 */
+ 0x01FFFB, 0x01AE87, 0x016A07, 0x01306F, /* 8.192 */
+ 0x00FFFF, 0x00D743, 0x00B503, 0x009837,
+};
+
+const u32 abe_alpha_iir[64] = {
+ 0x000000, 0x000000, 0x000000, 0x000000, /* 0 */
+ 0x5E2D58, 0x6E6B3C, 0x7E39C0, 0x46A0C5, /* 1 [ms] */
+ 0x4DA2CD, 0x541079, 0x59E151, 0x5F14B9, /* 2 */
+ 0x63AFC1, 0x67BB45, 0x6B4281, 0x6E51C1, /* 4 */
+ 0x70F5A9, 0x733A89, 0x752C01, 0x76D4D1, /* 8 */
+ 0x783EB1, 0x797251, 0x7A7761, 0x7B549D, /* 16 */
+ 0x7C0FD5, 0x7CAE25, 0x7D33DD, 0x7DA4C1, /* 32 */
+ 0x7E03FD, 0x7E5449, 0x7E97F5, 0x7ED0F9, /* 64 */
+ 0x7F0101, 0x7F2971, 0x7F4B7D, 0x7F6825, /* 128 */
+ 0x7F8041, 0x7F948D, 0x7FA59D, 0x7FB3FD, /* 256 */
+ 0x7FC011, 0x7FCA3D, 0x7FD2C9, 0x7FD9F9, /* 512 */
+ 0x7FE005, 0x7FE51D, 0x7FE961, 0x7FECFD, /* 1.024 [s] */
+ 0x7FF001, 0x7FF28D, 0x7FF4B1, 0x7FF67D, /* 2.048 */
+ 0x7FF801, 0x7FF949, 0x7FFA59, 0x7FFB41, /* 4.096 */
+ 0x7FFC01, 0x7FFCA5, 0x7FFD2D, 0x7FFDA1, /* 8.192 */
+ 0x7FFE01, 0x7FFE51, 0x7FFE95, 0x7FFED1,
+};
+
+/**
+ * abe_int_2_float16
+ * returns a 16-bit mantissa and the matching exponent
+ * 0x4000.0000 leads to M=0x4000 X=15
+ * 0x0004.0000 leads to M=0x4000 X=4
+ * 0x0000.0001 leads to M=0x4000 X=-14
+ *
+ */
+void abe_int_2_float16(u32 data, u32 *mantissa, u32 *exp)
+{
+ u32 i;
+ *exp = 0;
+ *mantissa = 0;
+ for (i = 0; i < 32; i++) {
+ if ((1 << i) > data)
+ break;
+ }
+ *exp = i - 15;
+ *mantissa = (*exp > 0) ? data >> (*exp) : data << (*exp);
+}
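+
+/*
+ * Worked example for the conversion above: for data = 0x123456 the loop
+ * stops at i = 21 (1 << 21 is the first power of two above the input), so
+ * *exp = 21 - 15 = 6 and *mantissa = 0x123456 >> 6 = 0x48D1. The mantissa
+ * therefore always stays below 0x8000, i.e. it fits the 16-bit field used
+ * by the firmware.
+ */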
+
+/**
+ * omap_abe_use_compensated_gain
+ * @abe: Pointer on abe handle
+ * @on_off: 1 to enable the automatic gain management, 0 to disable it
+ *
+ * Selects the automatic mixer gain management. When enabled,
+ * omap_abe_write_gain() rescales the whole set of mixer input gains so
+ * that their sum does not create saturation.
+ */
+int omap_abe_use_compensated_gain(struct omap_abe *abe, int on_off)
+{
+ abe->compensated_mixer_gain = on_off;
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_use_compensated_gain);
+
+/**
+ * omap_abe_gain_offset
+ * returns the offset to firmware data structures
+ *
+ */
+void omap_abe_gain_offset(struct omap_abe *abe, u32 id, u32 *mixer_offset)
+{
+ switch (id) {
+ default:
+ case GAINS_DMIC1:
+ *mixer_offset = dmic1_gains_offset;
+ break;
+ case GAINS_DMIC2:
+ *mixer_offset = dmic2_gains_offset;
+ break;
+ case GAINS_DMIC3:
+ *mixer_offset = dmic3_gains_offset;
+ break;
+ case GAINS_AMIC:
+ *mixer_offset = amic_gains_offset;
+ break;
+ case GAINS_DL1:
+ *mixer_offset = dl1_gains_offset;
+ break;
+ case GAINS_DL2:
+ *mixer_offset = dl2_gains_offset;
+ break;
+ case GAINS_SPLIT:
+ *mixer_offset = splitters_gains_offset;
+ break;
+ case MIXDL1:
+ *mixer_offset = mixer_dl1_offset;
+ break;
+ case MIXDL2:
+ *mixer_offset = mixer_dl2_offset;
+ break;
+ case MIXECHO:
+ *mixer_offset = mixer_echo_offset;
+ break;
+ case MIXSDT:
+ *mixer_offset = mixer_sdt_offset;
+ break;
+ case MIXVXREC:
+ *mixer_offset = mixer_vxrec_offset;
+ break;
+ case MIXAUDUL:
+ *mixer_offset = mixer_audul_offset;
+ break;
+ case GAINS_BTUL:
+ *mixer_offset = btul_gains_offset;
+ break;
+ }
+}
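+
+/*
+ * The offset returned above indexes the per-gain bookkeeping arrays of the
+ * handle; a hypothetical caller looking up one DL1 mixer input would do:
+ *
+ *   u32 off;
+ *
+ *   omap_abe_gain_offset(abe, MIXDL1, &off);
+ *   ramp = abe->desired_ramp_delay_ms[off + p];
+ *
+ * where p selects the sub-port within that mixer.
+ */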
+
+/**
+ * omap_abe_write_equalizer
+ * @abe: Pointer on abe handle
+ * @id: name of the equalizer
+ * @param: equalizer coefficients
+ *
+ * Load the coefficients in CMEM.
+ */
+int omap_abe_write_equalizer(struct omap_abe *abe,
+ u32 id, struct omap_abe_equ *param)
+{
+ u32 eq_offset, length, *src, eq_mem, eq_mem_len;
+ _log(ABE_ID_WRITE_EQUALIZER, id, 0, 0);
+ switch (id) {
+ default:
+ case EQ1:
+ eq_offset = OMAP_ABE_C_DL1_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_DL1_M_EQ_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_DL1_M_EQ_DATA_SIZE;
+ break;
+ case EQ2L:
+ eq_offset = OMAP_ABE_C_DL2_L_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE;
+ break;
+ case EQ2R:
+ eq_offset = OMAP_ABE_C_DL2_R_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE;
+ break;
+ case EQSDT:
+ eq_offset = OMAP_ABE_C_SDT_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_SDT_F_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_SDT_F_DATA_SIZE;
+ break;
+ case EQAMIC:
+ eq_offset = OMAP_ABE_C_96_48_AMIC_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_AMIC_96_48_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_AMIC_96_48_DATA_SIZE;
+ break;
+ case EQDMIC:
+ eq_offset = OMAP_ABE_C_96_48_DMIC_COEFS_ADDR;
+ eq_mem = OMAP_ABE_S_DMIC0_96_48_DATA_ADDR;
+ eq_mem_len = OMAP_ABE_S_DMIC0_96_48_DATA_SIZE;
+ /* the three DMIC buffers DMIC0/DMIC1/DMIC2 are cleared together */
+ eq_mem_len *= 3;
+ break;
+ }
+ /* reset SMEM buffers before the coefficients are loaded */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM, eq_mem, eq_mem_len);
+
+ length = param->equ_length;
+ src = (u32 *) ((param->coef).type1);
+ /* translate in bytes */
+ length <<= 2;
+ omap_abe_mem_write(abe, OMAP_ABE_CMEM, eq_offset, src, length);
+
+ /* reset SMEM buffers after the coefficients are loaded */
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM, eq_mem, eq_mem_len);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_write_equalizer);
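+
+/*
+ * Illustrative use of the equalizer API above, for a caller that already
+ * holds nb_coefs coefficient words (hypothetical variables):
+ *
+ *   struct omap_abe_equ equ;
+ *
+ *   equ.equ_length = nb_coefs;
+ *   ... fill equ.coef.type1 with the coefficient words (see abe_typ.h) ...
+ *   omap_abe_write_equalizer(abe, EQ1, &equ);
+ *
+ * The SMEM data of the selected filter is cleared before and after the
+ * CMEM write so stale samples are not processed with the new response.
+ */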
+
+/**
+ * omap_abe_disable_gain
+ * @abe: Pointer on abe handle
+ * @id: gain or mixer name
+ * @p: sub-port id within the gain or mixer
+ *
+ */
+int omap_abe_disable_gain(struct omap_abe *abe, u32 id, u32 p)
+{
+ u32 mixer_offset, f_g, ramp;
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ /* save the input parameters for mute/unmute */
+ ramp = abe->desired_ramp_delay_ms[mixer_offset + p];
+ f_g = GAIN_MUTE;
+ if (!(abe->muted_gains_indicator[mixer_offset + p] &
+ OMAP_ABE_GAIN_DISABLED)) {
+ /* Check if we are in mute */
+ if (!(abe->muted_gains_indicator[mixer_offset + p] &
+ OMAP_ABE_GAIN_MUTED)) {
+ abe->muted_gains_decibel[mixer_offset + p] =
+ abe->desired_gains_decibel[mixer_offset + p];
+ /* mute the gain */
+ omap_abe_write_gain(abe, id, f_g, ramp, p);
+ }
+ abe->muted_gains_indicator[mixer_offset + p] |=
+ OMAP_ABE_GAIN_DISABLED;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_disable_gain);
+
+/**
+ * omap_abe_enable_gain
+ * @id: gain or mixer name
+ * @p: sub-port id within the gain or mixer
+ *
+ */
+int omap_abe_enable_gain(struct omap_abe *abe, u32 id, u32 p)
+{
+ u32 mixer_offset, f_g, ramp;
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ if ((abe->muted_gains_indicator[mixer_offset + p] &
+ OMAP_ABE_GAIN_DISABLED)) {
+ /* restore the input parameters for mute/unmute */
+ f_g = abe->muted_gains_decibel[mixer_offset + p];
+ ramp = abe->desired_ramp_delay_ms[mixer_offset + p];
+ abe->muted_gains_indicator[mixer_offset + p] &=
+ ~OMAP_ABE_GAIN_DISABLED;
+ /* unmute the gain */
+ omap_abe_write_gain(abe, id, f_g, ramp, p);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_enable_gain);
+/**
+ * omap_abe_mute_gain
+ * @id: gain or mixer name
+ * @p: sub-port id within the gain or mixer
+ *
+ */
+int omap_abe_mute_gain(struct omap_abe *abe, u32 id, u32 p)
+{
+ u32 mixer_offset, f_g, ramp;
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ /* save the input parameters for mute/unmute */
+ ramp = abe->desired_ramp_delay_ms[mixer_offset + p];
+ f_g = GAIN_MUTE;
+ if (!abe->muted_gains_indicator[mixer_offset + p]) {
+ abe->muted_gains_decibel[mixer_offset + p] =
+ abe->desired_gains_decibel[mixer_offset + p];
+ /* mute the gain */
+ omap_abe_write_gain(abe, id, f_g, ramp, p);
+ }
+ abe->muted_gains_indicator[mixer_offset + p] |= OMAP_ABE_GAIN_MUTED;
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_mute_gain);
+/**
+ * omap_abe_unmute_gain
+ * @id: gain or mixer name
+ * @p: sub-port id within the gain or mixer
+ *
+ */
+int omap_abe_unmute_gain(struct omap_abe *abe, u32 id, u32 p)
+{
+ u32 mixer_offset, f_g, ramp;
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ if ((abe->muted_gains_indicator[mixer_offset + p] &
+ OMAP_ABE_GAIN_MUTED)) {
+ /* restore the input parameters for mute/unmute */
+ f_g = abe->muted_gains_decibel[mixer_offset + p];
+ ramp = abe->desired_ramp_delay_ms[mixer_offset + p];
+ abe->muted_gains_indicator[mixer_offset + p] &=
+ ~OMAP_ABE_GAIN_MUTED;
+ /* unmute the gain */
+ omap_abe_write_gain(abe, id, f_g, ramp, p);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_unmute_gain);
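+
+/*
+ * The four helpers above keep two independent bits per gain in
+ * muted_gains_indicator: OMAP_ABE_GAIN_MUTED and OMAP_ABE_GAIN_DISABLED.
+ * With gain compensation off, omap_abe_write_gain() skips the SMEM update
+ * while either bit is set, so in a sequence such as
+ *
+ *   omap_abe_mute_gain(abe, MIXDL1, MIX_DL1_INPUT_VX_DL);
+ *   omap_abe_disable_gain(abe, MIXDL1, MIX_DL1_INPUT_VX_DL);
+ *   omap_abe_enable_gain(abe, MIXDL1, MIX_DL1_INPUT_VX_DL);
+ *   omap_abe_unmute_gain(abe, MIXDL1, MIX_DL1_INPUT_VX_DL);
+ *
+ * the saved level is actually written back to SMEM only by the last call.
+ */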
+
+/**
+ * omap_abe_write_gain
+ * @id: gain name or mixer name
+ * @f_g: input gain of the mixer
+ * @ramp: gain ramp speed factor
+ * @p: port corresponding to the above gain
+ *
+ * Loads the gain coefficients to FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer gains
+ * in mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+int omap_abe_write_gain(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 ramp, u32 p)
+{
+ u32 lin_g, sum_g, mixer_target, mixer_offset, i, mean_gain, mean_exp;
+ u32 new_gain_linear[4];
+ s32 gain_index;
+ u32 alpha, beta;
+ u32 ramp_index;
+
+ _log(ABE_ID_WRITE_GAIN, id, f_g, p);
+ gain_index = ((f_g - min_mdb) / 100);
+ gain_index = maximum(gain_index, 0);
+ gain_index = minimum(gain_index, sizeof_db2lin_table);
+ lin_g = abe_db2lin_table[gain_index];
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ /* save the input parameters for mute/unmute */
+ abe->desired_gains_linear[mixer_offset + p] = lin_g;
+ abe->desired_gains_decibel[mixer_offset + p] = f_g;
+ abe->desired_ramp_delay_ms[mixer_offset + p] = ramp;
+ /* SMEM address in bytes */
+ mixer_target = OMAP_ABE_S_GTARGET1_ADDR;
+ mixer_target += (mixer_offset<<2);
+ mixer_target += (p<<2);
+
+ if (abe->compensated_mixer_gain) {
+ switch (id) {
+ case MIXDL1:
+ case MIXDL2:
+ case MIXVXREC:
+ case MIXAUDUL:
+ /* compute the sum of the gain of the mixer */
+ for (sum_g = i = 0; i < 4; i++)
+ sum_g += abe->desired_gains_linear[mixer_offset +
+ i];
+ /* let's avoid a division by 0 */
+ if (sum_g == 0)
+ break;
+ /* if the sum stays below unity (1.0 linear),
+ do not weight the gains */
+ if (sum_g < 0x00040000) { /* REMOVE HARD CONST */
+ /* clamp the divisor to unity so the
+ gains are not scaled up */
+ sum_g = 0x00040000;
+ }
+ /* translate it in Q16 format for the later division */
+ abe_int_2_float16(sum_g, &mean_gain, &mean_exp);
+ mean_exp = 10 - mean_exp;
+ for (i = 0; i < 4; i++) {
+ /* new gain = desired gain divided by sum of gains */
+ new_gain_linear[i] =
+ (abe->desired_gains_linear
+ [mixer_offset + i]
+ << 8) / mean_gain;
+ new_gain_linear[i] = (mean_exp > 0) ?
+ new_gain_linear[i] << mean_exp :
+ new_gain_linear[i] >> mean_exp;
+ }
+ /* load the whole adapted S_G_Target SMEM MIXER table */
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM,
+ mixer_target - (p << 2),
+ new_gain_linear, (4 * sizeof(lin_g)));
+ break;
+ default:
+ /* load the S_G_Target SMEM table */
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM,
+ mixer_target,
+ (u32 *) &lin_g, sizeof(lin_g));
+ break;
+ }
+ } else {
+ if (!abe->muted_gains_indicator[mixer_offset + p])
+ /* load the S_G_Target SMEM table */
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM,
+ mixer_target, (u32 *) &lin_g,
+ sizeof(lin_g));
+ else
+ /* update muted gain with new value */
+ abe->muted_gains_decibel[mixer_offset + p] = f_g;
+ }
+ ramp = maximum(minimum(RAMP_MAXLENGTH, ramp), RAMP_MINLENGTH);
+ /* ramp data should be interpolated in the table instead */
+ ramp_index = 3;
+ if ((RAMP_2MS <= ramp) && (ramp < RAMP_5MS))
+ ramp_index = 8;
+ if ((RAMP_5MS <= ramp) && (ramp < RAMP_50MS))
+ ramp_index = 24;
+ if ((RAMP_50MS <= ramp) && (ramp < RAMP_500MS))
+ ramp_index = 36;
+ if (ramp > RAMP_500MS)
+ ramp_index = 48;
+ beta = abe_alpha_iir[ramp_index];
+ alpha = abe_1_alpha_iir[ramp_index];
+ /* CMEM bytes address */
+ mixer_target = OMAP_ABE_C_1_ALPHA_ADDR;
+ /* a pair of gains is updated once in the firmware */
+ mixer_target += ((p + mixer_offset) >> 1) << 2;
+ /* load the ramp delay data */
+ omap_abe_mem_write(abe, OMAP_ABE_CMEM, mixer_target,
+ (u32 *) &alpha, sizeof(alpha));
+ /* CMEM bytes address */
+ mixer_target = OMAP_ABE_C_ALPHA_ADDR;
+ /* a pair of gains is updated once in the firmware */
+ mixer_target += ((p + mixer_offset) >> 1) << 2;
+ omap_abe_mem_write(abe, OMAP_ABE_CMEM, mixer_target,
+ (u32 *) &beta, sizeof(beta));
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_write_gain);
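+
+/*
+ * The dB-to-linear conversion above snaps f_g to the 1 dB entries of
+ * abe_db2lin_table (the divide by 100 implies 100 gain units per dB, with
+ * min_mdb as the bottom of the table). A hypothetical caller restoring the
+ * left DL1 gain to 0 dB with a ramp in the 5..50 ms bracket:
+ *
+ *   omap_abe_write_gain(abe, GAINS_DL1, GAIN_0dB, RAMP_5MS, GAIN_LEFT_OFFSET);
+ *
+ * The alpha/1-alpha pair selected by the ramp is then written to CMEM once
+ * per pair of gains, as done at the end of the function.
+ */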
+/**
+ * omap_abe_write_mixer
+ * @id: name of the mixer
+ * @f_g: input gain of the mixer
+ * @f_ramp: gain ramp speed factor
+ * @p: port corresponding to the above gains
+ *
+ * Load the gain coefficients in FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer
+ * gains in mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+int omap_abe_write_mixer(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 f_ramp, u32 p)
+{
+ _log(ABE_ID_WRITE_MIXER, id, f_ramp, p);
+ omap_abe_write_gain(abe, id, f_g, f_ramp, p);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_write_mixer);
+
+/**
+ * omap_abe_read_gain
+ * @id: name of the mixer
+ * @f_g: returned gain of the selected input
+ * @p: port corresponding to the above gain
+ *
+ */
+int omap_abe_read_gain(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p)
+{
+ u32 mixer_target, mixer_offset, i;
+ _log(ABE_ID_READ_GAIN, id, (u32) f_g, p);
+ omap_abe_gain_offset(abe, id, &mixer_offset);
+ /* SMEM bytes address */
+ mixer_target = OMAP_ABE_S_GTARGET1_ADDR;
+ mixer_target += (mixer_offset<<2);
+ mixer_target += (p<<2);
+ if (!abe->muted_gains_indicator[mixer_offset + p]) {
+ /* load the S_G_Target SMEM table */
+ omap_abe_mem_read(abe, OMAP_ABE_SMEM, mixer_target,
+ (u32 *) f_g, sizeof(*f_g));
+ for (i = 0; i < sizeof_db2lin_table; i++) {
+ if (abe_db2lin_table[i] == *f_g)
+ goto found;
+ }
+ *f_g = 0;
+ return -1;
+ found:
+ *f_g = (i * 100) + min_mdb;
+ } else {
+ /* update muted gain with new value */
+ *f_g = abe->muted_gains_decibel[mixer_offset + p];
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_read_gain);
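+
+/*
+ * Reading a gain performs the reverse lookup: the linear SMEM word is
+ * matched against abe_db2lin_table and returned as (index * 100) + min_mdb,
+ * i.e. in the same unit used by omap_abe_write_gain(); -1 is returned (and
+ * *f_g cleared) when the word does not match any table entry.
+ */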
+
+/**
+ * omap_abe_read_mixer
+ * @id: name of the mixer
+ * @f_g: returned gain of the selected mixer input
+ * @p: port corresponding to the above gain
+ *
+ * Read back the gain currently programmed for one mixer input. This API can
+ * be called even when the corresponding MIXER is not activated.
+ */
+int omap_abe_read_mixer(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p)
+{
+ _log(ABE_ID_READ_MIXER, id, 0, p);
+ omap_abe_read_gain(abe, id, f_g, p);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_read_mixer);
+
+/**
+ * abe_reset_gain_mixer
+ * @id: name of the mixer
+ * @p: list of port corresponding to the above gains
+ *
+ * restart the working gain value of the mixers when a port is enabled
+ */
+void omap_abe_reset_gain_mixer(struct omap_abe *abe, u32 id, u32 p)
+{
+ u32 lin_g, mixer_target, mixer_offset;
+ switch (id) {
+ default:
+ case GAINS_DMIC1:
+ mixer_offset = dmic1_gains_offset;
+ break;
+ case GAINS_DMIC2:
+ mixer_offset = dmic2_gains_offset;
+ break;
+ case GAINS_DMIC3:
+ mixer_offset = dmic3_gains_offset;
+ break;
+ case GAINS_AMIC:
+ mixer_offset = amic_gains_offset;
+ break;
+ case GAINS_DL1:
+ mixer_offset = dl1_gains_offset;
+ break;
+ case GAINS_DL2:
+ mixer_offset = dl2_gains_offset;
+ break;
+ case GAINS_SPLIT:
+ mixer_offset = splitters_gains_offset;
+ break;
+ case MIXDL1:
+ mixer_offset = mixer_dl1_offset;
+ break;
+ case MIXDL2:
+ mixer_offset = mixer_dl2_offset;
+ break;
+ case MIXECHO:
+ mixer_offset = mixer_echo_offset;
+ break;
+ case MIXSDT:
+ mixer_offset = mixer_sdt_offset;
+ break;
+ case MIXVXREC:
+ mixer_offset = mixer_vxrec_offset;
+ break;
+ case MIXAUDUL:
+ mixer_offset = mixer_audul_offset;
+ break;
+ case GAINS_BTUL:
+ mixer_offset = btul_gains_offset;
+ break;
+ }
+ /* SMEM bytes address for the CURRENT gain values */
+ mixer_target = OMAP_ABE_S_GCURRENT_ADDR;
+ mixer_target += (mixer_offset<<2);
+ mixer_target += (p<<2);
+ lin_g = 0;
+ /* load the S_G_Target SMEM table */
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, mixer_target,
+ (u32 *) &lin_g, sizeof(lin_g));
+}
diff --git a/sound/soc/omap/abe/abe_gain.h b/sound/soc/omap/abe/abe_gain.h
new file mode 100644
index 0000000..f332837
--- /dev/null
+++ b/sound/soc/omap/abe/abe_gain.h
@@ -0,0 +1,111 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_GAIN_H_
+#define _ABE_GAIN_H_
+
+#include "abe_typ.h"
+#include "abe_dm_addr.h"
+#include "abe_sm_addr.h"
+#include "abe_cm_addr.h"
+
+#define OMAP_ABE_GAIN_MUTED (0x0001<<0)
+#define OMAP_ABE_GAIN_DISABLED (0x0001<<1)
+
+#define OMAP_ABE_GAIN_DMIC1_LEFT 0
+#define OMAP_ABE_GAIN_DMIC1_RIGTH 1
+#define OMAP_ABE_GAIN_DMIC2_LEFT 2
+#define OMAP_ABE_GAIN_DMIC2_RIGTH 3
+#define OMAP_ABE_GAIN_DMIC3_LEFT 4
+#define OMAP_ABE_GAIN_DMIC3_RIGTH 5
+#define OMAP_ABE_GAIN_AMIC_LEFT 6
+#define OMAP_ABE_GAIN_AMIC_RIGTH 7
+#define OMAP_ABE_GAIN_DL1_LEFT 8
+#define OMAP_ABE_GAIN_DL1_RIGTH 9
+#define OMAP_ABE_GAIN_DL2_LEFT 10
+#define OMAP_ABE_GAIN_DL2_RIGTH 11
+#define OMAP_ABE_GAIN_SPLIT_LEFT 12
+#define OMAP_ABE_GAIN_SPLIT_RIGTH 13
+#define OMAP_ABE_MIXDL1_MM_DL 14
+#define OMAP_ABE_MIXDL1_MM_UL2 15
+#define OMAP_ABE_MIXDL1_VX_DL 16
+#define OMAP_ABE_MIXDL1_TONES 17
+#define OMAP_ABE_MIXDL2_MM_DL 18
+#define OMAP_ABE_MIXDL2_MM_UL2 19
+#define OMAP_ABE_MIXDL2_VX_DL 20
+#define OMAP_ABE_MIXDL2_TONES 21
+#define OMAP_ABE_MIXECHO_DL1 22
+#define OMAP_ABE_MIXECHO_DL2 23
+#define OMAP_ABE_MIXSDT_UL 24
+#define OMAP_ABE_MIXECHO_DL 25
+#define OMAP_ABE_MIXVXREC_MM_DL 26
+#define OMAP_ABE_MIXVXREC_TONES 27
+#define OMAP_ABE_MIXVXREC_VX_UL 28
+#define OMAP_ABE_MIXVXREC_VX_DL 29
+#define OMAP_ABE_MIXAUDUL_MM_DL 30
+#define OMAP_ABE_MIXAUDUL_TONES 31
+#define OMAP_ABE_MIXAUDUL_UPLINK 32
+#define OMAP_ABE_MIXAUDUL_VX_DL 33
+#define OMAP_ABE_GAIN_BTUL_LEFT 34
+#define OMAP_ABE_GAIN_BTUL_RIGTH 35
+
+void omap_abe_reset_gain_mixer(struct omap_abe *abe, u32 id, u32 p);
+
+void abe_int_2_float16(u32 data, u32 *mantissa, u32 *exp);
+
+#endif /* _ABE_GAIN_H_ */
diff --git a/sound/soc/omap/abe/abe_ini.c b/sound/soc/omap/abe/abe_ini.c
new file mode 100644
index 0000000..a126e23
--- /dev/null
+++ b/sound/soc/omap/abe/abe_ini.c
@@ -0,0 +1,455 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_dbg.h"
+#include "abe.h"
+#include "abe_aess.h"
+#include "abe_gain.h"
+#include "abe_mem.h"
+#include "abe_port.h"
+#include "abe_seq.h"
+
+#include "abe_taskid.h"
+
+
+#define ABE_TASK_ID(ID) (OMAP_ABE_D_TASKSLIST_ADDR + sizeof(ABE_STask)*(ID))
+void omap_abe_build_scheduler_table(struct omap_abe *abe);
+void omap_abe_reset_all_ports(struct omap_abe *abe);
+
+const u32 abe_firmware_array[ABE_FIRMWARE_MAX_SIZE] = {
+#include "abe_firmware.c"
+};
+
+
+/*
+ * initialize the default values for call-backs to subroutines
+ * - FIFO IRQ call-backs for sequenced tasks
+ * - FIFO IRQ call-backs for audio player/recorders (ping-pong protocols)
+ * - Remote debugger interface
+ * - Error monitoring
+ * - Activity Tracing
+ */
+
+/**
+ * abe_init_mem - Allocate the ABE handle and map its memory spaces
+ * @_io_base: remapped base addresses of the ABE memories
+ *
+ * Memory map of the ABE memory space for PMEM/CMEM/SMEM/DMEM
+ */
+void abe_init_mem(void __iomem **_io_base)
+{
+ int i;
+
+ abe = kzalloc(sizeof(struct omap_abe), GFP_KERNEL);
+ if (abe == NULL)
+ printk(KERN_ERR "ABE allocation error\n");
+
+ for (i = 0; i < 5; i++)
+ abe->io_base[i] = _io_base[i];
+
+ mutex_init(&abe->mutex);
+
+}
+EXPORT_SYMBOL(abe_init_mem);
+
+/**
+ * abe_load_fw_param - Load ABE Firmware memories
+ * @ABE_FW: pointer to the firmware image (header words followed by the
+ *	PMEM, CMEM, DMEM and SMEM sections)
+ *
+ */
+int abe_load_fw_param(u32 *ABE_FW)
+{
+ u32 pmem_size, dmem_size, smem_size, cmem_size;
+ u32 *pmem_ptr, *dmem_ptr, *smem_ptr, *cmem_ptr, *fw_ptr;
+ /* fast counter timer set at 4096 * 250us = 1.024 s */
+ u32 data = 0x10001000;
+
+ _log(ABE_ID_LOAD_FW_param, 0, 0, 0);
+#define ABE_FW_OFFSET 5
+ fw_ptr = ABE_FW;
+ abe->firmware_version_number = *fw_ptr++;
+ pmem_size = *fw_ptr++;
+ cmem_size = *fw_ptr++;
+ dmem_size = *fw_ptr++;
+ smem_size = *fw_ptr++;
+ pmem_ptr = fw_ptr;
+ cmem_ptr = pmem_ptr + (pmem_size >> 2);
+ dmem_ptr = cmem_ptr + (cmem_size >> 2);
+ smem_ptr = dmem_ptr + (dmem_size >> 2);
+ /* do not load PMEM */
+ if (abe->warm_boot) {
+ /* Stop the event Generator */
+ omap_abe_stop_event_generator(abe);
+
+ /* Now we are sure the firmware is stalled */
+ omap_abe_mem_write(abe, OMAP_ABE_CMEM, 0, cmem_ptr,
+ cmem_size);
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, 0, smem_ptr,
+ smem_size);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, 0, dmem_ptr,
+ dmem_size);
+
+ /* Restore the event Generator status */
+ omap_abe_start_event_generator(abe);
+ } else {
+ omap_abe_mem_write(abe, OMAP_ABE_PMEM, 0, pmem_ptr,
+ pmem_size);
+ omap_abe_mem_write(abe, OMAP_ABE_CMEM, 0, cmem_ptr,
+ cmem_size);
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, 0, smem_ptr,
+ smem_size);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, 0, dmem_ptr,
+ dmem_size);
+ }
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_FASTCOUNTER_ADDR,
+ &data,
+ OMAP_ABE_D_FASTCOUNTER_SIZE);
+
+ /* Update Saturation threshold */
+ data = 0x00700000;
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_SATURATION_EQ_ADDR,
+ &data, 4);
+ data = 0x00900000;
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_SATURATION_EQ_ADDR + 4,
+ &data, 4);
+
+ abe->warm_boot = 1;
+ return 0;
+}
+EXPORT_SYMBOL(abe_load_fw_param);
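+
+/*
+ * Layout of the firmware image consumed above (32-bit words, section sizes
+ * in bytes):
+ *
+ *   word 0           firmware version number
+ *   words 1..4       PMEM, CMEM, DMEM, SMEM section sizes
+ *   following words  PMEM, then CMEM, then DMEM, then SMEM contents
+ *
+ * On a warm boot the PMEM section is skipped and the event generator is
+ * stopped around the data memory updates.
+ */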
+
+/**
+ * omap_abe_load_fw - Load ABE Firmware and initialize memories
+ * @abe: Pointer on abe handle
+ * @firmware: pointer to the firmware image to load
+ *
+ */
+int omap_abe_load_fw(struct omap_abe *abe, u32 *firmware)
+{
+ _log(ABE_ID_LOAD_FW, 0, 0, 0);
+ abe_load_fw_param(firmware);
+ omap_abe_reset_all_ports(abe);
+ omap_abe_build_scheduler_table(abe);
+ omap_abe_reset_all_sequence(abe);
+ omap_abe_select_main_port(OMAP_ABE_PDM_DL_PORT);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_load_fw);
+
+/**
+ * omap_abe_reload_fw - Reload ABE Firmware after OFF mode
+ * @abe: Pointer on abe handle
+ * @firmware: pointer to the firmware image to reload
+ */
+int omap_abe_reload_fw(struct omap_abe *abe, u32 *firmware)
+{
+ abe->warm_boot = 0;
+ abe_load_fw_param(firmware);
+ omap_abe_build_scheduler_table(abe);
+ omap_abe_dbg_reset(&abe->dbg);
+ /* IRQ circular read pointer in DMEM */
+ abe->irq_dbg_read_ptr = 0;
+ /* Restore Gains not managed by the drivers */
+ omap_abe_write_gain(abe, GAINS_SPLIT, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_SPLIT, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_reload_fw);
+
+/**
+ * omap_abe_get_default_fw
+ *
+ * Get default ABE firmware
+ */
+u32 *omap_abe_get_default_fw(struct omap_abe *abe)
+{
+ return (u32 *)abe_firmware_array;
+}
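+
+/*
+ * Typical cold-boot sequence using the two entry points above, sketched for
+ * a hypothetical caller:
+ *
+ *   u32 *fw = omap_abe_get_default_fw(abe);
+ *
+ *   omap_abe_load_fw(abe, fw);
+ *
+ * omap_abe_reload_fw() is used instead when returning from OFF mode; it
+ * clears warm_boot first so the full image, including PMEM, is reloaded.
+ */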
+
+/**
+ * omap_abe_build_scheduler_table
+ *
+ */
+void omap_abe_build_scheduler_table(struct omap_abe *abe)
+{
+ u16 i, n;
+ u8 *ptr;
+ u16 aUplinkMuxing[NBROUTE_UL];
+
+ /* LOAD OF THE TASKS' MULTIFRAME */
+ /* WARNING ON THE LOCATION OF IO_MM_DL WHICH IS PATCHED
+ IN "abe_init_io_tasks" */
+ for (ptr = (u8 *) &(abe->MultiFrame[0][0]), i = 0;
+ i < sizeof(abe->MultiFrame); i++)
+ *ptr++ = 0;
+
+ abe->MultiFrame[0][2] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_VX_DL)*/;
+ abe->MultiFrame[0][3] = ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_DL_8);
+
+ abe->MultiFrame[1][3] = ABE_TASK_ID(C_ABE_FW_TASK_VX_DL_8_48_FIR);
+ abe->MultiFrame[1][6] = ABE_TASK_ID(C_ABE_FW_TASK_DL2Mixer);
+ abe->MultiFrame[1][7] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_VIB_DL)*/;
+
+ abe->MultiFrame[2][0] = ABE_TASK_ID(C_ABE_FW_TASK_DL1Mixer);
+ abe->MultiFrame[2][1] = ABE_TASK_ID(C_ABE_FW_TASK_SDTMixer);
+ abe->MultiFrame[2][5] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_DMIC)*/;
+
+ abe->MultiFrame[3][0] = ABE_TASK_ID(C_ABE_FW_TASK_DL1_GAIN);
+ abe->MultiFrame[3][6] = ABE_TASK_ID(C_ABE_FW_TASK_DL2_GAIN);
+ abe->MultiFrame[3][7] = ABE_TASK_ID(C_ABE_FW_TASK_DL2_EQ);
+
+ abe->MultiFrame[4][0] = ABE_TASK_ID(C_ABE_FW_TASK_DL1_EQ);
+ abe->MultiFrame[4][2] = ABE_TASK_ID(C_ABE_FW_TASK_VXRECMixer);
+ abe->MultiFrame[4][3] = ABE_TASK_ID(C_ABE_FW_TASK_VXREC_SPLIT);
+ abe->MultiFrame[4][6] = ABE_TASK_ID(C_ABE_FW_TASK_VIBRA1);
+ abe->MultiFrame[4][7] = ABE_TASK_ID(C_ABE_FW_TASK_VIBRA2);
+
+ abe->MultiFrame[5][0] = 0;
+ abe->MultiFrame[5][1] = ABE_TASK_ID(C_ABE_FW_TASK_EARP_48_96_LP);
+ abe->MultiFrame[5][2] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_UL)*/;
+ abe->MultiFrame[5][7] = ABE_TASK_ID(C_ABE_FW_TASK_VIBRA_SPLIT);
+
+ abe->MultiFrame[6][0] = ABE_TASK_ID(C_ABE_FW_TASK_EARP_48_96_LP);
+ abe->MultiFrame[6][4] = ABE_TASK_ID(C_ABE_FW_TASK_EchoMixer);
+ abe->MultiFrame[6][5] = ABE_TASK_ID(C_ABE_FW_TASK_BT_UL_SPLIT);
+
+ abe->MultiFrame[7][0] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_DL)*/;
+ abe->MultiFrame[7][3] = ABE_TASK_ID(C_ABE_FW_TASK_DBG_SYNC);
+ abe->MultiFrame[7][5] = ABE_TASK_ID(C_ABE_FW_TASK_ECHO_REF_SPLIT);
+
+ abe->MultiFrame[9][2] = ABE_TASK_ID(C_ABE_FW_TASK_CHECK_IIR_RIGHT);
+
+ abe->MultiFrame[9][6] = 0;
+ abe->MultiFrame[9][7] = ABE_TASK_ID(C_ABE_FW_TASK_IHF_48_96_LP);
+
+ abe->MultiFrame[10][7] = ABE_TASK_ID(C_ABE_FW_TASK_IHF_48_96_LP);
+
+ abe->MultiFrame[11][2] = ABE_TASK_ID(C_ABE_FW_TASK_AMIC_96_48_LP);
+ abe->MultiFrame[11][4] = ABE_TASK_ID(C_ABE_FW_TASK_AMIC_SPLIT);
+ abe->MultiFrame[11][7] = ABE_TASK_ID(C_ABE_FW_TASK_VIBRA_PACK);
+
+ abe->MultiFrame[12][3] = ABE_TASK_ID(C_ABE_FW_TASK_VX_UL_ROUTING);
+ abe->MultiFrame[12][4] = ABE_TASK_ID(C_ABE_FW_TASK_ULMixer);
+ abe->MultiFrame[12][5] = ABE_TASK_ID(C_ABE_FW_TASK_VX_UL_48_8);
+
+ abe->MultiFrame[13][2] = ABE_TASK_ID(C_ABE_FW_TASK_MM_UL2_ROUTING);
+ abe->MultiFrame[13][3] = ABE_TASK_ID(C_ABE_FW_TASK_SideTone);
+ abe->MultiFrame[13][5] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_BT_VX_DL)*/;
+
+ abe->MultiFrame[14][3] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_DMIC)*/;
+ abe->MultiFrame[14][4] = ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_8_FIR);
+
+ abe->MultiFrame[15][0] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_EXT_OUT)*/;
+ abe->MultiFrame[15][3] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_BT_VX_UL)*/;
+ abe->MultiFrame[15][6] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_UL_8)*/;
+
+ abe->MultiFrame[16][2] = ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_UL_8);
+ abe->MultiFrame[16][3] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_VX_UL)*/;
+
+ abe->MultiFrame[17][2] = ABE_TASK_ID(C_ABE_FW_TASK_BT_UL_8_48);
+ abe->MultiFrame[17][3] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_UL2)*/;
+
+ abe->MultiFrame[18][0] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_DL)*/;
+ abe->MultiFrame[18][6] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_DL_8)*/;
+
+ abe->MultiFrame[19][0] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_DL)*/;
+
+ /* MM_UL is moved to OPP 100% */
+ abe->MultiFrame[19][6] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_UL)*/;
+
+ abe->MultiFrame[20][0] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_TONES_DL)*/;
+ abe->MultiFrame[20][6] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_ASRC_MM_EXT_IN)*/;
+
+ abe->MultiFrame[21][1] = ABE_TASK_ID(C_ABE_FW_TASK_DEBUGTRACE_VX_ASRCs);
+ abe->MultiFrame[21][3] = 0/*ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_EXT_IN)*/;
+ /* MUST STAY ON SLOT 22 */
+ abe->MultiFrame[22][0] = ABE_TASK_ID(C_ABE_FW_TASK_DEBUG_IRQFIFO);
+ abe->MultiFrame[22][1] = ABE_TASK_ID(C_ABE_FW_TASK_INIT_FW_MEMORY);
+ abe->MultiFrame[22][2] = 0;
+ /* MM_EXT_IN_SPLIT task must be after IO_MM_EXT_IN and before
+ ASRC_MM_EXT_IN in order to manage OPP50 <-> transitions */
+ abe->MultiFrame[22][4] = ABE_TASK_ID(C_ABE_FW_TASK_MM_EXT_IN_SPLIT);
+
+ abe->MultiFrame[23][0] = ABE_TASK_ID(C_ABE_FW_TASK_GAIN_UPDATE);
+ abe->MultiFrame[23][2] = ABE_TASK_ID(C_ABE_FW_TASK_CHECK_IIR_LEFT);
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_MULTIFRAME_ADDR,
+ (u32 *) abe->MultiFrame, sizeof(abe->MultiFrame));
+ /* reset the uplink router */
+ n = (OMAP_ABE_D_AUPLINKROUTING_SIZE) >> 1;
+ for (i = 0; i < n; i++)
+ aUplinkMuxing[i] = ZERO_labelID;
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_AUPLINKROUTING_ADDR,
+ (u32 *) aUplinkMuxing, sizeof(aUplinkMuxing));
+}
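+
+/*
+ * Each non-zero MultiFrame cell holds the DMEM address of one ABE_STask
+ * descriptor, computed by ABE_TASK_ID() above, i.e. for task index n:
+ *
+ *   OMAP_ABE_D_TASKSLIST_ADDR + sizeof(ABE_STask) * n
+ *
+ * Cells left at 0 (the commented-out IO tasks) are filled in later when the
+ * corresponding I/O tasks are initialized, as the warning above notes.
+ */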
+
+/**
+ * omap_abe_reset_port
+ * @id: ABE port ID
+ *
+ * Stop the port activity, reload the default parameters of the associated
+ * processing features and clear the internal ABE buffers.
+ */
+int omap_abe_reset_port(u32 id)
+{
+ _log(ABE_ID_RESET_PORT, id, 0, 0);
+ abe_port[id] = ((abe_port_t *) abe_port_init)[id];
+ return 0;
+}
+
+/**
+ * omap_abe_reset_all_ports
+ *
+ * load default configuration for all features
+ */
+void omap_abe_reset_all_ports(struct omap_abe *abe)
+{
+ u16 i;
+ for (i = 0; i < LAST_PORT_ID; i++)
+ omap_abe_reset_port(i);
+ /* mixers' configuration */
+ omap_abe_write_mixer(abe, MIXDL1, MUTE_GAIN,
+ RAMP_2MS, MIX_DL1_INPUT_MM_DL);
+ omap_abe_write_mixer(abe, MIXDL1, MUTE_GAIN,
+ RAMP_2MS, MIX_DL1_INPUT_MM_UL2);
+ omap_abe_write_mixer(abe, MIXDL1, MUTE_GAIN,
+ RAMP_2MS, MIX_DL1_INPUT_VX_DL);
+ omap_abe_write_mixer(abe, MIXDL1, MUTE_GAIN,
+ RAMP_2MS, MIX_DL1_INPUT_TONES);
+ omap_abe_write_mixer(abe, MIXDL2, MUTE_GAIN,
+ RAMP_2MS, MIX_DL2_INPUT_TONES);
+ omap_abe_write_mixer(abe, MIXDL2, MUTE_GAIN,
+ RAMP_2MS, MIX_DL2_INPUT_VX_DL);
+ omap_abe_write_mixer(abe, MIXDL2, MUTE_GAIN,
+ RAMP_2MS, MIX_DL2_INPUT_MM_DL);
+ omap_abe_write_mixer(abe, MIXDL2, MUTE_GAIN,
+ RAMP_2MS, MIX_DL2_INPUT_MM_UL2);
+ omap_abe_write_mixer(abe, MIXSDT, MUTE_GAIN,
+ RAMP_2MS, MIX_SDT_INPUT_UP_MIXER);
+ omap_abe_write_mixer(abe, MIXSDT, GAIN_0dB,
+ RAMP_2MS, MIX_SDT_INPUT_DL1_MIXER);
+ omap_abe_write_mixer(abe, MIXECHO, MUTE_GAIN,
+ RAMP_2MS, MIX_ECHO_DL1);
+ omap_abe_write_mixer(abe, MIXECHO, MUTE_GAIN,
+ RAMP_2MS, MIX_ECHO_DL2);
+ omap_abe_write_mixer(abe, MIXAUDUL, MUTE_GAIN,
+ RAMP_2MS, MIX_AUDUL_INPUT_MM_DL);
+ omap_abe_write_mixer(abe, MIXAUDUL, MUTE_GAIN,
+ RAMP_2MS, MIX_AUDUL_INPUT_TONES);
+ omap_abe_write_mixer(abe, MIXAUDUL, GAIN_0dB,
+ RAMP_2MS, MIX_AUDUL_INPUT_UPLINK);
+ omap_abe_write_mixer(abe, MIXAUDUL, MUTE_GAIN,
+ RAMP_2MS, MIX_AUDUL_INPUT_VX_DL);
+ omap_abe_write_mixer(abe, MIXVXREC, MUTE_GAIN,
+ RAMP_2MS, MIX_VXREC_INPUT_TONES);
+ omap_abe_write_mixer(abe, MIXVXREC, MUTE_GAIN,
+ RAMP_2MS, MIX_VXREC_INPUT_VX_DL);
+ omap_abe_write_mixer(abe, MIXVXREC, MUTE_GAIN,
+ RAMP_2MS, MIX_VXREC_INPUT_MM_DL);
+ omap_abe_write_mixer(abe, MIXVXREC, MUTE_GAIN,
+ RAMP_2MS, MIX_VXREC_INPUT_VX_UL);
+ omap_abe_write_gain(abe, GAINS_DMIC1, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DMIC1, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DMIC2, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DMIC2, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DMIC3, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DMIC3, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_AMIC, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_AMIC, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_SPLIT, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_SPLIT, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DL1, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DL1, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DL2, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_DL2, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_BTUL, GAIN_0dB,
+ RAMP_2MS, GAIN_LEFT_OFFSET);
+ omap_abe_write_gain(abe, GAINS_BTUL, GAIN_0dB,
+ RAMP_2MS, GAIN_RIGHT_OFFSET);
+}
diff --git a/sound/soc/omap/abe/abe_initxxx_labels.h b/sound/soc/omap/abe/abe_initxxx_labels.h
new file mode 100644
index 0000000..823f2f3
--- /dev/null
+++ b/sound/soc/omap/abe/abe_initxxx_labels.h
@@ -0,0 +1,460 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _ABE_INITXXX_LABELS_H_
+#define _ABE_INITXXX_LABELS_H_
+#define Dummy_Regs_labelID 0
+#define Dummy_AM_labelID 1
+#define Voice_8k_UL_labelID 2
+#define Voice_8k_DL_labelID 3
+#define ECHO_REF_8K_labelID 4
+#define Voice_16k_UL_labelID 5
+#define Voice_16k_DL_labelID 6
+#define ECHO_REF_16K_labelID 7
+#define MM_DL_labelID 8
+#define IO_VX_DL_ASRC_labelID 9
+#define IO_MM_EXT_IN_ASRC_labelID 10
+#define IO_VIBRA_DL_labelID 11
+#define ZERO_labelID 12
+#define GTarget_labelID 13
+#define GCurrent_labelID 14
+#define Gr_1_labelID 15
+#define Gr_2_labelID 16
+#define Gr_Regs_labelID 17
+#define DMIC0_Gain_labelID 18
+#define DMIC1_Gain_labelID 19
+#define DMIC2_Gain_labelID 20
+#define DMIC3_Gain_labelID 21
+#define AMIC_Gain_labelID 22
+#define MIXDL1_Gain_labelID 23
+#define MIXDL2_Gain_labelID 24
+#define DEFAULT_Gain_labelID 25
+#define DL1_M_G_Tones_labelID 26
+#define DL2_M_G_Tones_labelID 27
+#define Echo_M_G_labelID 28
+#define SDT_M_G_labelID 29
+#define VXREC_M_G_VX_DL_labelID 30
+#define UL_M_G_VX_DL_labelID 31
+#define BTUL_Gain_labelID 32
+#define DL1_M_labelID 33
+#define DL2_M_labelID 34
+#define MM_UL2_labelID 35
+#define VX_DL_labelID 36
+#define Tones_labelID 37
+#define DL_M_MM_UL2_VX_DL_labelID 38
+#define Echo_M_labelID 39
+#define VX_UL_labelID 40
+#define VX_UL_M_labelID 41
+#define SDT_F_labelID 42
+#define SDT_F_data_labelID 43
+#define SDT_Coef_labelID 44
+#define SDT_Regs_labelID 45
+#define SDT_M_labelID 46
+#define DL1_EQ_labelID 47
+#define DL2_EQ_labelID 48
+#define DL1_GAIN_out_labelID 49
+#define DL2_GAIN_out_labelID 50
+#define DMIC1_labelID 51
+#define DMIC1_L_labelID 52
+#define DMIC1_R_labelID 53
+#define DMIC2_labelID 54
+#define DMIC2_L_labelID 55
+#define DMIC2_R_labelID 56
+#define DMIC3_labelID 57
+#define DMIC3_L_labelID 58
+#define DMIC3_R_labelID 59
+#define SaturationMinMax_labelID 60
+#define TEMPORARY0_labelID 61
+#define TEMPORARY1_labelID 62
+#define BT_UL_L_labelID 63
+#define BT_UL_R_labelID 64
+#define AMIC_labelID 65
+#define AMIC_L_labelID 66
+#define AMIC_R_labelID 67
+#define EchoRef_L_labelID 68
+#define EchoRef_R_labelID 69
+#define MM_DL_L_labelID 70
+#define MM_DL_R_labelID 71
+#define MM_UL_labelID 72
+#define AMIC_96_labelID 73
+#define DMIC0_96_labelID 74
+#define DMIC1_96_labelID 75
+#define DMIC2_96_labelID 76
+#define MM_DL_44P1_WPTR_labelID 77
+#define EQ_DL_48K_labelID 78
+#define EQ_48K_labelID 79
+#define McPDM_Out1_labelID 80
+#define McPDM_Out2_labelID 81
+#define McPDM_Out3_labelID 82
+#define VX_UL_MUX_labelID 83
+#define MM_UL2_MUX_labelID 84
+#define MM_UL_MUX_labelID 85
+#define XinASRC_DL_VX_labelID 86
+#define ASRC_DL_VX_Coefs_labelID 87
+#define ASRC_DL_VX_Alpha_labelID 88
+#define ASRC_DL_VX_VarsBeta_labelID 89
+#define ASRC_DL_VX_8k_Regs_labelID 90
+#define XinASRC_UL_VX_labelID 91
+#define ASRC_UL_VX_Coefs_labelID 92
+#define ASRC_UL_VX_Alpha_labelID 93
+#define ASRC_UL_VX_VarsBeta_labelID 94
+#define ASRC_UL_VX_8k_Regs_labelID 95
+#define UL_48_8_DEC_labelID 96
+#define ASRC_DL_VX_16k_Regs_labelID 97
+#define ASRC_UL_VX_16k_Regs_labelID 98
+#define UL_48_16_DEC_labelID 99
+#define XinASRC_MM_EXT_IN_labelID 100
+#define ASRC_MM_EXT_IN_Coefs_labelID 101
+#define ASRC_MM_EXT_IN_Alpha_labelID 102
+#define ASRC_MM_EXT_IN_VarsBeta_labelID 103
+#define ASRC_MM_EXT_IN_Regs_labelID 104
+#define VX_REC_labelID 105
+#define VXREC_UL_M_Tones_VX_UL_labelID 106
+#define VX_REC_L_labelID 107
+#define VX_REC_R_labelID 108
+#define DL2_M_L_labelID 109
+#define DL2_M_R_labelID 110
+#define DL1_M_data_labelID 111
+#define DL1_M_Coefs_labelID 112
+#define DL2_M_LR_data_labelID 113
+#define DL2_M_LR_Coefs_labelID 114
+#define SRC_6_LP_COEFS_labelID 115
+#define SRC_6_LP_GAIN_COEFS_labelID 116
+#define SRC_6_HP_COEFS_labelID 117
+#define SRC_3_LP_COEFS_labelID 118
+#define SRC_3_LP_GAIN_COEFS_labelID 119
+#define SRC_3_HP_COEFS_labelID 120
+#define VX_DL_8_48_LP_DATA_labelID 121
+#define VX_DL_8_48_HP_DATA_labelID 122
+#define VX_DL_16_48_LP_DATA_labelID 123
+#define VX_DL_16_48_HP_DATA_labelID 124
+#define VX_UL_48_8_LP_DATA_labelID 125
+#define VX_UL_48_8_HP_DATA_labelID 126
+#define VX_UL_48_16_LP_DATA_labelID 127
+#define VX_UL_48_16_HP_DATA_labelID 128
+#define BT_UL_8_48_LP_DATA_labelID 129
+#define BT_UL_8_48_HP_DATA_labelID 130
+#define BT_UL_16_48_LP_DATA_labelID 131
+#define BT_UL_16_48_HP_DATA_labelID 132
+#define BT_DL_48_8_LP_DATA_labelID 133
+#define BT_DL_48_8_HP_DATA_labelID 134
+#define BT_DL_48_16_LP_DATA_labelID 135
+#define BT_DL_48_16_HP_DATA_labelID 136
+#define ECHO_REF_48_16_LP_DATA_labelID 137
+#define ECHO_REF_48_16_HP_DATA_labelID 138
+#define ECHO_REF_48_8_LP_DATA_labelID 139
+#define ECHO_REF_48_8_HP_DATA_labelID 140
+#define ECHO_REF_DEC_labelID 141
+#define VX_UL_8_TEMP_labelID 142
+#define VX_UL_16_TEMP_labelID 143
+#define UP_DOWN_8_48_labelID 144
+#define UP_DOWN_16_48_labelID 145
+#define SRC_6_LP_48k_labelID 146
+#define SRC_6_HP_labelID 147
+#define SRC_3_LP_48k_labelID 148
+#define SRC_3_HP_labelID 149
+#define EARP_48_96_LP_DATA_labelID 150
+#define SRC_48_96_LP_labelID 151
+#define IHF_48_96_LP_DATA_labelID 152
+#define EQ_VX_UL_16K_labelID 153
+#define AB0_labelID 154
+#define AC0_labelID 155
+#define MM_DL_C_labelID 156
+#define TONES_C_labelID 157
+#define MM_DL_44P1_REGS_labelID 158
+#define TONES_44P1_REGS_labelID 159
+#define MM_DL_44P1_DRIFT_labelID 160
+#define MM_DL_44P1_XK_labelID 161
+#define TONES_44P1_DRIFT_labelID 162
+#define TONES_44P1_XK_labelID 163
+#define SRC_44P1_MULFAC1_2_labelID 164
+#define A00_labelID 165
+#define MM_DL_44P1_WPTR1_labelID 166
+#define MM_DL_44P1_RPTR_labelID 167
+#define TONES_44P1_WPTR_labelID 168
+#define TONES_44P1_RPTR_labelID 169
+#define C_0DB_SAT_labelID 170
+#define UL_MIC_48K_labelID 171
+#define MM_DL_44P1_PP_REGS_labelID 172
+#define SRC_6_HP_NEW_COEFS_labelID 173
+#define AF_labelID 174
+#define AG_labelID 175
+#define AH_labelID 176
+#define AI_labelID 177
+#define AJ_labelID 178
+#define AK_labelID 179
+#define AL_labelID 180
+#define AM_labelID 181
+#define AN_labelID 182
+#define AO_labelID 183
+#define AP_labelID 184
+#define AQ_labelID 185
+#define AR_labelID 186
+#define AS_labelID 187
+#define AT_labelID 188
+#define AU_labelID 189
+#define AV_labelID 190
+#define SaturationMinMaxEQ_labelID 191
+#define pVIBRA1_p0_labelID 192
+#define pVIBRA1_p1_labelID 193
+#define pVIBRA1_p23_labelID 194
+#define pVIBRA1_p45_labelID 195
+#define pVibra1_pR1_labelID 196
+#define pVibra1_pR2_labelID 197
+#define pVibra1_pR3_labelID 198
+#define pVIBRA1_r_labelID 199
+#define pVIBRA2_p0_0_labelID 200
+#define pVIBRA2_p0_labelID 201
+#define pVIBRA2_p1_labelID 202
+#define pVIBRA2_p23_labelID 203
+#define pVIBRA2_p45_labelID 204
+#define pCtrl_p67_labelID 205
+#define pVIBRA2_r_labelID 206
+#define VIBRA_labelID 207
+#define UP_48_96_LP_COEFS_DC_HF_labelID 208
+#define AX_labelID 209
+#define UP_48_96_LP_COEFS_DC_HS_labelID 210
+#define AMIC_96_48_data_labelID 211
+#define DOWN_96_48_AMIC_Coefs_labelID 212
+#define DOWN_96_48_DMIC_Coefs_labelID 213
+#define DOWN_96_48_AMIC_Regs_labelID 214
+#define DOWN_96_48_DMIC_Regs_labelID 215
+#define DMIC0_96_48_data_labelID 216
+#define DMIC1_96_48_data_labelID 217
+#define DMIC2_96_48_data_labelID 218
+#define SIO_DMIC_labelID 219
+#define SIO_PDM_UL_labelID 220
+#define SIO_BT_VX_UL_labelID 221
+#define SIO_MM_UL_labelID 222
+#define SIO_MM_UL2_labelID 223
+#define SIO_VX_UL_labelID 224
+#define SIO_MM_DL_labelID 225
+#define SIO_VX_DL_labelID 226
+#define SIO_TONES_DL_labelID 227
+#define SIO_VIB_DL_labelID 228
+#define SIO_BT_VX_DL_labelID 229
+#define SIO_PDM_DL_labelID 230
+#define SIO_MM_EXT_OUT_labelID 231
+#define SIO_MM_EXT_IN_labelID 232
+#define SIO_TDM_OUT_labelID 233
+#define SIO_TDM_IN_labelID 234
+#define DMIC_ATC_PTR_labelID 235
+#define MCPDM_UL_ATC_PTR_labelID 236
+#define BT_VX_UL_ATC_PTR_labelID 237
+#define MM_UL_ATC_PTR_labelID 238
+#define MM_UL2_ATC_PTR_labelID 239
+#define VX_UL_ATC_PTR_labelID 240
+#define MM_DL_ATC_PTR_labelID 241
+#define VX_DL_ATC_PTR_labelID 242
+#define TONES_DL_ATC_PTR_labelID 243
+#define VIB_DL_ATC_PTR_labelID 244
+#define BT_VX_DL_ATC_PTR_labelID 245
+#define PDM_DL_ATC_PTR_labelID 246
+#define MM_EXT_OUT_ATC_PTR_labelID 247
+#define MM_EXT_IN_ATC_PTR_labelID 248
+#define TDM_OUT_ATC_PTR_labelID 249
+#define TDM_IN_ATC_PTR_labelID 250
+#define MCU_IRQ_FIFO_ptr_labelID 251
+#define DEBUG_IRQ_FIFO_reg_labelID 252
+#define UP_DOWN_48_96_labelID 253
+#define OSR96_2_labelID 254
+#define DEBUG_GAINS_labelID 255
+#define DBG_8K_PATTERN_labelID 256
+#define DBG_16K_PATTERN_labelID 257
+#define DBG_24K_PATTERN_labelID 258
+#define DBG_48K_PATTERN_labelID 259
+#define DBG_96K_PATTERN_labelID 260
+#define UL_VX_UL_48_8K_labelID 261
+#define UL_VX_UL_48_16K_labelID 262
+#define BT_DL_labelID 263
+#define BT_UL_labelID 264
+#define BT_DL_8k_labelID 265
+#define BT_DL_16k_labelID 266
+#define BT_UL_8k_labelID 267
+#define BT_UL_16k_labelID 268
+#define MM_EXT_IN_labelID 269
+#define MM_EXT_IN_L_labelID 270
+#define MM_EXT_IN_R_labelID 271
+#define ECHO_REF_48_16_WRAP_labelID 272
+#define ECHO_REF_48_8_WRAP_labelID 273
+#define BT_UL_16_48_WRAP_labelID 274
+#define BT_UL_8_48_WRAP_labelID 275
+#define BT_DL_48_16_WRAP_labelID 276
+#define BT_DL_48_8_WRAP_labelID 277
+#define VX_DL_16_48_WRAP_labelID 278
+#define VX_DL_8_48_WRAP_labelID 279
+#define VX_UL_48_16_WRAP_labelID 280
+#define VX_UL_48_8_WRAP_labelID 281
+#define ATC_NULL_BUFFER_labelID 282
+#define MEM_INIT_hal_mem_labelID 283
+#define MEM_INIT_write_mem_labelID 284
+#define MEM_INIT_regs_labelID 285
+#define GAIN_0DB_labelID 286
+#define XinASRC_BT_UL_labelID 287
+#define IO_BT_UL_ASRC_labelID 288
+#define ASRC_BT_UL_Coefs_labelID 289
+#define ASRC_BT_UL_Alpha_labelID 290
+#define ASRC_BT_UL_VarsBeta_labelID 291
+#define ASRC_BT_UL_8k_Regs_labelID 292
+#define ASRC_BT_UL_16k_Regs_labelID 293
+#define XinASRC_BT_DL_labelID 294
+#define DL_48_8_DEC_labelID 295
+#define DL_48_16_DEC_labelID 296
+#define BT_DL_8k_TEMP_labelID 297
+#define BT_DL_16k_TEMP_labelID 298
+#define BT_DL_8k_opp100_labelID 299
+#define BT_DL_16k_opp100_labelID 300
+#define ASRC_BT_DL_Coefs_labelID 301
+#define ASRC_BT_DL_Alpha_labelID 302
+#define ASRC_BT_DL_VarsBeta_labelID 303
+#define ASRC_BT_DL_8k_Regs_labelID 304
+#define ASRC_BT_DL_16k_Regs_labelID 305
+#define BT_DL_48_8_OPP100_WRAP_labelID 306
+#define BT_DL_48_16_OPP100_WRAP_labelID 307
+#define VX_DL_8_48_OSR_LP_labelID 308
+#define SRC_FIR6_OSR_LP_labelID 309
+#define VX_DL_8_48_FIR_WRAP_labelID 310
+#define PING_labelID 311
+#define PING_Regs_labelID 312
+#define BT_UL_8_48_FIR_WRAP_labelID 313
+#define BT_UL_8_48_OSR_LP_labelID 314
+#define SRC_6_LP_NEW_48k_labelID 315
+#define BT_DL_8_48_OSR_LP_labelID 316
+#define SRC_FIR12_OSR_LP_labelID 317
+#define BT_DL_48_8_FIR_WRAP_labelID 318
+#define BT_DL_48_8_FIR_OPP100_WRAP_labelID 319
+#define VX_UL_48_8_FIR_WRAP_labelID 320
+#define VX_UL_8_48_OSR_LP_labelID 321
+#define Dummy_322_labelID 322
+#define Dummy_323_labelID 323
+#define Dummy_324_labelID 324
+#define Dummy_325_labelID 325
+#define Dummy_326_labelID 326
+#define Dummy_327_labelID 327
+#define Dummy_328_labelID 328
+#define Dummy_329_labelID 329
+#define Dummy_330_labelID 330
+#define Dummy_331_labelID 331
+#define Dummy_332_labelID 332
+#define Dummy_333_labelID 333
+#define Dummy_334_labelID 334
+#define Dummy_335_labelID 335
+#define Dummy_336_labelID 336
+#define Dummy_337_labelID 337
+#define Dummy_338_labelID 338
+#define Dummy_339_labelID 339
+#define Dummy_340_labelID 340
+#define Dummy_341_labelID 341
+#define Dummy_342_labelID 342
+#define Dummy_343_labelID 343
+#define Dummy_344_labelID 344
+#define Dummy_345_labelID 345
+#define Dummy_346_labelID 346
+#define Dummy_347_labelID 347
+#define Dummy_348_labelID 348
+#define Dummy_349_labelID 349
+#define Dummy_350_labelID 350
+#define Dummy_351_labelID 351
+#define Dummy_352_labelID 352
+#define Dummy_353_labelID 353
+#define Dummy_354_labelID 354
+#define Dummy_355_labelID 355
+#define Dummy_356_labelID 356
+#define Dummy_357_labelID 357
+#define Dummy_358_labelID 358
+#define Dummy_359_labelID 359
+#define Dummy_360_labelID 360
+#define Dummy_361_labelID 361
+#define Dummy_362_labelID 362
+#define Dummy_363_labelID 363
+#define Dummy_364_labelID 364
+#define Dummy_365_labelID 365
+#define Dummy_366_labelID 366
+#define Dummy_367_labelID 367
+#define Dummy_368_labelID 368
+#define Dummy_369_labelID 369
+#define Dummy_370_labelID 370
+#define Dummy_371_labelID 371
+#define Dummy_372_labelID 372
+#define Dummy_373_labelID 373
+#define Dummy_374_labelID 374
+#define Dummy_375_labelID 375
+#define Dummy_376_labelID 376
+#define Dummy_377_labelID 377
+#define Dummy_378_labelID 378
+#define Dummy_379_labelID 379
+#define Dummy_380_labelID 380
+#define Dummy_381_labelID 381
+#define Dummy_382_labelID 382
+#define Dummy_383_labelID 383
+#define Dummy_384_labelID 384
+#define Dummy_385_labelID 385
+#define Dummy_386_labelID 386
+#define Dummy_387_labelID 387
+#define Dummy_388_labelID 388
+#define Dummy_389_labelID 389
+#define Dummy_390_labelID 390
+#define Dummy_391_labelID 391
+#define Dummy_392_labelID 392
+#define Dummy_393_labelID 393
+#define Dummy_394_labelID 394
+#define Dummy_395_labelID 395
+#define Dummy_396_labelID 396
+#define Dummy_397_labelID 397
+#define Dummy_398_labelID 398
+#define Dummy_399_labelID 399
+#endif /* _ABE_INITXXX_LABELS_H_ */
diff --git a/sound/soc/omap/abe/abe_irq.c b/sound/soc/omap/abe/abe_irq.c
new file mode 100644
index 0000000..7749d46
--- /dev/null
+++ b/sound/soc/omap/abe/abe_irq.c
@@ -0,0 +1,113 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "abe_legacy.h"
+
+extern u32 abe_irq_pingpong_player_id;
+
+/*
+ * initialize the default values for call-backs to subroutines
+ * - FIFO IRQ call-backs for sequenced tasks
+ * - FIFO IRQ call-backs for audio player/recorders (ping-pong protocols)
+ * - Remote debugger interface
+ * - Error monitoring
+ * - Activity Tracing
+ */
+/**
+ * abe_irq_ping_pong
+ *
+ * Call the respective subroutine depending on the IRQ FIFO content:
+ * APS interrupts : IRQ_FIFO[31:28] = IRQtag_APS,
+ * IRQ_FIFO[27:16] = APS_IRQs, IRQ_FIFO[15:0] = loopCounter
+ * SEQ interrupts : IRQ_FIFO[31:28] = IRQtag_COUNT,
+ * IRQ_FIFO[27:16] = Count_IRQs, IRQ_FIFO[15:0] = loopCounter
+ * Ping-Pong Interrupts : IRQ_FIFO[31:28] = IRQtag_PP,
+ * IRQ_FIFO[27:16] = PP_MCU_IRQ, IRQ_FIFO[15:0] = loopCounter
+ */
+void abe_irq_ping_pong(void)
+{
+ /* the first IRQ doesn't signal a buffer transfer completion */
+ if (abe->pp_first_irq)
+ abe->pp_first_irq = 0;
+ else
+ abe->pp_buf_id = (abe->pp_buf_id + 1) & 0x03;
+
+ abe_call_subroutine(abe_irq_pingpong_player_id, NOPARAMETER,
+ NOPARAMETER, NOPARAMETER, NOPARAMETER);
+}
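+
+/*
+ * pp_buf_id therefore advances modulo 4 on every ping-pong IRQ except the
+ * very first one after start-up, which only clears the pp_first_irq flag;
+ * the registered player subroutine is still called on every IRQ.
+ */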
+/**
+ * abe_irq_check_for_sequences
+ * @i: sequence ID
+ *
+ * check the active sequence list
+ *
+ */
+void abe_irq_check_for_sequences(u32 i)
+{
+}
+/**
+ * abe_irq_aps
+ *
+ * call the application subroutine that updates
+ * the acoustics protection filters
+ */
+void abe_irq_aps(u32 aps_info)
+{
+ abe_call_subroutine(abe_irq_aps_adaptation_id, NOPARAMETER, NOPARAMETER,
+ NOPARAMETER, NOPARAMETER);
+}
diff --git a/sound/soc/omap/abe/abe_legacy.h b/sound/soc/omap/abe/abe_legacy.h
new file mode 100644
index 0000000..ca73dc2
--- /dev/null
+++ b/sound/soc/omap/abe/abe_legacy.h
@@ -0,0 +1,98 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_MAIN_H_
+#define _ABE_MAIN_H_
+
+#include <linux/io.h>
+
+#include "abe_dm_addr.h"
+#include "abe_sm_addr.h"
+#include "abe_cm_addr.h"
+#include "abe_define.h"
+#include "abe_fw.h"
+#include "abe_def.h"
+#include "abe_typ.h"
+#include "abe_ext.h"
+#include "abe_dbg.h"
+#include "abe_ref.h"
+#include "abe_api.h"
+#include "abe_typedef.h"
+#include "abe_functionsid.h"
+#include "abe_taskid.h"
+#include "abe_initxxx_labels.h"
+#include "abe_fw.h"
+
+/* pipe connection to the TARGET simulator */
+#define ABE_DEBUG_CHECKERS 0
+/* simulator data extracted from a text-file */
+#define ABE_DEBUG_HWFILE 0
+/* low-level log files */
+#define ABE_DEBUG_LL_LOG 0
+
+extern struct omap_abe *abe;
+
+void omap_abe_dbg_log(struct omap_abe *abe, u32 x, u32 y, u32 z, u32 t);
+void omap_abe_dbg_error(struct omap_abe *abe, int level, int error);
+
+/*
+ * MACROS
+ */
+#define _log(x, y, z, t) \
+ do { if (x & abe->dbg.mask) omap_abe_dbg_log(abe, x, y, z, t); } while (0)
+
+#endif /* _ABE_MAIN_H_ */
diff --git a/sound/soc/omap/abe/abe_main.c b/sound/soc/omap/abe/abe_main.c
new file mode 100644
index 0000000..4f35e7f
--- /dev/null
+++ b/sound/soc/omap/abe/abe_main.c
@@ -0,0 +1,847 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "abe_legacy.h"
+#include "abe_dbg.h"
+#include "abe_port.h"
+
+
+struct omap_abe_equ {
+ /* type of filter */
+ u32 equ_type;
+ /* filter length */
+ u32 equ_length;
+ union {
+ /* parameters are the direct and recursive coefficients in */
+ /* Q6.26 integer fixed-point format. */
+ s32 type1[NBEQ1];
+ struct {
+ /* center frequency of the band [Hz] */
+ s32 freq[NBEQ2];
+ /* gain of each band. [dB] */
+ s32 gain[NBEQ2];
+ /* Q factor of this band [dB] */
+ s32 q[NBEQ2];
+ } type2;
+ } coef;
+ s32 equ_param3;
+};
+
+#include "abe_gain.h"
+#include "abe_aess.h"
+#include "abe_seq.h"
+
+
+int omap_abe_connect_debug_trace(struct omap_abe *abe,
+ struct omap_abe_dma *dma2);
+
+int omap_abe_reset_hal(struct omap_abe *abe);
+int omap_abe_load_fw(struct omap_abe *abe, u32 *firmware);
+int omap_abe_reload_fw(struct omap_abe *abe, u32 *firmware);
+u32* omap_abe_get_default_fw(struct omap_abe *abe);
+int omap_abe_wakeup(struct omap_abe *abe);
+int omap_abe_irq_processing(struct omap_abe *abe);
+int omap_abe_clear_irq(struct omap_abe *abe);
+int omap_abe_disable_irq(struct omap_abe *abe);
+int omap_abe_set_debug_trace(struct omap_abe_dbg *dbg, int debug);
+int omap_abe_set_ping_pong_buffer(struct omap_abe *abe,
+ u32 port, u32 n_bytes);
+int omap_abe_read_next_ping_pong_buffer(struct omap_abe *abe,
+ u32 port, u32 *p, u32 *n);
+int omap_abe_init_ping_pong_buffer(struct omap_abe *abe,
+ u32 id, u32 size_bytes, u32 n_buffers,
+ u32 *p);
+int omap_abe_read_offset_from_ping_buffer(struct omap_abe *abe,
+ u32 id, u32 *n);
+int omap_abe_set_router_configuration(struct omap_abe *abe,
+ u32 id, u32 k, u32 *param);
+int omap_abe_set_opp_processing(struct omap_abe *abe, u32 opp);
+int omap_abe_disable_data_transfer(struct omap_abe *abe, u32 id);
+int omap_abe_enable_data_transfer(struct omap_abe *abe, u32 id);
+int omap_abe_connect_cbpr_dmareq_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 d,
+ abe_dma_t *returned_dma_t);
+int omap_abe_connect_irq_ping_pong_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 subroutine_id, u32 size,
+ u32 *sink, u32 dsp_mcu_flag);
+int omap_abe_connect_serial_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 mcbsp_id);
+int omap_abe_read_port_address(struct omap_abe *abe,
+ u32 port, abe_dma_t *dma2);
+int omap_abe_check_activity(struct omap_abe *abe);
+
+int omap_abe_use_compensated_gain(struct omap_abe *abe, int on_off);
+int omap_abe_write_equalizer(struct omap_abe *abe,
+ u32 id, struct omap_abe_equ *param);
+
+int omap_abe_disable_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_enable_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_mute_gain(struct omap_abe *abe, u32 id, u32 p);
+int omap_abe_unmute_gain(struct omap_abe *abe, u32 id, u32 p);
+
+int omap_abe_write_gain(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 ramp, u32 p);
+int omap_abe_write_mixer(struct omap_abe *abe,
+ u32 id, s32 f_g, u32 f_ramp, u32 p);
+int omap_abe_read_gain(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p);
+int omap_abe_read_mixer(struct omap_abe *abe,
+ u32 id, u32 *f_g, u32 p);
+int omap_abe_mono_mixer(struct omap_abe *abe, u32 id, u32 on_off);
+
+int omap_abe_reset_vx_ul_src_filters(struct omap_abe *abe);
+int omap_abe_reset_mic_ul_src_filters(struct omap_abe *abe);
+int omap_abe_reset_vx_dl_src_filters(struct omap_abe *abe);
+int omap_abe_reset_dl1_src_filters(struct omap_abe *abe);
+int omap_abe_reset_dl2_src_filters(struct omap_abe *abe);
+int omap_abe_reset_bt_dl_src_filters(struct omap_abe *abe);
+void omap_abe_src_filters_saturation_monitoring(struct omap_abe *abe);
+
+extern struct omap_abe *abe;
+
+#if 0
+/**
+ * abe_init_mem - Allocate Kernel space memory map for ABE
+ *
+ * Memory map of ABE memory space for PMEM/DMEM/SMEM/DMEM
+ */
+void abe_init_mem(void __iomem *_io_base)
+{
+ omap_abe_init_mem(abe, _io_base);
+}
+EXPORT_SYMBOL(abe_init_mem);
+
+struct omap_abe* abe_probe_aess(void)
+{
+ return omap_abe_probe_aess(abe);
+}
+EXPORT_SYMBOL(abe_probe_aess);
+
+void abe_remove_aess(void)
+{
+ omap_abe_remove_aess(abe);
+}
+EXPORT_SYMBOL(abe_remove_aess);
+
+void abe_add_subroutine(u32 *id, abe_subroutine2 f,
+ u32 nparam, u32 *params)
+{
+ omap_abe_add_subroutine(abe, id, f, nparam, params);
+}
+EXPORT_SYMBOL(abe_add_subroutine);
+
+#endif
+
+/**
+ * abe_reset_hal - reset the ABE/HAL
+ *
+ * Operations : reset the HAL by reloading the static variables and
+ * default AESS registers.
+ * Called after a PRCM cold-start reset of ABE
+ */
+u32 abe_reset_hal(void)
+{
+ omap_abe_reset_hal(abe);
+ return 0;
+}
+EXPORT_SYMBOL(abe_reset_hal);
+
+/**
+ * abe_load_fw - Load ABE Firmware and initialize memories
+ *
+ */
+u32 abe_load_fw(u32 *firmware)
+{
+ omap_abe_load_fw(abe, firmware);
+ return 0;
+}
+EXPORT_SYMBOL(abe_load_fw);
+
+/**
+ * abe_reload_fw - Reload ABE Firmware and initialize memories
+ *
+ */
+u32 abe_reload_fw(u32 *firmware)
+{
+ omap_abe_reload_fw(abe, firmware);
+ return 0;
+}
+EXPORT_SYMBOL(abe_reload_fw);
+
+u32* abe_get_default_fw(void)
+{
+ return omap_abe_get_default_fw(abe);
+}
+EXPORT_SYMBOL(abe_get_default_fw);
+
+/**
+ * abe_wakeup - Wakeup ABE
+ *
+ * Wakeup ABE in case of retention
+ */
+u32 abe_wakeup(void)
+{
+ omap_abe_wakeup(abe);
+ return 0;
+}
+EXPORT_SYMBOL(abe_wakeup);
+
+/**
+ * abe_irq_processing - Process ABE interrupt
+ *
+ * This subroutine is called upon reception of the "MA_IRQ_99 ABE_MPU_IRQ"
+ * audio back-end interrupt. It checks the ATC hardware and the IRQ_FIFO
+ * from the AE and acts accordingly. Some IRQ sources deliver "end of time
+ * sequenced tasks" notifications, some come from the ping-pong protocols,
+ * and some are generated by the embedded debugger when the firmware stops
+ * on programmable break-points, etc.
+ */
+u32 abe_irq_processing(void)
+{
+ omap_abe_irq_processing(abe);
+ return 0;
+}
+EXPORT_SYMBOL(abe_irq_processing);
+
+/**
+ * abe_clear_irq - clear ABE interrupt
+ *
+ * This subroutine is called to clear the MCU IRQ
+ */
+u32 abe_clear_irq(void)
+{
+ omap_abe_clear_irq(abe);
+ return 0;
+}
+EXPORT_SYMBOL(abe_clear_irq);
+
+/**
+ * abe_disable_irq - disable MCU/DSP ABE interrupt
+ *
+ * This subroutine disables the ABE MCU/DSP IRQ
+ */
+u32 abe_disable_irq(void)
+{
+ omap_abe_disable_irq(abe);
+
+ return 0;
+}
+EXPORT_SYMBOL(abe_disable_irq);
+
+/**
+ * abe_write_event_generator - Selects event generator source
+ * @e: Event Generation Counter, McPDM, DMIC or default.
+ *
+ * Loads the AESS event generator hardware source.
+ * Loads the firmware parameters accordingly.
+ * Indicates to the FW which data stream is the most important to preserve
+ * in case all the streams are asynchronous.
+ * If the parameter is "default", then HAL decides which Event source
+ * is the best appropriate based on the opened ports.
+ *
+ * When neither the DMIC nor the McPDM is activated, the AE will have
+ * its EVENT generator programmed with the EVENT_COUNTER.
+ * The event counter will be tuned in order to deliver a pulse frequency higher
+ * than 96 kHz.
+ * The DPLL output at 100% OPP is MCLK = (32768 Hz x 6000) = 196.608 MHz
+ * The ratio is (MCLK/96000)+(1<<1) = 2050
+ * (1<<1) in order to have the same speed at 50% and 100% OPP
+ * (only 15 MSB bits are used at OPP50%)
+ */
+u32 abe_write_event_generator(u32 e) /* should integrate abe as parameter */
+{
+ omap_abe_write_event_generator(abe, e);
+ return 0;
+}
+EXPORT_SYMBOL(abe_write_event_generator);
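+
+/*
+ * Worked example of the EVENT_COUNTER ratio above (editorial sketch, not
+ * part of the TI HAL): with MCLK = 196.608 MHz at 100% OPP,
+ *   ratio = MCLK / 96000 + (1 << 1)
+ *         = 196608000 / 96000 + 2 = 2048 + 2 = 2050
+ */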
+
+/**
+ * abe_stop_event_generator - Stops event generator source
+ *
+ * Stop the event generator of AESS. No more events will be sent to the AESS
+ * engine. The upper layer must wait 1/96 kHz to be sure that the engine
+ * reaches the IDLE instruction.
+ */
+u32 abe_stop_event_generator(void)
+{
+ omap_abe_stop_event_generator(abe);
+ return 0;
+}
+EXPORT_SYMBOL(abe_stop_event_generator);
+
+/**
+ * abe_connect_debug_trace
+ * @dma2: pointer to the DMEM trace buffer
+ *
+ * returns the address and size of the real-time debug trace buffer,
+ * the content of which will vary from one firmware release to another
+ */
+u32 abe_connect_debug_trace(abe_dma_t *dma2)
+{
+ omap_abe_connect_debug_trace(abe, (struct omap_abe_dma *)dma2);
+ return 0;
+}
+EXPORT_SYMBOL(abe_connect_debug_trace);
+
+/**
+ * abe_set_debug_trace
+ * @debug: debug ID from a list to be defined
+ *
+ * loads a mask which filters the debug trace to dedicated types of data
+ */
+u32 abe_set_debug_trace(abe_dbg_t debug)
+{
+ omap_abe_set_debug_trace(&abe->dbg, (int)(debug));
+ return 0;
+}
+EXPORT_SYMBOL(abe_set_debug_trace);
+
+/**
+ * abe_set_ping_pong_buffer
+ * @port: ABE port ID
+ * @n_bytes: Size of Ping/Pong buffer
+ *
+ * Updates the next ping-pong buffer with "size" bytes copied from the
+ * host processor. This API notifies the FW that the data transfer is done.
+ */
+u32 abe_set_ping_pong_buffer(u32 port, u32 n_bytes)
+{
+ omap_abe_set_ping_pong_buffer(abe, port, n_bytes);
+ return 0;
+}
+EXPORT_SYMBOL(abe_set_ping_pong_buffer);
+
+/**
+ * abe_read_next_ping_pong_buffer
+ * @port: ABE portID
+ * @p: Next buffer address (pointer)
+ * @n: Next buffer size (pointer)
+ *
+ * Returns the base address of the next ping-pong buffer and its size
+ */
+u32 abe_read_next_ping_pong_buffer(u32 port, u32 *p, u32 *n)
+{
+ omap_abe_read_next_ping_pong_buffer(abe, port, p, n);
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_next_ping_pong_buffer);
+
+/**
+ * abe_init_ping_pong_buffer
+ * @id: ABE port ID
+ * @size_bytes: size of the ping-pong buffer in bytes
+ * @n_buffers: number of buffers (2 = ping/pong)
+ * @p: returned address of the ping-pong list of base addresses
+ * (byte offset from DMEM start)
+ *
+ * Computes the base address of the ping_pong buffers
+ */
+u32 abe_init_ping_pong_buffer(u32 id, u32 size_bytes, u32 n_buffers,
+ u32 *p)
+{
+ omap_abe_init_ping_pong_buffer(abe, id, size_bytes, n_buffers, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_init_ping_pong_buffer);
+
+/**
+ * abe_read_offset_from_ping_buffer
+ * @id: ABE port ID
+ * @n: returned address of the offset
+ * from the ping buffer start address (in samples)
+ *
+ * Computes the current firmware ping pong read pointer location,
+ * expressed in samples, as the offset from the start address of ping buffer.
+ */
+u32 abe_read_offset_from_ping_buffer(u32 id, u32 *n)
+{
+ omap_abe_read_offset_from_ping_buffer(abe, id, n);
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_offset_from_ping_buffer);
+
+/**
+ * abe_write_equalizer
+ * @id: name of the equalizer
+ * @param : equalizer coefficients
+ *
+ * Load the coefficients in CMEM.
+ */
+u32 abe_write_equalizer(u32 id, abe_equ_t *param)
+{
+ omap_abe_write_equalizer(abe, id, (struct omap_abe_equ *)param);
+ return 0;
+}
+EXPORT_SYMBOL(abe_write_equalizer);
+/**
+ * abe_disable_gain
+ * Parameters:
+ * mixer id
+ * sub-port id
+ *
+ */
+u32 abe_disable_gain(u32 id, u32 p)
+{
+ omap_abe_disable_gain(abe, id, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_disable_gain);
+/**
+ * abe_enable_gain
+ * Parameters:
+ * mixer id
+ * sub-port id
+ *
+ */
+u32 abe_enable_gain(u32 id, u32 p)
+{
+ omap_abe_enable_gain(abe, id, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_enable_gain);
+
+/**
+ * abe_mute_gain
+ * Parameters:
+ * mixer id
+ * sub-port id
+ *
+ */
+u32 abe_mute_gain(u32 id, u32 p)
+{
+ omap_abe_mute_gain(abe, id, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_mute_gain);
+
+/**
+ * abe_unmute_gain
+ * Parameters:
+ * mixer id
+ * sub-port id
+ *
+ */
+u32 abe_unmute_gain(u32 id, u32 p)
+{
+ omap_abe_unmute_gain(abe, id, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_unmute_gain);
+
+/**
+ * abe_write_gain
+ * @id: gain name or mixer name
+ * @f_g: list of input gains of the mixer
+ * @ramp: gain ramp speed factor
+ * @p: list of ports corresponding to the above gains
+ *
+ * Loads the gain coefficients to FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer gains
+ * in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+u32 abe_write_gain(u32 id, s32 f_g, u32 ramp, u32 p)
+{
+ omap_abe_write_gain(abe, id, f_g, ramp, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_write_gain);
+
+/**
+ * abe_write_mixer
+ * @id: name of the mixer
+ * @param: input gains and delay ramp of the mixer
+ * @p: port corresponding to the above gains
+ *
+ * Load the gain coefficients in FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer
+ * gains in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+u32 abe_write_mixer(u32 id, s32 f_g, u32 f_ramp, u32 p)
+{
+ omap_abe_write_gain(abe, id, f_g, f_ramp, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_write_mixer);
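+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): programming the DL1 mixer
+ * MM_DL input to -6 dB with a 5 ms ramp using the wrappers above. The
+ * MIXDL1/GAIN_M6dB/RAMP_5MS/MIX_DL1_INPUT_MM_DL constants are assumed to
+ * come from the ABE HAL headers; the values chosen are illustrative only,
+ * not a recommended tuning.
+ */
+static void example_set_dl1_mm_dl_gain(void)
+{
+ /* load the gain/ramp, then unmute that mixer input */
+ abe_write_mixer(MIXDL1, GAIN_M6dB, RAMP_5MS, MIX_DL1_INPUT_MM_DL);
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_MM_DL);
+}
+#endif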
+
+/**
+ * abe_read_gain
+ * @id: name of the mixer
+ * @param: list of input gains of the mixer
+ * @p: list of port corresponding to the above gains
+ *
+ */
+u32 abe_read_gain(u32 id, u32 *f_g, u32 p)
+{
+ omap_abe_read_gain(abe, id, f_g, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_gain);
+
+/**
+ * abe_read_mixer
+ * @id: name of the mixer
+ * @param: gains of the mixer
+ * @p: port corresponding to the above gains
+ *
+ * Reads the gain coefficients from FW memory. This API can be called when
+ * the corresponding MIXER is not activated. After reloading the firmware
+ * the default coefficients correspond to "all input and output mixer
+ * gains in the mute state". A mixer is disabled with a network reconfiguration
+ * corresponding to an OPP value.
+ */
+u32 abe_read_mixer(u32 id, u32 *f_g, u32 p)
+{
+ omap_abe_read_gain(abe, id, f_g, p);
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_mixer);
+
+/**
+ * abe_set_router_configuration
+ * @Id: name of the router
+ * @Conf: id of the configuration
+ * @param: list of output index of the route
+ *
+ * The uplink router takes its input from DMIC (6 samples), AMIC (2 samples)
+ * and PORT1/2 (2 stereo ports). Each sample will be individually stored in
+ * an intermediate table of 10 elements.
+ *
+ * Example of router table parameter for voice uplink with phoenix microphones
+ *
+ * indexes 0 .. 9 = MM_UL description (digital MICs and MMEXTIN)
+ * DMIC1_L_labelID, DMIC1_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+ * MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID, ZERO_labelID, ZERO_labelID,
+ * ZERO_labelID, ZERO_labelID,
+ * indexes 10 .. 11 = MM_UL2 description (recording on DMIC3)
+ * DMIC3_L_labelID, DMIC3_R_labelID,
+ * indexes 12 .. 13 = VX_UL description (VXUL based on PDMUL data)
+ * AMIC_L_labelID, AMIC_R_labelID,
+ * indexes 14 .. 15 = RESERVED (NULL)
+ * ZERO_labelID, ZERO_labelID,
+ */
+u32 abe_set_router_configuration(u32 id, u32 k, u32 *param)
+{
+ omap_abe_set_router_configuration(abe, id, k, param);
+ return 0;
+}
+EXPORT_SYMBOL(abe_set_router_configuration);
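+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): building the 16-entry uplink
+ * routing table described in the comment above (digital mics on MM_UL,
+ * DMIC3 on MM_UL2, analog mics on VX_UL). NBROUTE_UL, UPROUTE,
+ * UPROUTE_CONFIG_AMIC and the label IDs are assumed to come from the ABE
+ * HAL headers.
+ */
+static void example_route_amic_to_vx_ul(void)
+{
+ u32 route[NBROUTE_UL] = {
+  /* indexes 0 .. 9 = MM_UL (digital MICs and MMEXTIN) */
+  DMIC1_L_labelID, DMIC1_R_labelID, DMIC2_L_labelID, DMIC2_R_labelID,
+  MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID,
+  ZERO_labelID, ZERO_labelID, ZERO_labelID, ZERO_labelID,
+  /* indexes 10 .. 11 = MM_UL2 (recording on DMIC3) */
+  DMIC3_L_labelID, DMIC3_R_labelID,
+  /* indexes 12 .. 13 = VX_UL (analog microphones) */
+  AMIC_L_labelID, AMIC_R_labelID,
+  /* indexes 14 .. 15 = RESERVED */
+  ZERO_labelID, ZERO_labelID,
+ };
+
+ abe_set_router_configuration(UPROUTE, UPROUTE_CONFIG_AMIC, route);
+}
+#endif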
+
+/**
+ * abe_set_opp_processing - Set OPP mode for ABE Firmware
+ * @opp: OPP mode
+ *
+ * New processing network and OPP:
+ * 0: Lowest power consumption audio player (no post-processing, no mixer)
+ * 1: OPP 25% (simple multimedia features, including low-power player)
+ * 2: OPP 50% (multimedia and voice calls)
+ * 3: OPP 100% (complex multimedia use-cases)
+ *
+ * Rearranges the FW task network to the corresponding OPP list of features.
+ * The corresponding AE ports are supposed to be set/reset accordingly before
+ * this switch.
+ *
+ */
+u32 abe_set_opp_processing(u32 opp)
+{
+ omap_abe_set_opp_processing(abe, opp);
+ return 0;
+}
+EXPORT_SYMBOL(abe_set_opp_processing);
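+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): switching the firmware to
+ * the 50% OPP network (multimedia plus voice call) with the wrapper
+ * above, assuming the AE ports have already been set up for that
+ * use-case. ABE_OPP50 is assumed to come from the ABE HAL headers.
+ */
+static void example_switch_to_opp50(void)
+{
+ abe_set_opp_processing(ABE_OPP50);
+}
+#endif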
+
+/**
+ * abe_disable_data_transfer
+ * @id: ABE port id
+ *
+ * disables the ATC descriptor and stop IO/port activities
+ * disable the IO task (@f = 0)
+ * clear ATC DMEM buffer, ATC enabled
+ */
+u32 abe_disable_data_transfer(u32 id)
+{
+ omap_abe_disable_data_transfer(abe, id);
+ return 0;
+}
+EXPORT_SYMBOL(abe_disable_data_transfer);
+
+/**
+ * abe_enable_data_transfer
+ * @id: ABE port id
+ *
+ * enables the ATC descriptor
+ * reset ATC pointers
+ * enable the IO task (@f <> 0)
+ */
+u32 abe_enable_data_transfer(u32 id)
+{
+ omap_abe_enable_data_transfer(abe, id);
+ return 0;
+}
+EXPORT_SYMBOL(abe_enable_data_transfer);
+
+/**
+ * abe_connect_cbpr_dmareq_port
+ * @id: port name
+ * @f: desired data format
+ * @d: desired dma_request line (0..7)
+ * @returned_dma_t: returned pointer to the base address of the CBPr register
+ * and number of samples to exchange during a DMA_request.
+ *
+ * enables the data exchange between a DMA and the ABE through the
+ * CBPr registers of AESS.
+ */
+u32 abe_connect_cbpr_dmareq_port(u32 id, abe_data_format_t *f, u32 d,
+ abe_dma_t *returned_dma_t)
+{
+ omap_abe_connect_cbpr_dmareq_port(abe, id, f, d, returned_dma_t);
+ return 0;
+}
+EXPORT_SYMBOL(abe_connect_cbpr_dmareq_port);
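+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): connecting the MM_DL path to
+ * CBPr0 over DMA request line 0 with the wrapper above. The 48 kHz stereo
+ * format and DMA line choice are illustrative assumptions; the returned
+ * abe_dma_t carries the CBPr register address and iteration count used to
+ * program the system DMA. MM_DL_PORT, STEREO_MSB and ABE_CBPR0_IDX are
+ * assumed to come from the ABE HAL headers.
+ */
+static void example_connect_mm_dl_dma(void)
+{
+ abe_data_format_t format;
+ abe_dma_t dma_sink;
+
+ format.f = 48000;
+ format.samp_format = STEREO_MSB;
+ abe_connect_cbpr_dmareq_port(MM_DL_PORT, &format, ABE_CBPR0_IDX,
+   &dma_sink);
+}
+#endif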
+
+/**
+ * abe_connect_irq_ping_pong_port
+ * @id: port name
+ * @f: desired data format
+ * @subroutine_id: index of the call-back subroutine to call
+ * @size: half-buffer (ping) size
+ * @sink: returned base address of the first (ping) buffer
+ * @dsp_mcu_flag: selects whether the MCU or the DSP IRQ is used
+ *
+ * enables the data exchanges through a direct access to the DMEM
+ * memory of ABE using cache flush. On each IRQ activation a subroutine
+ * registered with "abe_plug_subroutine" will be called. This subroutine
+ * will generate an amount of samples, send them to DMEM memory and call
+ * "abe_set_ping_pong_buffer" to notify the new amount of samples in the
+ * pong buffer.
+ */
+u32 abe_connect_irq_ping_pong_port(u32 id, abe_data_format_t *f,
+ u32 subroutine_id, u32 size,
+ u32 *sink, u32 dsp_mcu_flag)
+{
+ omap_abe_connect_irq_ping_pong_port(abe, id, f, subroutine_id, size,
+ sink, dsp_mcu_flag);
+ return 0;
+}
+EXPORT_SYMBOL(abe_connect_irq_ping_pong_port);
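+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): the typical ping-pong set-up
+ * sequence using the wrappers above. "pp_cb_id" is assumed to be a
+ * call-back previously registered with abe_plug_subroutine(); the port,
+ * format and buffer size are illustrative values only, and MM_DL_PORT,
+ * STEREO_MSB and PING_PONG_WITH_MCU_IRQ are assumed to come from the ABE
+ * HAL headers.
+ */
+static void example_ping_pong_setup(u32 pp_cb_id, u32 size_bytes)
+{
+ abe_data_format_t format;
+ u32 dmem_offset;
+
+ format.f = 48000;
+ format.samp_format = STEREO_MSB;
+ /* reserve the DMEM buffers and route the MCU IRQ to pp_cb_id */
+ abe_connect_irq_ping_pong_port(MM_DL_PORT, &format, pp_cb_id,
+   size_bytes, &dmem_offset, PING_PONG_WITH_MCU_IRQ);
+ abe_init_ping_pong_buffer(MM_DL_PORT, size_bytes, 2, &dmem_offset);
+ /* after filling a ping (or pong) buffer, notify the firmware */
+ abe_set_ping_pong_buffer(MM_DL_PORT, size_bytes);
+}
+#endif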
+
+/**
+ * abe_connect_serial_port()
+ * @id: port name
+ * @f: data format
+ * @i: peripheral ID (McBSP #1, #2, #3)
+ *
+ * Operations : enables the data exchanges between a McBSP and an ATC buffer in
+ * DMEM. This API is used to connect 48kHz McBSP streams to MM_DL and 8/16kHz
+ * voice streams to VX_UL, VX_DL, BT_VX_UL, BT_VX_DL. It abstracts the
+ * abe_write_port API.
+ */
+u32 abe_connect_serial_port(u32 id, abe_data_format_t *f,
+ u32 mcbsp_id)
+{
+ omap_abe_connect_serial_port(abe, id, f, mcbsp_id);
+ return 0;
+}
+EXPORT_SYMBOL(abe_connect_serial_port);
+
+/**
+ * abe_read_port_address
+ * @port: ABE port ID
+ * @dma2: output pointer to the DMA iteration and data destination pointer
+ *
+ * This API returns the address of the DMA register used on this audio port.
+ * Depending on the protocol being used, adds the base address offset L3
+ * (DMA) or MPU (ARM)
+ */
+u32 abe_read_port_address(u32 port, abe_dma_t *dma2)
+{
+ omap_abe_read_port_address(abe, port, dma2);
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_port_address);
+
+/**
+ * abe_check_activity - Check if some ABE activity.
+ *
+ * Check if any ABE ports are running.
+ * return 1: still activity on ABE
+ * return 0: no more activity on ABE. Event generator can be stopped
+ *
+ */
+u32 abe_check_activity(void)
+{
+ return (u32)omap_abe_check_activity(abe);
+}
+EXPORT_SYMBOL(abe_check_activity);
+/**
+ * abe_use_compensated_gain
+ * @on_off: 1 enables the compensated gain management, 0 disables it
+ *
+ * Selects the automatic mixer gain management.
+ * on_off = 1 allows "abe_write_gain" to adjust the overall
+ * gains of the mixer so that they do not create saturation.
+ */
+abehal_status abe_use_compensated_gain(u32 on_off)
+{
+ omap_abe_use_compensated_gain(abe, (int)(on_off));
+ return 0;
+}
+EXPORT_SYMBOL(abe_use_compensated_gain);
+
+/**
+ * abe_mono_mixer
+ * @id: name of the mixer (MIXDL1, MIXDL2, MIXAUDUL)
+ * @on_off: enable/disable flag
+ *
+ * This API programs DL1Mixer or DL2Mixer to output mono data
+ * on both left and right data paths.
+ */
+int abe_mono_mixer(u32 id, u32 on_off)
+{
+ return omap_abe_mono_mixer(abe, id, on_off);
+}
+EXPORT_SYMBOL(abe_mono_mixer);
+
+/**
+* abe_reset_vx_ul_src_filters - reset VX UL path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_vx_ul_src_filters(void)
+{
+ return (u32)omap_abe_reset_vx_ul_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_vx_ul_src_filters);
+
+/**
+* abe_reset_mic_ul_src_filters - reset mic path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_mic_ul_src_filters(void)
+{
+ return (u32)omap_abe_reset_mic_ul_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_mic_ul_src_filters);
+
+/**
+* abe_reset_vx_dl_src_filters - reset VX DL path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_vx_dl_src_filters(void)
+{
+ return (u32)omap_abe_reset_vx_dl_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_vx_dl_src_filters);
+
+/**
+* abe_reset_dl1_src_filters - reset DL1 path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_dl1_src_filters(void)
+{
+ return (u32)omap_abe_reset_dl1_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_dl1_src_filters);
+
+/**
+* abe_reset_dl2_src_filters - reset DL2 path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_dl2_src_filters(void)
+{
+ return (u32)omap_abe_reset_dl2_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_dl2_src_filters);
+
+/**
+* abe_reset_bt_dl_src_filters - reset BT DL path filters
+*
+* it is assumed that filters are located in SMEM
+*/
+u32 abe_reset_bt_dl_src_filters(void)
+{
+ return (u32)omap_abe_reset_bt_dl_src_filters(abe);
+}
+EXPORT_SYMBOL(abe_reset_bt_dl_src_filters);
+
+/**
+* abe_src_filters_saturation_monitoring - monitor for saturation
+* in abe filters
+*/
+void abe_src_filters_saturation_monitoring(void)
+{
+ omap_abe_src_filters_saturation_monitoring(abe);
+}
+EXPORT_SYMBOL(abe_src_filters_saturation_monitoring);
diff --git a/sound/soc/omap/abe/abe_main.h b/sound/soc/omap/abe/abe_main.h
new file mode 100644
index 0000000..e019541
--- /dev/null
+++ b/sound/soc/omap/abe/abe_main.h
@@ -0,0 +1,676 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_MAIN_H_
+#define _ABE_MAIN_H_
+
+#include <linux/io.h>
+
+#include "abe_initxxx_labels.h"
+
+#define D_DEBUG_FIFO_ADDR 8160
+#define D_DEBUG_FIFO_ADDR_END 8255
+
+#define SUB_0_PARAM 0
+#define SUB_1_PARAM 1
+
+#define ABE_DEFAULT_BASE_ADDRESS_L3 0x49000000L
+#define ABE_DMEM_BASE_ADDRESS_MPU 0x40180000L
+#define ABE_DMEM_BASE_OFFSET_MPU 0x00080000L
+#define ABE_DMEM_BASE_ADDRESS_L3 (ABE_DEFAULT_BASE_ADDRESS_L3 + \
+ ABE_DMEM_BASE_OFFSET_MPU)
+
+/*
+ * HARDWARE AND PERIPHERAL DEFINITIONS
+ */
+/* MM_DL */
+#define ABE_CBPR0_IDX 0
+/* VX_DL */
+#define ABE_CBPR1_IDX 1
+/* VX_UL */
+#define ABE_CBPR2_IDX 2
+/* MM_UL */
+#define ABE_CBPR3_IDX 3
+/* MM_UL2 */
+#define ABE_CBPR4_IDX 4
+/* TONES */
+#define ABE_CBPR5_IDX 5
+/* VIB */
+#define ABE_CBPR6_IDX 6
+/* DEBUG/CTL */
+#define ABE_CBPR7_IDX 7
+
+/*
+ * OPP TYPE
+ *
+ * 0: Lowest power consumption audio player
+ * 1: OPP 25% (simple multimedia features)
+ * 2: OPP 50% (multimedia and voice calls)
+ * 3: OPP100% (multimedia complex use-cases)
+ */
+#define ABE_OPP0 0
+#define ABE_OPP25 1
+#define ABE_OPP50 2
+#define ABE_OPP100 3
+/*
+ * SAMPLES TYPE
+ *
+ * mono 16 bit sample LSB aligned, 16 MSB bits are unused;
+ * mono right shifted to 16bits LSBs on a 32bits DMEM FIFO for McBSP
+ * TX purpose;
+ * mono sample MSB aligned (16/24/32bits);
+ * two successive mono samples in one 32bits container;
+ * Two L/R 16bits samples in a 32bits container;
+ * Two channels defined with two MSB aligned samples;
+ * Three channels defined with three MSB aligned samples (MIC);
+ * Four channels defined with four MSB aligned samples (MIC);
+ * . . .
+ * Eight channels defined with eight MSB aligned samples (MIC);
+ */
+#define MONO_MSB 1
+#define MONO_RSHIFTED_16 2
+#define STEREO_RSHIFTED_16 3
+#define STEREO_16_16 4
+#define STEREO_MSB 5
+#define THREE_MSB 6
+#define FOUR_MSB 7
+#define FIVE_MSB 8
+#define SIX_MSB 9
+#define SEVEN_MSB 10
+#define EIGHT_MSB 11
+#define NINE_MSB 12
+#define TEN_MSB 13
+/*
+ * PORT PROTOCOL TYPE - abe_port_protocol_switch_id
+ */
+#define SLIMBUS_PORT_PROT 1
+#define SERIAL_PORT_PROT 2
+#define TDM_SERIAL_PORT_PROT 3
+#define DMIC_PORT_PROT 4
+#define MCPDMDL_PORT_PROT 5
+#define MCPDMUL_PORT_PROT 6
+#define PINGPONG_PORT_PROT 7
+#define DMAREQ_PORT_PROT 8
+/*
+ * PORT IDs, this list is aligned with the FW data mapping
+ */
+#define DMIC_PORT 0
+#define PDM_UL_PORT 1
+#define BT_VX_UL_PORT 2
+#define MM_UL_PORT 3
+#define MM_UL2_PORT 4
+#define VX_UL_PORT 5
+#define MM_DL_PORT 6
+#define VX_DL_PORT 7
+#define TONES_DL_PORT 8
+#define VIB_DL_PORT 9
+#define BT_VX_DL_PORT 10
+#define PDM_DL_PORT 11
+#define MM_EXT_OUT_PORT 12
+#define MM_EXT_IN_PORT 13
+#define TDM_DL_PORT 14
+#define TDM_UL_PORT 15
+#define DEBUG_PORT 16
+#define LAST_PORT_ID 17
+/* definitions for the compatibility with HAL05xx */
+#define PDM_DL1_PORT 18
+#define PDM_DL2_PORT 19
+#define PDM_VIB_PORT 20
+/* There is only one DMIC port, always used with 6 samples
+ per 96 kHz period */
+#define DMIC_PORT1 DMIC_PORT
+#define DMIC_PORT2 DMIC_PORT
+#define DMIC_PORT3 DMIC_PORT
+/*
+ * Signal processing module names - EQ APS MIX ROUT
+ */
+/* equalizer downlink path headset + earphone */
+#define FEAT_EQ1 1
+/* equalizer downlink path integrated handsfree LEFT */
+#define FEAT_EQ2L (FEAT_EQ1+1)
+/* equalizer downlink path integrated handsfree RIGHT */
+#define FEAT_EQ2R (FEAT_EQ2L+1)
+/* equalizer downlink path side-tone */
+#define FEAT_EQSDT (FEAT_EQ2R+1)
+/* equalizer uplink path AMIC */
+#define FEAT_EQAMIC (FEAT_EQSDT+1)
+/* equalizer uplink path DMIC */
+#define FEAT_EQDMIC (FEAT_EQAMIC+1)
+/* Acoustic protection for headset */
+#define FEAT_APS1 (FEAT_EQDMIC+1)
+/* acoustic protection high-pass filter for handsfree "Left" */
+#define FEAT_APS2 (FEAT_APS1+1)
+/* acoustic protection high-pass filter for handsfree "Right" */
+#define FEAT_APS3 (FEAT_APS2+1)
+/* asynchronous sample-rate-converter for the downlink voice path */
+#define FEAT_ASRC1 (FEAT_APS3+1)
+/* asynchronous sample-rate-converter for the uplink voice path */
+#define FEAT_ASRC2 (FEAT_ASRC1+1)
+/* asynchronous sample-rate-converter for the multimedia player */
+#define FEAT_ASRC3 (FEAT_ASRC2+1)
+/* asynchronous sample-rate-converter for the echo reference */
+#define FEAT_ASRC4 (FEAT_ASRC3+1)
+/* mixer of the headset and earphone path */
+#define FEAT_MIXDL1 (FEAT_ASRC4+1)
+/* mixer of the hands-free path */
+#define FEAT_MIXDL2 (FEAT_MIXDL1+1)
+/* mixer for audio being sent on the voice_ul path */
+#define FEAT_MIXAUDUL (FEAT_MIXDL2+1)
+/* mixer for voice communication recording */
+#define FEAT_MIXVXREC (FEAT_MIXAUDUL+1)
+/* mixer for side-tone */
+#define FEAT_MIXSDT (FEAT_MIXVXREC+1)
+/* mixer for echo reference */
+#define FEAT_MIXECHO (FEAT_MIXSDT+1)
+/* router of the uplink path */
+#define FEAT_UPROUTE (FEAT_MIXECHO+1)
+/* all gains */
+#define FEAT_GAINS (FEAT_UPROUTE+1)
+#define FEAT_GAINS_DMIC1 (FEAT_GAINS+1)
+#define FEAT_GAINS_DMIC2 (FEAT_GAINS_DMIC1+1)
+#define FEAT_GAINS_DMIC3 (FEAT_GAINS_DMIC2+1)
+#define FEAT_GAINS_AMIC (FEAT_GAINS_DMIC3+1)
+#define FEAT_GAINS_SPLIT (FEAT_GAINS_AMIC+1)
+#define FEAT_GAINS_DL1 (FEAT_GAINS_SPLIT+1)
+#define FEAT_GAINS_DL2 (FEAT_GAINS_DL1+1)
+#define FEAT_GAIN_BTUL (FEAT_GAINS_DL2+1)
+/* sequencing queue of micro tasks */
+#define FEAT_SEQ (FEAT_GAIN_BTUL+1)
+/* Phoenix control queue through McPDM */
+#define FEAT_CTL (FEAT_SEQ+1)
+/* list of features of the firmware -------------------------------*/
+#define MAXNBFEATURE FEAT_CTL
+/* abe_equ_id */
+/* equalizer downlink path headset + earphone */
+#define EQ1 FEAT_EQ1
+/* equalizer downlink path integrated handsfree LEFT */
+#define EQ2L FEAT_EQ2L
+#define EQ2R FEAT_EQ2R
+/* equalizer downlink path side-tone */
+#define EQSDT FEAT_EQSDT
+#define EQAMIC FEAT_EQAMIC
+#define EQDMIC FEAT_EQDMIC
+/* abe_aps_id */
+/* Acoustic protection for headset */
+#define APS1 FEAT_APS1
+#define APS2L FEAT_APS2
+#define APS2R FEAT_APS3
+/* abe_asrc_id */
+/* asynchronous sample-rate-converter for the downlink voice path */
+#define ASRC1 FEAT_ASRC1
+/* asynchronous sample-rate-converter for the uplink voice path */
+#define ASRC2 FEAT_ASRC2
+/* asynchronous sample-rate-converter for the multimedia player */
+#define ASRC3 FEAT_ASRC3
+/* asynchronous sample-rate-converter for the voice uplink echo_reference */
+#define ASRC4 FEAT_ASRC4
+/* abe_mixer_id */
+#define MIXDL1 FEAT_MIXDL1
+#define MIXDL2 FEAT_MIXDL2
+#define MIXSDT FEAT_MIXSDT
+#define MIXECHO FEAT_MIXECHO
+#define MIXAUDUL FEAT_MIXAUDUL
+#define MIXVXREC FEAT_MIXVXREC
+/* abe_router_id */
+/* there is only one router up to now */
+#define UPROUTE FEAT_UPROUTE
+/*
+ * gain controls
+ */
+#define GAIN_LEFT_OFFSET 0
+#define GAIN_RIGHT_OFFSET 1
+/*
+ * GAIN IDs
+ */
+#define GAINS_DMIC1 FEAT_GAINS_DMIC1
+#define GAINS_DMIC2 FEAT_GAINS_DMIC2
+#define GAINS_DMIC3 FEAT_GAINS_DMIC3
+#define GAINS_AMIC FEAT_GAINS_AMIC
+#define GAINS_SPLIT FEAT_GAINS_SPLIT
+#define GAINS_DL1 FEAT_GAINS_DL1
+#define GAINS_DL2 FEAT_GAINS_DL2
+#define GAINS_BTUL FEAT_GAIN_BTUL
+/*
+ * ABE CONST AREA FOR PARAMETERS TRANSLATION
+ */
+#define sizeof_alpha_iir_table 61
+#define sizeof_beta_iir_table 61
+#define GAIN_MAXIMUM 3000L
+#define GAIN_24dB 2400L
+#define GAIN_18dB 1800L
+#define GAIN_12dB 1200L
+#define GAIN_6dB 600L
+/* default gain = 1 */
+#define GAIN_0dB 0L
+#define GAIN_M1dB -100L
+#define GAIN_M6dB -600L
+#define GAIN_M7dB -700L
+#define GAIN_M8dB -800L
+#define GAIN_M12dB -1200L
+#define GAIN_M18dB -1800L
+#define GAIN_M24dB -2400L
+#define GAIN_M30dB -3000L
+#define GAIN_M40dB -4000L
+#define GAIN_M50dB -5000L
+/* muted gain = -120 decibels */
+#define MUTE_GAIN -12000L
+#define GAIN_TOOLOW -13000L
+#define GAIN_MUTE MUTE_GAIN
+#define RAMP_MINLENGTH 3L
+/* ramp_t is in milliseconds */
+#define RAMP_0MS 0L
+#define RAMP_1MS 1L
+#define RAMP_2MS 2L
+#define RAMP_5MS 5L
+#define RAMP_10MS 10L
+#define RAMP_20MS 20L
+#define RAMP_50MS 50L
+#define RAMP_100MS 100L
+#define RAMP_200MS 200L
+#define RAMP_500MS 500L
+#define RAMP_1000MS 1000L
+#define RAMP_MAXLENGTH 10000L
+/* for abe_translate_gain_format */
+#define LINABE_TO_DECIBELS 1
+#define DECIBELS_TO_LINABE 2
+/* for abe_translate_ramp_format */
+#define IIRABE_TO_MICROS 1
+#define MICROS_TO_IIABE 2
+/*
+ * EVENT GENERATORS - abe_event_id
+ */
+#define EVENT_TIMER 0
+#define EVENT_44100 1
+/*
+ * DMA requests
+ */
+/* Internal connection doesn't connect at ABE boundary */
+#define External_DMA_0 0
+/* Transmit request digital microphone */
+#define DMIC_DMA_REQ 1
+/* Multichannel PDM downlink */
+#define McPDM_DMA_DL 2
+/* Multichannel PDM uplink */
+#define McPDM_DMA_UP 3
+/* MCBSP module 1 - transmit request */
+#define MCBSP1_DMA_TX 4
+/* MCBSP module 1 - receive request */
+#define MCBSP1_DMA_RX 5
+/* MCBSP module 2 - transmit request */
+#define MCBSP2_DMA_TX 6
+/* MCBSP module 2 - receive request */
+#define MCBSP2_DMA_RX 7
+/* MCBSP module 3 - transmit request */
+#define MCBSP3_DMA_TX 8
+/* MCBSP module 3 - receive request */
+#define MCBSP3_DMA_RX 9
+/*
+ * SERIAL PORTS IDs - abe_mcbsp_id
+ */
+#define MCBSP1_TX MCBSP1_DMA_TX
+#define MCBSP1_RX MCBSP1_DMA_RX
+#define MCBSP2_TX MCBSP2_DMA_TX
+#define MCBSP2_RX MCBSP2_DMA_RX
+#define MCBSP3_TX MCBSP3_DMA_TX
+#define MCBSP3_RX MCBSP3_DMA_RX
+
+#define PING_PONG_WITH_MCU_IRQ 1
+#define PING_PONG_WITH_DSP_IRQ 2
+
+/*
+ Mixer ID Input port ID Comments
+ DL1_MIXER 0 MMDL path
+ 1 MMUL2 path
+ 2 VXDL path
+ 3 TONES path
+ SDT_MIXER 0 Uplink path
+ 1 Downlink path
+ ECHO_MIXER 0 DL1_MIXER path
+ 1 DL2_MIXER path
+ AUDUL_MIXER 0 TONES_DL path
+ 1 Uplink path
+ 2 MM_DL path
+ VXREC_MIXER 0 TONES_DL path
+ 1 VX_DL path
+ 2 MM_DL path
+ 3 VX_UL path
+*/
+#define MIX_VXUL_INPUT_MM_DL 0
+#define MIX_VXUL_INPUT_TONES 1
+#define MIX_VXUL_INPUT_VX_UL 2
+#define MIX_VXUL_INPUT_VX_DL 3
+#define MIX_DL1_INPUT_MM_DL 0
+#define MIX_DL1_INPUT_MM_UL2 1
+#define MIX_DL1_INPUT_VX_DL 2
+#define MIX_DL1_INPUT_TONES 3
+#define MIX_DL2_INPUT_MM_DL 0
+#define MIX_DL2_INPUT_MM_UL2 1
+#define MIX_DL2_INPUT_VX_DL 2
+#define MIX_DL2_INPUT_TONES 3
+#define MIX_SDT_INPUT_UP_MIXER 0
+#define MIX_SDT_INPUT_DL1_MIXER 1
+#define MIX_AUDUL_INPUT_MM_DL 0
+#define MIX_AUDUL_INPUT_TONES 1
+#define MIX_AUDUL_INPUT_UPLINK 2
+#define MIX_AUDUL_INPUT_VX_DL 3
+#define MIX_VXREC_INPUT_MM_DL 0
+#define MIX_VXREC_INPUT_TONES 1
+#define MIX_VXREC_INPUT_VX_UL 2
+#define MIX_VXREC_INPUT_VX_DL 3
+#define MIX_ECHO_DL1 0
+#define MIX_ECHO_DL2 1
+/* nb of samples to route */
+#define NBROUTE_UL 16
+/* 10 routing tables max */
+#define NBROUTE_CONFIG_MAX 10
+/* 5 pre-computed routing tables */
+#define NBROUTE_CONFIG 6
+/* AMIC on VX_UL */
+#define UPROUTE_CONFIG_AMIC 0
+/* DMIC first pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC1 1
+/* DMIC second pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC2 2
+/* DMIC last pair on VX_UL */
+#define UPROUTE_CONFIG_DMIC3 3
+/* BT_UL on VX_UL */
+#define UPROUTE_CONFIG_BT 4
+/* ECHO_REF on MM_UL2 */
+#define UPROUTE_ECHO_MMUL2 5
+
+/*
+ * DMA_T
+ *
+ * dma structure for easing programming
+ */
+typedef struct {
+	/* OCP L3 pointer to the first address of the destination buffer
+	 * (either DMA or Ping-Pong read/write pointers). */
+	void *data;
+	/* address L3 when addressing the DMEM buffer instead of CBPr */
+ void *l3_dmem;
+ /* address L3 translated to L4 the ARM memory space */
+ void *l4_dmem;
+ /* number of iterations for the DMA data moves. */
+ u32 iter;
+} abe_dma_t;
+typedef u32 abe_dbg_t;
+/*
+ * ROUTER_T
+ *
+ * table of indexes in unsigned bytes
+ */
+typedef u16 abe_router_t;
+/*
+ * DATA_FORMAT_T
+ *
+ * used in port declaration
+ */
+typedef struct {
+ /* Sampling frequency of the stream */
+ u32 f;
+ /* Sample format type */
+ u32 samp_format;
+} abe_data_format_t;
+/*
+ * PORT_PROTOCOL_T
+ *
+ * port declaration
+ */
+typedef struct {
+ /* Direction=0 means input from AESS point of view */
+ u32 direction;
+ /* Protocol type (switch) during the data transfers */
+ u32 protocol_switch;
+ union {
+ /* Slimbus peripheral connected to ATC */
+ struct {
+ /* Address of ATC Slimbus descriptor's index */
+ u32 desc_addr1;
+ /* DMEM address 1 in bytes */
+ u32 buf_addr1;
+			/* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ /* Second ATC index for SlimBus reception (or NULL) */
+ u32 desc_addr2;
+ /* DMEM address 2 in bytes */
+ u32 buf_addr2;
+ } prot_slimbus;
+ /* McBSP/McASP peripheral connected to ATC */
+ struct {
+			/* Address of ATC McBSP/McASP descriptor in bytes */
+			u32 desc_addr;
+			/* DMEM address in bytes */
+			u32 buf_addr;
+			/* DMEM buffer size in bytes */
+			u32 buf_size;
+			/* ITERation on each DMAreq signal */
+			u32 iter;
+ } prot_serial;
+ /* DMIC peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* Number of activated DMIC */
+ u32 nbchan;
+ } prot_dmic;
+ /* McPDMDL peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes */
+ u32 buf_size;
+ /* Control allowed on McPDM DL */
+ u32 control;
+ } prot_mcpdmdl;
+ /* McPDMUL peripheral connected to ATC */
+ struct {
+			/* DMEM buffer address in bytes */
+			u32 buf_addr;
+			/* DMEM buffer size in bytes */
+ u32 buf_size;
+ } prot_mcpdmul;
+ /* Ping-Pong interface to the Host using cache-flush */
+ struct {
+ /* Address of ATC descriptor's */
+ u32 desc_addr;
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes for each ping and pong buffers */
+ u32 buf_size;
+ /* IRQ address (either DMA (0) MCU (1) or DSP(2)) */
+ u32 irq_addr;
+ /* IRQ data content loaded in the AESS IRQ register */
+ u32 irq_data;
+ /* Call-back function upon IRQ reception */
+ u32 callback;
+ } prot_pingpong;
+ /* DMAreq line to CBPr */
+ struct {
+ /* Address of ATC descriptor's */
+ u32 desc_addr;
+ /* DMEM buffer address in bytes */
+ u32 buf_addr;
+			/* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } prot_dmareq;
+ /* Circular buffer - direct addressing to DMEM */
+ struct {
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } prot_circular_buffer;
+ } p;
+} abe_port_protocol_t;
+
+/*
+ * EQU_T
+ *
+ * coefficients of the equalizer
+ */
+/* 24 Q6.26 coefficients */
+#define NBEQ1 25
+/* 2x12 Q6.26 coefficients */
+#define NBEQ2 13
+
+typedef struct {
+ /* type of filter */
+ u32 equ_type;
+ /* filter length */
+ u32 equ_length;
+ union {
+ /* parameters are the direct and recursive coefficients in */
+ /* Q6.26 integer fixed-point format. */
+ s32 type1[NBEQ1];
+ struct {
+ /* center frequency of the band [Hz] */
+ s32 freq[NBEQ2];
+ /* gain of each band. [dB] */
+ s32 gain[NBEQ2];
+ /* Q factor of this band [dB] */
+ s32 q[NBEQ2];
+ } type2;
+ } coef;
+ s32 equ_param3;
+} abe_equ_t;
+
+
+/* subroutine with no parameter */
+typedef void (*abe_subroutine0) (void);
+/* subroutine with one parameter */
+typedef void (*abe_subroutine1) (u32);
+typedef void (*abe_subroutine2) (u32, u32);
+typedef void (*abe_subroutine3) (u32, u32, u32);
+typedef void (*abe_subroutine4) (u32, u32, u32, u32);
+
+
+extern u32 abe_irq_pingpong_player_id;
+
+
+void abe_init_mem(void __iomem **_io_base);
+u32 abe_reset_hal(void);
+int abe_load_fw(u32 *firmware);
+int abe_reload_fw(u32 *firmware);
+u32 *abe_get_default_fw(void);
+u32 abe_wakeup(void);
+u32 abe_irq_processing(void);
+u32 abe_clear_irq(void);
+u32 abe_disable_irq(void);
+u32 abe_write_event_generator(u32 e);
+u32 abe_stop_event_generator(void);
+u32 abe_connect_debug_trace(abe_dma_t *dma2);
+u32 abe_set_debug_trace(abe_dbg_t debug);
+u32 abe_set_ping_pong_buffer(u32 port, u32 n_bytes);
+u32 abe_read_next_ping_pong_buffer(u32 port, u32 *p, u32 *n);
+u32 abe_init_ping_pong_buffer(u32 id, u32 size_bytes, u32 n_buffers,
+ u32 *p);
+u32 abe_read_offset_from_ping_buffer(u32 id, u32 *n);
+u32 abe_write_equalizer(u32 id, abe_equ_t *param);
+u32 abe_disable_gain(u32 id, u32 p);
+u32 abe_enable_gain(u32 id, u32 p);
+u32 abe_mute_gain(u32 id, u32 p);
+u32 abe_unmute_gain(u32 id, u32 p);
+u32 abe_write_gain(u32 id, s32 f_g, u32 ramp, u32 p);
+u32 abe_write_mixer(u32 id, s32 f_g, u32 f_ramp, u32 p);
+u32 abe_read_gain(u32 id, u32 *f_g, u32 p);
+u32 abe_read_mixer(u32 id, u32 *f_g, u32 p);
+int abe_mono_mixer(u32 id, u32 on_off);
+u32 abe_set_router_configuration(u32 id, u32 k, u32 *param);
+u32 abe_set_opp_processing(u32 opp);
+u32 abe_disable_data_transfer(u32 id);
+u32 abe_enable_data_transfer(u32 id);
+u32 abe_connect_cbpr_dmareq_port(u32 id, abe_data_format_t *f, u32 d,
+ abe_dma_t *returned_dma_t);
+u32 abe_connect_irq_ping_pong_port(u32 id, abe_data_format_t *f,
+ u32 subroutine_id, u32 size,
+ u32 *sink, u32 dsp_mcu_flag);
+u32 abe_connect_serial_port(u32 id, abe_data_format_t *f,
+ u32 mcbsp_id);
+u32 abe_read_port_address(u32 port, abe_dma_t *dma2);
+void abe_add_subroutine(u32 *id, abe_subroutine2 f, u32 nparam, u32 *params);
+void abe_write_pdmdl_offset(u32 path, u32 offset_left, u32 offset_right);
+u32 abe_reset_vx_ul_src_filters(void);
+u32 abe_reset_mic_ul_src_filters(void);
+u32 abe_reset_vx_dl_src_filters(void);
+u32 abe_reset_dl1_src_filters(void);
+u32 abe_reset_dl2_src_filters(void);
+u32 abe_reset_bt_dl_src_filters(void);
+void abe_src_filters_saturation_monitoring(void);
+u32 abe_check_activity(void);
+
+u32 abe_plug_subroutine(u32 *id, abe_subroutine2 f, u32 n,
+ u32 *params);
+
+#endif /* _ABE_MAIN_H_ */
diff --git a/sound/soc/omap/abe/abe_mem.h b/sound/soc/omap/abe/abe_mem.h
new file mode 100644
index 0000000..683968e
--- /dev/null
+++ b/sound/soc/omap/abe/abe_mem.h
@@ -0,0 +1,99 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_MEM_H_
+#define _ABE_MEM_H_
+
+#define OMAP_ABE_DMEM 0
+#define OMAP_ABE_CMEM 1
+#define OMAP_ABE_SMEM 2
+#define OMAP_ABE_PMEM 3
+#define OMAP_ABE_AESS 4
+
+/* Distinction between Read and Write from/to ABE memory
+ * is useful for the simulation tool */
+static inline void omap_abe_mem_write(struct omap_abe *abe, int bank,
+ u32 offset, u32 *src, size_t bytes)
+{
+ memcpy((abe->io_base[bank] + offset), src, bytes);
+}
+
+static inline void omap_abe_mem_read(struct omap_abe *abe, int bank,
+ u32 offset, u32 *dest, size_t bytes)
+{
+ memcpy(dest, (abe->io_base[bank] + offset), bytes);
+}
+
+static inline u32 omap_abe_reg_readl(struct omap_abe *abe, u32 offset)
+{
+ return __raw_readl(abe->io_base[OMAP_ABE_AESS] + offset);
+}
+
+static inline void omap_abe_reg_writel(struct omap_abe *abe,
+ u32 offset, u32 val)
+{
+ __raw_writel(val, (abe->io_base[OMAP_ABE_AESS] + offset));
+}
+
+static inline void *omap_abe_reset_mem(struct omap_abe *abe, int bank,
+ u32 offset, size_t bytes)
+{
+ return memset(abe->io_base[bank] + offset, 0, bytes);
+}
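+
+#if 0
+/*
+ * Editorial sketch (not part of the TI HAL): how the accessors above are
+ * meant to be used, assuming "abe" carries valid io_base[] mappings. The
+ * 0x100 offset is a hypothetical placeholder, not a real firmware address.
+ */
+static void example_dmem_access(struct omap_abe *abe)
+{
+	u32 scratch[4] = { 0, 1, 2, 3 };
+	u32 readback[4];
+
+	/* write 16 bytes into DMEM, then read them back */
+	omap_abe_mem_write(abe, OMAP_ABE_DMEM, 0x100, scratch,
+			   sizeof(scratch));
+	omap_abe_mem_read(abe, OMAP_ABE_DMEM, 0x100, readback,
+			  sizeof(readback));
+}
+#endif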
+
+#endif /*_ABE_MEM_H_*/
diff --git a/sound/soc/omap/abe/abe_port.c b/sound/soc/omap/abe/abe_port.c
new file mode 100644
index 0000000..724dabd
--- /dev/null
+++ b/sound/soc/omap/abe/abe_port.c
@@ -0,0 +1,1774 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "abe_legacy.h"
+#include "abe_port.h"
+#include "abe_dbg.h"
+#include "abe_mem.h"
+#include "abe_gain.h"
+
+/**
+ * omap_abe_clean_temporary_buffers
+ *
+ * clear temporary buffers
+ */
+void omap_abe_clean_temporary_buffers(struct omap_abe *abe, u32 id)
+{
+ switch (id) {
+ case OMAP_ABE_DMIC_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_DMIC_UL_FIFO_ADDR,
+ OMAP_ABE_D_DMIC_UL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC0_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC0_96_48_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC1_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC1_96_48_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DMIC2_96_48_DATA_ADDR,
+ OMAP_ABE_S_DMIC2_96_48_DATA_SIZE);
+ /* reset working values of the gain, target gain is preserved */
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_PDM_UL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MCPDM_UL_FIFO_ADDR,
+ OMAP_ABE_D_MCPDM_UL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_AMIC_96_48_DATA_ADDR,
+ OMAP_ABE_S_AMIC_96_48_DATA_SIZE);
+ /* reset working values of the gain, target gain is preserved */
+ omap_abe_reset_gain_mixer(abe, GAINS_AMIC, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_AMIC, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_BT_VX_UL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_BT_UL_FIFO_ADDR,
+ OMAP_ABE_D_BT_UL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_ADDR,
+ OMAP_ABE_S_BT_UL_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_8_48_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_8_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_8_48_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_8_48_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_16_48_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_16_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_UL_16_48_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_UL_16_48_LP_DATA_SIZE);
+ /* reset working values of the gain, target gain is preserved */
+ omap_abe_reset_gain_mixer(abe, GAINS_BTUL, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_BTUL, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_MM_UL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MM_UL_FIFO_ADDR,
+ OMAP_ABE_D_MM_UL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_MM_UL_ADDR,
+ OMAP_ABE_S_MM_UL_SIZE);
+ break;
+ case OMAP_ABE_MM_UL2_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MM_UL2_FIFO_ADDR,
+ OMAP_ABE_D_MM_UL2_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_MM_UL2_ADDR,
+ OMAP_ABE_S_MM_UL2_SIZE);
+ break;
+ case OMAP_ABE_VX_UL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_VX_UL_FIFO_ADDR,
+ OMAP_ABE_D_VX_UL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_ADDR,
+ OMAP_ABE_S_VX_UL_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_8_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_8_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_8_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_8_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_16_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_16_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_UL_48_16_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_UL_48_16_LP_DATA_SIZE);
+ omap_abe_reset_gain_mixer(abe, MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ break;
+ case OMAP_ABE_MM_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MM_DL_FIFO_ADDR,
+ OMAP_ABE_D_MM_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_MM_DL_ADDR,
+ OMAP_ABE_S_MM_DL_SIZE);
+ omap_abe_reset_gain_mixer(abe, MIXDL1, MIX_DL1_INPUT_MM_DL);
+ omap_abe_reset_gain_mixer(abe, MIXDL2, MIX_DL2_INPUT_MM_DL);
+ break;
+ case OMAP_ABE_VX_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_VX_DL_FIFO_ADDR,
+ OMAP_ABE_D_VX_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_ADDR,
+ OMAP_ABE_S_VX_DL_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_8_48_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_8_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_8_48_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_8_48_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_16_48_HP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_16_48_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VX_DL_16_48_LP_DATA_ADDR,
+ OMAP_ABE_S_VX_DL_16_48_LP_DATA_SIZE);
+ omap_abe_reset_gain_mixer(abe, MIXDL1, MIX_DL1_INPUT_VX_DL);
+ omap_abe_reset_gain_mixer(abe, MIXDL2, MIX_DL2_INPUT_VX_DL);
+ break;
+ case OMAP_ABE_TONES_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_TONES_DL_FIFO_ADDR,
+ OMAP_ABE_D_TONES_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_TONES_ADDR,
+ OMAP_ABE_S_TONES_SIZE);
+ omap_abe_reset_gain_mixer(abe, MIXDL1, MIX_DL1_INPUT_TONES);
+ omap_abe_reset_gain_mixer(abe, MIXDL2, MIX_DL2_INPUT_TONES);
+ break;
+ case OMAP_ABE_VIB_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_VIB_DL_FIFO_ADDR,
+ OMAP_ABE_D_VIB_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_VIBRA_ADDR,
+ OMAP_ABE_S_VIBRA_SIZE);
+ break;
+ case OMAP_ABE_BT_VX_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_BT_DL_FIFO_ADDR,
+ OMAP_ABE_D_BT_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_ADDR,
+ OMAP_ABE_S_BT_DL_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_8_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_8_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_8_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_8_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_16_HP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_16_HP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_BT_DL_48_16_LP_DATA_ADDR,
+ OMAP_ABE_S_BT_DL_48_16_LP_DATA_SIZE);
+ break;
+ case OMAP_ABE_PDM_DL_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MCPDM_DL_FIFO_ADDR,
+ OMAP_ABE_D_MCPDM_DL_FIFO_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR,
+ OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_DL1_M_EQ_DATA_ADDR,
+ OMAP_ABE_S_DL1_M_EQ_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_EARP_48_96_LP_DATA_ADDR,
+ OMAP_ABE_S_EARP_48_96_LP_DATA_SIZE);
+ omap_abe_reset_mem(abe, OMAP_ABE_SMEM,
+ OMAP_ABE_S_IHF_48_96_LP_DATA_ADDR,
+ OMAP_ABE_S_IHF_48_96_LP_DATA_SIZE);
+ omap_abe_reset_gain_mixer(abe, GAINS_DL1, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DL1, GAIN_RIGHT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DL2, GAIN_LEFT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, GAINS_DL2, GAIN_RIGHT_OFFSET);
+ omap_abe_reset_gain_mixer(abe, MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ omap_abe_reset_gain_mixer(abe, MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+ break;
+ case OMAP_ABE_MM_EXT_OUT_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MM_EXT_OUT_FIFO_ADDR,
+ OMAP_ABE_D_MM_EXT_OUT_FIFO_SIZE);
+ break;
+ case OMAP_ABE_MM_EXT_IN_PORT:
+ omap_abe_reset_mem(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MM_EXT_IN_FIFO_ADDR,
+ OMAP_ABE_D_MM_EXT_IN_FIFO_SIZE);
+ break;
+ }
+}
+
+/**
+ * omap_abe_disable_enable_dma_request
+ * @abe: ABE context
+ * @id: ABE port ID
+ * @on_off: 1 to enable, 0 to disable the DMA request of the port
+ *
+ * enables or disables the DMA/IRQ request of the port by updating its
+ * I/O (or ping-pong) descriptor in DMEM
+ */
+void omap_abe_disable_enable_dma_request(struct omap_abe *abe, u32 id,
+ u32 on_off)
+{
+ u8 desc_third_word[4], irq_dmareq_field;
+ u32 sio_desc_address;
+ u32 struct_offset;
+ struct ABE_SIODescriptor sio_desc;
+ struct ABE_SPingPongDescriptor desc_pp;
+
+ if (abe_port[id].protocol.protocol_switch == PINGPONG_PORT_PROT) {
+ irq_dmareq_field =
+ (u8) (on_off *
+ abe_port[id].protocol.p.prot_pingpong.irq_data);
+ sio_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR;
+ struct_offset = (u32) &(desc_pp.data_size) - (u32) &(desc_pp);
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ sio_desc_address + struct_offset,
+ (u32 *) desc_third_word, 4);
+ desc_third_word[2] = irq_dmareq_field;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ sio_desc_address + struct_offset,
+ (u32 *) desc_third_word, 4);
+ } else {
+ /* serial interface: sync ATC with Firmware activity */
+ sio_desc_address =
+ OMAP_ABE_D_IODESCR_ADDR +
+ (id * sizeof(struct ABE_SIODescriptor));
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ sio_desc_address, (u32 *) &sio_desc,
+ sizeof(sio_desc));
+ if (on_off) {
+ if (abe_port[id].protocol.protocol_switch != SERIAL_PORT_PROT)
+ sio_desc.atc_irq_data =
+ (u8) abe_port[id].protocol.p.prot_dmareq.
+ dma_data;
+ sio_desc.on_off = 0x80;
+ } else {
+ sio_desc.atc_irq_data = 0;
+ sio_desc.on_off = 0;
+ }
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ sio_desc_address, (u32 *) &sio_desc,
+ sizeof(sio_desc));
+ }
+
+}
+
+/**
+ * omap_abe_enable_dma_request
+ *
+ * @abe: ABE context
+ * @id: ABE port ID
+ *
+ * enable the DMA request of the port
+ *
+ */
+void omap_abe_enable_dma_request(struct omap_abe *abe, u32 id)
+{
+ omap_abe_disable_enable_dma_request(abe, id, 1);
+}
+
+/**
+ * omap_abe_disable_dma_request
+ *
+ * @abe: ABE context
+ * @id: ABE port ID
+ *
+ * disable the DMA request of the port
+ *
+ */
+void omap_abe_disable_dma_request(struct omap_abe *abe, u32 id)
+{
+ omap_abe_disable_enable_dma_request(abe, id, 0);
+}
+
+/**
+ * omap_abe_init_atc
+ * @id: ABE port ID
+ *
+ * load the DMEM ATC/AESS descriptors
+ */
+void omap_abe_init_atc(struct omap_abe *abe, u32 id)
+{
+ u8 iter;
+ s32 datasize;
+ struct omap_abe_atc_desc atc_desc;
+
+#define JITTER_MARGIN 4
+ /* load default values of the descriptor */
+ atc_desc.rdpt = 0;
+ atc_desc.wrpt = 0;
+ atc_desc.irqdest = 0;
+ atc_desc.cberr = 0;
+ atc_desc.desen = 0;
+ atc_desc.nw = 0;
+ atc_desc.reserved0 = 0;
+ atc_desc.reserved1 = 0;
+ atc_desc.reserved2 = 0;
+ atc_desc.srcid = 0;
+ atc_desc.destid = 0;
+ atc_desc.badd = 0;
+ atc_desc.iter = 0;
+ atc_desc.cbsize = 0;
+ datasize = abe_dma_port_iter_factor(&((abe_port[id]).format));
+ iter = (u8) abe_dma_port_iteration(&((abe_port[id]).format));
+ /* if the ATC FIFO is too small, two ABE firmware utasks are needed
+ to do the copy; this happens on DMIC and MCPDMDL */
+ /* VXDL_8kMono = 4 = 2 + 2x1 */
+ /* VXDL_16kstereo = 12 = 8 + 2x2 */
+ /* MM_DL_1616 = 14 = 12 + 2x1 */
+ /* DMIC = 84 = 72 + 2x6 */
+ /* VXUL_8kMono = 2 */
+ /* VXUL_16kstereo = 4 */
+ /* MM_UL2_Stereo = 4 */
+ /* PDMDL = 12 */
+ /* IN from AESS point of view */
+ if (abe_port[id].protocol.direction == ABE_ATC_DIRECTION_IN)
+ if (iter + 2 * datasize > 126)
+ atc_desc.wrpt = (iter >> 1) +
+ ((JITTER_MARGIN-1) * datasize);
+ else
+ atc_desc.wrpt = iter + ((JITTER_MARGIN-1) * datasize);
+ else
+ atc_desc.wrpt = 0 + ((JITTER_MARGIN+1) * datasize);
+ switch ((abe_port[id]).protocol.protocol_switch) {
+ case SLIMBUS_PORT_PROT:
+ atc_desc.cbdir = (abe_port[id]).protocol.direction;
+ atc_desc.cbsize =
+ (abe_port[id]).protocol.p.prot_slimbus.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_slimbus.buf_addr1) >> 4;
+ atc_desc.iter = (abe_port[id]).protocol.p.prot_slimbus.iter;
+ atc_desc.srcid =
+ abe_atc_srcid[(abe_port[id]).protocol.p.prot_slimbus.
+ desc_addr1 >> 3];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_slimbus.
+ desc_addr1, (u32 *) &atc_desc, sizeof(atc_desc));
+ atc_desc.badd =
+ (abe_port[id]).protocol.p.prot_slimbus.buf_addr2;
+ atc_desc.srcid =
+ abe_atc_srcid[(abe_port[id]).protocol.p.prot_slimbus.
+ desc_addr2 >> 3];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_slimbus.
+ desc_addr2, (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ case SERIAL_PORT_PROT:
+ atc_desc.cbdir = (abe_port[id]).protocol.direction;
+ atc_desc.cbsize =
+ (abe_port[id]).protocol.p.prot_serial.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_serial.buf_addr) >> 4;
+ atc_desc.iter = (abe_port[id]).protocol.p.prot_serial.iter;
+ atc_desc.srcid =
+ abe_atc_srcid[(abe_port[id]).protocol.p.prot_serial.
+ desc_addr >> 3];
+ atc_desc.destid =
+ abe_atc_dstid[(abe_port[id]).protocol.p.prot_serial.
+ desc_addr >> 3];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_serial.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ case DMIC_PORT_PROT:
+ atc_desc.cbdir = ABE_ATC_DIRECTION_IN;
+ atc_desc.cbsize = (abe_port[id]).protocol.p.prot_dmic.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_dmic.buf_addr) >> 4;
+ atc_desc.iter = DMIC_ITER;
+ atc_desc.srcid = abe_atc_srcid[ABE_ATC_DMIC_DMA_REQ];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (ABE_ATC_DMIC_DMA_REQ*ATC_SIZE),
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ case MCPDMDL_PORT_PROT:
+ atc_desc.cbdir = ABE_ATC_DIRECTION_OUT;
+ atc_desc.cbsize =
+ (abe_port[id]).protocol.p.prot_mcpdmdl.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_mcpdmdl.buf_addr) >> 4;
+ atc_desc.iter = MCPDM_DL_ITER;
+ atc_desc.destid = abe_atc_dstid[ABE_ATC_MCPDMDL_DMA_REQ];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (ABE_ATC_MCPDMDL_DMA_REQ*ATC_SIZE),
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ case MCPDMUL_PORT_PROT:
+ atc_desc.cbdir = ABE_ATC_DIRECTION_IN;
+ atc_desc.cbsize =
+ (abe_port[id]).protocol.p.prot_mcpdmul.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_mcpdmul.buf_addr) >> 4;
+ atc_desc.iter = MCPDM_UL_ITER;
+ atc_desc.srcid = abe_atc_srcid[ABE_ATC_MCPDMUL_DMA_REQ];
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (ABE_ATC_MCPDMUL_DMA_REQ*ATC_SIZE),
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ case PINGPONG_PORT_PROT:
+ /* software protocol, nothing to do on ATC */
+ break;
+ case DMAREQ_PORT_PROT:
+ atc_desc.cbdir = (abe_port[id]).protocol.direction;
+ atc_desc.cbsize =
+ (abe_port[id]).protocol.p.prot_dmareq.buf_size;
+ atc_desc.badd =
+ ((abe_port[id]).protocol.p.prot_dmareq.buf_addr) >> 4;
+ /* CBPr needs ITER=1.
+ It is the job of eDMA to do the iterations */
+ atc_desc.iter = 1;
+ /* input from ABE point of view */
+ if (abe_port[id].protocol.direction == ABE_ATC_DIRECTION_IN) {
+ /* atc_atc_desc.rdpt = 127; */
+ /* atc_atc_desc.wrpt = 0; */
+ atc_desc.srcid = abe_atc_srcid
+ [(abe_port[id]).protocol.p.prot_dmareq.
+ desc_addr >> 3];
+ } else {
+ /* atc_atc_desc.rdpt = 0; */
+ /* atc_atc_desc.wrpt = 127; */
+ atc_desc.destid = abe_atc_dstid
+ [(abe_port[id]).protocol.p.prot_dmareq.
+ desc_addr >> 3];
+ }
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_dmareq.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ break;
+ }
+}
+
+/**
+ * omap_abe_enable_pp_io_task
+ * @abe: ABE context
+ * @id: port ID (only OMAP_ABE_MM_DL_PORT is supported)
+ *
+ * enable the ping-pong I/O task of MM_DL in the scheduling table
+ */
+void omap_abe_enable_pp_io_task(struct omap_abe *abe, u32 id)
+{
+ if (OMAP_ABE_MM_DL_PORT == id) {
+ /* MM_DL managed in ping-pong */
+ abe->MultiFrame[TASK_IO_MM_DL_SLT][TASK_IO_MM_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_IO_PING_PONG);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MULTIFRAME_ADDR, (u32 *) abe->MultiFrame,
+ sizeof(abe->MultiFrame));
+ } else {
+ /* ping_pong is only supported on MM_DL */
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+}
+/**
+ * omap_abe_disable_pp_io_task
+ * @abe: ABE context
+ * @id: port ID (only OMAP_ABE_MM_DL_PORT is supported)
+ *
+ * disable the ping-pong I/O task of MM_DL in the scheduling table
+ */
+void omap_abe_disable_pp_io_task(struct omap_abe *abe, u32 id)
+{
+ if (OMAP_ABE_MM_DL_PORT == id) {
+ /* MM_DL managed in ping-pong */
+ abe->MultiFrame[TASK_IO_MM_DL_SLT][TASK_IO_MM_DL_IDX] = 0;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MULTIFRAME_ADDR, (u32 *) abe->MultiFrame,
+ sizeof(abe->MultiFrame));
+ } else {
+ /* ping_pong is only supported on MM_DL */
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+}
+
+/**
+ * omap_abe_disable_data_transfer
+ * @id: ABE port id
+ *
+ * disables the ATC descriptor and stops the I/O and port activity
+ * disable the IO task (@f = 0)
+ * clear ATC DMEM buffer, ATC enabled
+ */
+int omap_abe_disable_data_transfer(struct omap_abe *abe, u32 id)
+{
+
+ _log(ABE_ID_DISABLE_DATA_TRANSFER, id, 0, 0);
+
+ /* MM_DL managed in ping-pong */
+ if (id == OMAP_ABE_MM_DL_PORT) {
+ if (abe_port[OMAP_ABE_MM_DL_PORT].protocol.protocol_switch == PINGPONG_PORT_PROT)
+ omap_abe_disable_pp_io_task(abe, OMAP_ABE_MM_DL_PORT);
+ }
+ /* local host variable status = "port is idle" */
+ abe_port[id].status = OMAP_ABE_PORT_ACTIVITY_IDLE;
+ /* disable DMA requests */
+ omap_abe_disable_dma_request(abe, id);
+ /* disable ATC transfers */
+ omap_abe_init_atc(abe, id);
+ omap_abe_clean_temporary_buffers(abe, id);
+ /* reselect the main port following the deactivation of this port */
+ abe_decide_main_port();
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_disable_data_transfer);
+
+/**
+ * omap_abe_enable_data_transfer
+ * @id: ABE port ID
+ *
+ * enables the ATC descriptor
+ * reset ATC pointers
+ * enable the IO task (@f <> 0)
+ */
+int omap_abe_enable_data_transfer(struct omap_abe *abe, u32 id)
+{
+ abe_port_protocol_t *protocol;
+ abe_data_format_t format;
+
+ _log(ABE_ID_ENABLE_DATA_TRANSFER, id, 0, 0);
+ omap_abe_clean_temporary_buffers(abe, id);
+
+ switch (id) {
+ case OMAP_ABE_PDM_UL_PORT:
+ case OMAP_ABE_PDM_DL_PORT:
+ case OMAP_ABE_DMIC_PORT:
+ /* initializes the ABE ATC descriptors in DMEM for BE ports */
+ protocol = &(abe_port[id].protocol);
+ format = abe_port[id].format;
+ omap_abe_init_atc(abe, id);
+ abe_init_io_tasks(id, &format, protocol);
+ break;
+
+ case OMAP_ABE_MM_DL_PORT:
+ protocol = &(abe_port[OMAP_ABE_MM_DL_PORT].protocol);
+ if (protocol->protocol_switch == PINGPONG_PORT_PROT)
+ abe->MultiFrame[TASK_IO_MM_DL_SLT][TASK_IO_MM_DL_IDX] = ABE_TASK_ID(C_ABE_FW_TASK_IO_PING_PONG);
+ else
+ abe->MultiFrame[TASK_IO_MM_DL_SLT][TASK_IO_MM_DL_IDX] = ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_DL);
+ break;
+ case OMAP_ABE_VX_UL_PORT:
+ break;
+ case OMAP_ABE_VX_DL_PORT:
+ break;
+ case OMAP_ABE_MM_UL2_PORT:
+ abe->MultiFrame[17][3] = ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_UL2);
+ break;
+ case OMAP_ABE_TONES_DL_PORT:
+ abe->MultiFrame[20][0] = ABE_TASK_ID(C_ABE_FW_TASK_IO_TONES_DL);
+ break;
+ case OMAP_ABE_MM_UL_PORT:
+ abe->MultiFrame[19][6] = ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_UL);
+ break;
+ case OMAP_ABE_BT_VX_DL_PORT:
+ abe->MultiFrame[13][5] = ABE_TASK_ID(C_ABE_FW_TASK_IO_BT_VX_DL);
+ break;
+ case OMAP_ABE_BT_VX_UL_PORT:
+ abe->MultiFrame[15][3] = ABE_TASK_ID(C_ABE_FW_TASK_IO_BT_VX_UL);
+ break;
+ case OMAP_ABE_MM_EXT_IN_PORT:
+ abe->MultiFrame[21][3] = ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_EXT_IN);
+ break;
+ case OMAP_ABE_MM_EXT_OUT_PORT:
+ abe->MultiFrame[15][0] = ABE_TASK_ID(C_ABE_FW_TASK_IO_MM_EXT_OUT);
+ break;
+ default:
+ break;
+ }
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_MULTIFRAME_ADDR,
+ (u32 *) abe->MultiFrame, sizeof(abe->MultiFrame));
+
+ /* local host variable status= "port is running" */
+ abe_port[id].status = OMAP_ABE_PORT_ACTIVITY_RUNNING;
+ /* enable DMA requests */
+ omap_abe_enable_dma_request(abe, id);
+ /* select the main port based on the activation of this new port */
+ abe_decide_main_port();
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_enable_data_transfer);
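+
+/*
+ * Illustrative sketch only, not part of the ABE HAL itself: a typical port
+ * life cycle as seen from an assumed caller holding an "abe" handle.
+ *
+ *   omap_abe_enable_data_transfer(abe, OMAP_ABE_MM_DL_PORT);
+ *   ... stream runs, the ATC/DMA requests of the port are generated ...
+ *   omap_abe_disable_data_transfer(abe, OMAP_ABE_MM_DL_PORT);
+ *
+ * enable programs the I/O task and the ATC descriptor of the port; disable
+ * idles the port, masks its DMA request and clears its temporary buffers.
+ */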
+
+/**
+ * omap_abe_connect_cbpr_dmareq_port
+ * @id: port name
+ * @f: desired data format
+ * @d: desired dma_request line (0..7)
+ * @returned_dma_t: returned pointer to the base address of the CBPr register
+ * and the number of samples to exchange on each DMA request
+ *
+ * enables the data exchange between a DMA and the ABE through the
+ * CBPr registers of AESS.
+ */
+int omap_abe_connect_cbpr_dmareq_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 d,
+ abe_dma_t *returned_dma_t)
+{
+ _log(ABE_ID_CONNECT_CBPR_DMAREQ_PORT, id, f->f, f->samp_format);
+
+ abe_port[id] = ((abe_port_t *) abe_port_init)[id];
+ (abe_port[id]).format = (*f);
+ abe_port[id].protocol.protocol_switch = DMAREQ_PORT_PROT;
+ abe_port[id].protocol.p.prot_dmareq.iter = abe_dma_port_iteration(f);
+ abe_port[id].protocol.p.prot_dmareq.dma_addr = ABE_DMASTATUS_RAW;
+ abe_port[id].protocol.p.prot_dmareq.dma_data = (1 << d);
+ /* load the dma_t with physical information from AE memory mapping */
+ abe_init_dma_t(id, &((abe_port[id]).protocol));
+
+ /* load the ATC descriptors - disabled */
+ omap_abe_init_atc(abe, id);
+
+ /* load the micro-task parameters */
+ abe_init_io_tasks(id, &((abe_port[id]).format),
+ &((abe_port[id]).protocol));
+ abe_port[id].status = OMAP_ABE_PORT_INITIALIZED;
+
+ /* return the dma pointer address */
+ abe_read_port_address(id, returned_dma_t);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_connect_cbpr_dmareq_port);
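+
+/*
+ * Illustrative sketch only (assumed caller code): connect MM_DL to CBPr
+ * DMA request line 0 with a 48kHz stereo format and read back the DMA
+ * parameters before enabling the transfer.
+ *
+ *   abe_data_format_t format = { .f = 48000, .samp_format = STEREO_MSB };
+ *   abe_dma_t dma;
+ *
+ *   omap_abe_connect_cbpr_dmareq_port(abe, OMAP_ABE_MM_DL_PORT,
+ *                                     &format, 0, &dma);
+ *   omap_abe_enable_data_transfer(abe, OMAP_ABE_MM_DL_PORT);
+ *
+ * dma.data then holds the CBPr register address and dma.iter the number of
+ * samples exchanged on each DMA request.
+ */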
+
+/**
+ * omap_abe_connect_irq_ping_pong_port
+ * @id: port name
+ * @f: desired data format
+ * @subroutine_id: index of the call-back subroutine to call
+ * @size: half-buffer (ping) size
+ * @sink: returned base address of the first (ping) buffer
+ * @dsp_mcu_flag: PING_PONG_WITH_MCU_IRQ or PING_PONG_WITH_DSP_IRQ
+ *
+ * enables the data exchange with the host through direct access to the
+ * DMEM memory of ABE using cache flush. On each IRQ activation a subroutine
+ * registered with "abe_plug_subroutine" will be called. This subroutine
+ * will generate an amount of samples, send them to DMEM memory and call
+ * "abe_set_ping_pong_buffer" to notify the new amount of samples in the
+ * pong buffer.
+ */
+int omap_abe_connect_irq_ping_pong_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 subroutine_id, u32 size,
+ u32 *sink, u32 dsp_mcu_flag)
+{
+ _log(ABE_ID_CONNECT_IRQ_PING_PONG_PORT, id, f->f, f->samp_format);
+
+ /* ping_pong is only supported on MM_DL */
+ if (id != OMAP_ABE_MM_DL_PORT) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+ abe_port[id] = ((abe_port_t *) abe_port_init)[id];
+ (abe_port[id]).format = (*f);
+ (abe_port[id]).protocol.protocol_switch = PINGPONG_PORT_PROT;
+ (abe_port[id]).protocol.p.prot_pingpong.buf_addr =
+ OMAP_ABE_D_PING_ADDR;
+ (abe_port[id]).protocol.p.prot_pingpong.buf_size = size;
+ (abe_port[id]).protocol.p.prot_pingpong.irq_data = (1);
+ abe_init_ping_pong_buffer(OMAP_ABE_MM_DL_PORT, size, 2, sink);
+ if (dsp_mcu_flag == PING_PONG_WITH_MCU_IRQ)
+ (abe_port[id]).protocol.p.prot_pingpong.irq_addr =
+ ABE_MCU_IRQSTATUS_RAW;
+ if (dsp_mcu_flag == PING_PONG_WITH_DSP_IRQ)
+ (abe_port[id]).protocol.p.prot_pingpong.irq_addr =
+ ABE_DSP_IRQSTATUS_RAW;
+ abe_port[id].status = OMAP_ABE_PORT_INITIALIZED;
+
+ /* load the ATC descriptors - disabled */
+ omap_abe_init_atc(abe, id);
+ /* load the micro-task parameters */
+ abe_init_io_tasks(id, &((abe_port[id]).format),
+ &((abe_port[id]).protocol));
+
+ *sink = (abe_port[id]).protocol.p.prot_pingpong.buf_addr;
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_connect_irq_ping_pong_port);
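+
+/*
+ * Illustrative sketch only (assumed caller code): open MM_DL in ping-pong
+ * mode with an MCU IRQ. "pp_copy_subroutine_id" and the 24kB half-buffer
+ * size are assumptions made for the example.
+ *
+ *   abe_data_format_t format = { .f = 48000, .samp_format = STEREO_16_16 };
+ *   u32 dmem_addr;
+ *
+ *   omap_abe_connect_irq_ping_pong_port(abe, OMAP_ABE_MM_DL_PORT, &format,
+ *                                       pp_copy_subroutine_id, 24 * 1024,
+ *                                       &dmem_addr, PING_PONG_WITH_MCU_IRQ);
+ *
+ * On each IRQ the registered subroutine refills the ping or pong buffer
+ * located at the DMEM address returned in dmem_addr.
+ */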
+
+/**
+ * omap_abe_connect_serial_port()
+ * @id: port name
+ * @f: data format
+ * @mcbsp_id: peripheral ID (McBSP #1, #2, #3)
+ *
+ * Operations: enables the data exchange between a McBSP and an ATC buffer in
+ * DMEM. This API is used to connect 48kHz McBSP streams to MM_DL and 8/16kHz
+ * voice streams to VX_UL, VX_DL, BT_VX_UL, BT_VX_DL. It abstracts the
+ * abe_write_port API.
+ */
+int omap_abe_connect_serial_port(struct omap_abe *abe,
+ u32 id, abe_data_format_t *f,
+ u32 mcbsp_id)
+{
+ _log(ABE_ID_CONNECT_SERIAL_PORT, id, f->samp_format, mcbsp_id);
+
+ abe_port[id] = ((abe_port_t *) abe_port_init)[id];
+ (abe_port[id]).format = (*f);
+ (abe_port[id]).protocol.protocol_switch = SERIAL_PORT_PROT;
+ /* McBSP peripheral connected to ATC */
+ (abe_port[id]).protocol.p.prot_serial.desc_addr = mcbsp_id*ATC_SIZE;
+ /* check the iteration of ATC */
+ (abe_port[id]).protocol.p.prot_serial.iter =
+ abe_dma_port_iter_factor(f);
+
+ /* load the ATC descriptors - disabled */
+ omap_abe_init_atc(abe, id);
+ /* load the micro-task parameters */
+ abe_init_io_tasks(id, &((abe_port[id]).format),
+ &((abe_port[id]).protocol));
+ abe_port[id].status = OMAP_ABE_PORT_INITIALIZED;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_connect_serial_port);
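+
+/*
+ * Illustrative sketch only (assumed caller code): attach the BT voice
+ * uplink to a McBSP at 8kHz mono. "bt_mcbsp_id", the ATC channel index of
+ * the McBSP used for Bluetooth, is an assumption of the example.
+ *
+ *   abe_data_format_t bt_format = { .f = 8000, .samp_format = MONO_MSB };
+ *
+ *   omap_abe_connect_serial_port(abe, OMAP_ABE_BT_VX_UL_PORT,
+ *                                &bt_format, bt_mcbsp_id);
+ */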
+
+/**
+ * omap_abe_read_port_address
+ * @port: audio port ID
+ * @dma2: output pointer filled with the DMA iteration count and the data
+ * destination pointer
+ *
+ * This API returns the address of the DMA register used on this audio port.
+ * Depending on the protocol being used, it adds the L3 (DMA) or L4 (MPU/ARM)
+ * base address offset.
+ */
+int omap_abe_read_port_address(struct omap_abe *abe,
+ u32 port, abe_dma_t *dma2)
+{
+ abe_dma_t_offset dma1;
+ u32 protocol_switch;
+
+ _log(ABE_ID_READ_PORT_ADDRESS, port, 0, 0);
+
+ dma1 = (abe_port[port]).dma;
+ protocol_switch = abe_port[port].protocol.protocol_switch;
+ switch (protocol_switch) {
+ case PINGPONG_PORT_PROT:
+ /* return the base address of the buffer in L3 and L4 spaces */
+ (*dma2).data = (void *)(dma1.data +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).l3_dmem = (void *)(dma1.data +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).l4_dmem = (void *)(dma1.data +
+ ABE_DEFAULT_BASE_ADDRESS_L4 + ABE_DMEM_BASE_OFFSET_MPU);
+ break;
+ case DMAREQ_PORT_PROT:
+ /* return the CBPr(L3), DMEM(L3), DMEM(L4) address */
+ (*dma2).data = (void *)(dma1.data +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_ATC_BASE_OFFSET_MPU);
+ (*dma2).l3_dmem =
+ (void *)((abe_port[port]).protocol.p.prot_dmareq.buf_addr +
+ ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU);
+ (*dma2).l4_dmem =
+ (void *)((abe_port[port]).protocol.p.prot_dmareq.buf_addr +
+ ABE_DEFAULT_BASE_ADDRESS_L4 + ABE_DMEM_BASE_OFFSET_MPU);
+ break;
+ default:
+ break;
+ }
+ (*dma2).iter = (dma1.iter);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_read_port_address);
+
+/**
+ * abe_init_dma_t
+ * @id: ABE port ID
+ * @prot: protocol being used
+ *
+ * load the dma_t with physical information from AE memory mapping
+ */
+void abe_init_dma_t(u32 id, abe_port_protocol_t *prot)
+{
+ abe_dma_t_offset dma;
+ u32 idx;
+ /* default dma_t points to address 0000... */
+ dma.data = 0;
+ dma.iter = 0;
+ switch (prot->protocol_switch) {
+ case PINGPONG_PORT_PROT:
+ for (idx = 0; idx < 32; idx++) {
+ if (((prot->p).prot_pingpong.irq_data) ==
+ (u32) (1 << idx))
+ break;
+ }
+ (prot->p).prot_dmareq.desc_addr =
+ ((CBPr_DMA_RTX0 + idx)*ATC_SIZE);
+ /* translate byte address/size in DMEM words */
+ dma.data = (prot->p).prot_pingpong.buf_addr >> 2;
+ dma.iter = (prot->p).prot_pingpong.buf_size >> 2;
+ break;
+ case DMAREQ_PORT_PROT:
+ for (idx = 0; idx < 32; idx++) {
+ if (((prot->p).prot_dmareq.dma_data) ==
+ (u32) (1 << idx))
+ break;
+ }
+ dma.data = (CIRCULAR_BUFFER_PERIPHERAL_R__0 + (idx << 2));
+ dma.iter = (prot->p).prot_dmareq.iter;
+ (prot->p).prot_dmareq.desc_addr =
+ ((CBPr_DMA_RTX0 + idx)*ATC_SIZE);
+ break;
+ case SLIMBUS_PORT_PROT:
+ case SERIAL_PORT_PROT:
+ case DMIC_PORT_PROT:
+ case MCPDMDL_PORT_PROT:
+ case MCPDMUL_PORT_PROT:
+ default:
+ break;
+ }
+ /* upload the dma type */
+ abe_port[id].dma = dma;
+}
+
+/**
+ * abe_enable_atc
+ * @id: ABE port ID
+ *
+ * set the desen (descriptor enable) field of the port ATC descriptor in DMEM
+ */
+void abe_enable_atc(u32 id)
+{
+ struct omap_abe_atc_desc atc_desc;
+
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_dmareq.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ atc_desc.desen = 1;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_dmareq.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+
+}
+/**
+ * abe_disable_atc
+ * @id: ABE port ID
+ *
+ * clear the desen (descriptor enable) field of the port ATC descriptor in DMEM
+ */
+void abe_disable_atc(u32 id)
+{
+ struct omap_abe_atc_desc atc_desc;
+
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_dmareq.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+ atc_desc.desen = 0;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ (abe_port[id]).protocol.p.prot_dmareq.desc_addr,
+ (u32 *) &atc_desc, sizeof(atc_desc));
+
+}
+/**
+ * abe_init_io_tasks
+ * @id: ABE port ID
+ * @format: data format of the port
+ * @prot: protocol being used
+ *
+ * load the parameters of the micro-task doing the DMEM <==> SMEM data moves
+ *
+ * I/O descriptors input parameters :
+ * For Read from DMEM usually THR1/THR2 = X+1/X-1
+ * For Write to DMEM usually THR1/THR2 = 2/0
+ * UP_1/2 =X+1/X-1
+ */
+void abe_init_io_tasks(u32 id, abe_data_format_t *format,
+ abe_port_protocol_t *prot)
+{
+ u32 x_io, direction, iter_samples, smem1, smem2, smem3, io_sub_id,
+ io_flag;
+ u32 copy_func_index, before_func_index, after_func_index;
+ u32 dmareq_addr, dmareq_field;
+ u32 sio_desc_address, datasize, iter, nsamp, datasize2, dOppMode32;
+ u32 atc_ptr_saved, atc_ptr_saved2, copy_func_index1;
+ u32 copy_func_index2, atc_desc_address1, atc_desc_address2;
+ struct ABE_SPingPongDescriptor desc_pp;
+ struct ABE_SIODescriptor sio_desc;
+
+ if (prot->protocol_switch == PINGPONG_PORT_PROT) {
+ /* ping_pong is only supported on MM_DL */
+ if (OMAP_ABE_MM_DL_PORT != id) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_PARAMETER_ERROR);
+ }
+ smem1 = smem_mm_dl;
+ copy_func_index = (u8) abe_dma_port_copy_subroutine_id(id);
+ dmareq_addr = abe_port[id].protocol.p.prot_pingpong.irq_addr;
+ dmareq_field = abe_port[id].protocol.p.prot_pingpong.irq_data;
+ datasize = abe_dma_port_iter_factor(format);
+ /* number of "samples" either mono or stereo */
+ iter = abe_dma_port_iteration(format);
+ iter_samples = (iter / datasize);
+ /* load the IO descriptor */
+ /* no drift */
+ desc_pp.drift_ASRC = 0;
+ /* no drift */
+ desc_pp.drift_io = 0;
+ desc_pp.hw_ctrl_addr = (u16) dmareq_addr;
+ desc_pp.copy_func_index = (u8) copy_func_index;
+ desc_pp.smem_addr = (u8) smem1;
+ /* DMA req 0 is used for CBPr0 */
+ desc_pp.atc_irq_data = (u8) dmareq_field;
+ /* size of block transfer */
+ desc_pp.x_io = (u8) iter_samples;
+ desc_pp.data_size = (u8) datasize;
+ /* address communicated in bytes */
+ desc_pp.workbuff_BaseAddr =
+ (u16) (abe_base_address_pingpong[1]);
+ /* size communicated in XIO samples */
+ desc_pp.workbuff_Samples = 0;
+ desc_pp.nextbuff0_BaseAddr =
+ (u16) (abe_base_address_pingpong[0]);
+ desc_pp.nextbuff1_BaseAddr =
+ (u16) (abe_base_address_pingpong[1]);
+ if (dmareq_addr == ABE_DMASTATUS_RAW) {
+ desc_pp.nextbuff0_Samples =
+ (u16) ((abe_size_pingpong >> 2) / datasize);
+ desc_pp.nextbuff1_Samples =
+ (u16) ((abe_size_pingpong >> 2) / datasize);
+ } else {
+ desc_pp.nextbuff0_Samples = 0;
+ desc_pp.nextbuff1_Samples = 0;
+ }
+ /* next buffer to send is B1, first IRQ fills B0 */
+ desc_pp.counter = 0;
+ /* send a DMA req to fill B0 with N samples
+ abe_block_copy (COPY_FROM_HOST_TO_ABE,
+ ABE_ATC,
+ ABE_DMASTATUS_RAW,
+ &(abe_port[id].protocol.p.prot_pingpong.irq_data),
+ 4); */
+ sio_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ sio_desc_address, (u32 *) &desc_pp,
+ sizeof(desc_pp));
+ } else {
+ io_sub_id = dmareq_addr = ABE_DMASTATUS_RAW;
+ dmareq_field = 0;
+ atc_desc_address1 = atc_desc_address2 = 0;
+ /* default: repeat of the last downlink samples in case of
+ DMA errors, (disable=0x00) */
+ io_flag = 0xFF;
+ datasize2 = datasize = abe_dma_port_iter_factor(format);
+ x_io = (u8) abe_dma_port_iteration(format);
+ nsamp = (x_io / datasize);
+ atc_ptr_saved2 = atc_ptr_saved = DMIC_ATC_PTR_labelID + id;
+ smem1 = abe_port[id].smem_buffer1;
+ smem3 = smem2 = abe_port[id].smem_buffer2;
+ copy_func_index1 = (u8) abe_dma_port_copy_subroutine_id(id);
+ before_func_index = after_func_index =
+ copy_func_index2 = NULL_COPY_CFPID;
+ switch (prot->protocol_switch) {
+ case DMIC_PORT_PROT:
+ /* DMIC port is read in two steps */
+ x_io = x_io >> 1;
+ nsamp = nsamp >> 1;
+ atc_desc_address1 = (ABE_ATC_DMIC_DMA_REQ*ATC_SIZE);
+ io_sub_id = IO_IP_CFPID;
+ break;
+ case MCPDMDL_PORT_PROT:
+ /* PDMDL port is written to in two steps */
+ x_io = x_io >> 1;
+ atc_desc_address1 =
+ (ABE_ATC_MCPDMDL_DMA_REQ*ATC_SIZE);
+ io_sub_id = IO_IP_CFPID;
+ break;
+ case MCPDMUL_PORT_PROT:
+ atc_desc_address1 =
+ (ABE_ATC_MCPDMUL_DMA_REQ*ATC_SIZE);
+ io_sub_id = IO_IP_CFPID;
+ break;
+ case SLIMBUS_PORT_PROT:
+ atc_desc_address1 =
+ abe_port[id].protocol.p.prot_slimbus.desc_addr1;
+ atc_desc_address2 =
+ abe_port[id].protocol.p.prot_slimbus.desc_addr2;
+ copy_func_index2 = NULL_COPY_CFPID;
+ /* @@@@@@
+ #define SPLIT_SMEM_CFPID 9
+ #define MERGE_SMEM_CFPID 10
+ #define SPLIT_TDM_12_CFPID 11
+ #define MERGE_TDM_12_CFPID 12
+ */
+ io_sub_id = IO_IP_CFPID;
+ break;
+ case SERIAL_PORT_PROT: /* McBSP/McASP */
+ atc_desc_address1 =
+ (s16) abe_port[id].protocol.p.prot_serial.
+ desc_addr;
+ io_sub_id = IO_IP_CFPID;
+ break;
+ case DMAREQ_PORT_PROT: /* DMA w/wo CBPr */
+ dmareq_addr =
+ abe_port[id].protocol.p.prot_dmareq.dma_addr;
+ dmareq_field = 0;
+ atc_desc_address1 =
+ abe_port[id].protocol.p.prot_dmareq.desc_addr;
+ io_sub_id = IO_IP_CFPID;
+ break;
+ }
+ /* special situation of the PING_PONG protocol which
+ has its own SIO descriptor format */
+ /*
+ Sequence of operations on ping-pong buffers B0/B1
+ -------------- time ---------------------------->>>>
+ Host Application is ready to send data from DDR to B0
+ SDMA is initialized from "abe_connect_irq_ping_pong_port" to B0
+ FIRMWARE starts with #12 B1 data,
+ sends IRQ/DMAreq, sends #pong B1 data,
+ sends IRQ/DMAreq, sends #ping B0,
+ sends B1 samples
+ ARM / SDMA | fills B0 | fills B1 ... | fills B0 ...
+ Counter 0 1 2 3
+ */
+ switch (id) {
+ case OMAP_ABE_PDM_DL_PORT:
+ abe->MultiFrame[7][0] = ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_DL);
+ abe->MultiFrame[19][0] = ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_DL);
+ break;
+ case OMAP_ABE_TONES_DL_PORT:
+ break;
+ case OMAP_ABE_PDM_UL_PORT:
+ abe->MultiFrame[5][2] = ABE_TASK_ID(C_ABE_FW_TASK_IO_PDM_UL);
+ break;
+ case OMAP_ABE_DMIC_PORT:
+ abe->MultiFrame[2][5] = ABE_TASK_ID(C_ABE_FW_TASK_IO_DMIC);
+ abe->MultiFrame[14][3] = ABE_TASK_ID(C_ABE_FW_TASK_IO_DMIC);
+ break;
+ case OMAP_ABE_MM_UL_PORT:
+ copy_func_index1 = COPY_MM_UL_CFPID;
+ before_func_index = ROUTE_MM_UL_CFPID;
+ break;
+ case OMAP_ABE_MM_UL2_PORT:
+ break;
+ case OMAP_ABE_VX_DL_PORT:
+ abe->MultiFrame[0][2] = ABE_TASK_ID(C_ABE_FW_TASK_IO_VX_DL);
+ /* check for 8kHz/16kHz */
+ if (abe_port[id].format.f == 8000) {
+ abe->MultiFrame[TASK_VX_DL_SLT][TASK_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_VX_DL_8_48_FIR);
+ /*Voice_8k_DL_labelID */
+ smem1 = IO_VX_DL_ASRC_labelID;
+
+ if ((abe_port[OMAP_ABE_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is VX_DL_PORT
+ * both VX_UL ASRC and VX_DL ASRC will add/remove sample
+ * referring to VX_DL flow_counter */
+ abe->MultiFrame[TASK_ASRC_VX_DL_SLT][TASK_ASRC_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_DL_8);
+ abe->MultiFrame[TASK_ASRC_VX_UL_SLT][TASK_ASRC_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_UL_8_SIB);
+ /* Init VX_UL ASRC & VX_DL ASRC and enable its adaptation */
+ abe_init_asrc_vx_ul(-250);
+ abe_init_asrc_vx_dl(250);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+ } else {
+ abe->MultiFrame[TASK_VX_DL_SLT][TASK_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_VX_DL_16_48);
+ /* Voice_16k_DL_labelID */
+ smem1 = IO_VX_DL_ASRC_labelID;
+
+ if ((abe_port[OMAP_ABE_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is VX_DL_PORT
+ * both VX_UL ASRC and VX_DL ASRC will add/remove sample
+ * referring to VX_DL flow_counter */
+ abe->MultiFrame[TASK_ASRC_VX_DL_SLT][TASK_ASRC_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_DL_16);
+ abe->MultiFrame[TASK_ASRC_VX_UL_SLT][TASK_ASRC_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_UL_16_SIB);
+ /* Init VX_UL ASRC & VX_DL ASRC and enable its adaptation */
+ abe_init_asrc_vx_ul(-250);
+ abe_init_asrc_vx_dl(250);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+ }
+ break;
+ case OMAP_ABE_VX_UL_PORT:
+ abe->MultiFrame[16][3] = ABE_TASK_ID(C_ABE_FW_TASK_IO_VX_UL);
+ /* check for 8kHz/16kHz */
+ if (abe_port[id].format.f == 8000) {
+ abe->MultiFrame[TASK_VX_UL_SLT][TASK_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_VX_UL_48_8);
+ /* MultiFrame[TASK_ECHO_SLT][TASK_ECHO_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ECHO_REF_48_8); */
+ smem1 = Voice_8k_UL_labelID;
+
+ if ((abe_port[OMAP_ABE_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is VX_UL_PORT
+ * both VX_UL ASRC and VX_DL ASRC will add/remove sample
+ * referring to VX_UL flow_counter */
+ abe->MultiFrame[TASK_ASRC_VX_DL_SLT][TASK_ASRC_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_DL_8_SIB);
+ abe->MultiFrame[TASK_ASRC_VX_UL_SLT][TASK_ASRC_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_UL_8);
+ /* Init VX_UL ASRC & VX_DL ASRC and enable its adaptation */
+ abe_init_asrc_vx_ul(-250);
+ abe_init_asrc_vx_dl(250);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+ } else {
+ abe->MultiFrame[TASK_VX_UL_SLT][TASK_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_VX_UL_48_16);
+ /* MultiFrame[TASK_ECHO_SLT][TASK_ECHO_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ECHO_REF_48_16); */
+ smem1 = Voice_16k_UL_labelID;
+
+ if ((abe_port[OMAP_ABE_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is VX_UL_PORT
+ * both VX_UL ASRC and VX_DL ASRC will add/remove sample
+ * referring to VX_UL flow_counter */
+ abe->MultiFrame[TASK_ASRC_VX_DL_SLT][TASK_ASRC_VX_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_DL_16_SIB);
+ abe->MultiFrame[TASK_ASRC_VX_UL_SLT][TASK_ASRC_VX_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_VX_UL_16);
+ /* Init VX_UL ASRC & VX_DL ASRC and enable its adaptation */
+ abe_init_asrc_vx_ul(-250);
+ abe_init_asrc_vx_dl(250);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+ }
+ break;
+ case OMAP_ABE_BT_VX_DL_PORT:
+ /* check for 8kHz/16kHz */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MAXTASKBYTESINSLOT_ADDR, &dOppMode32,
+ sizeof(u32));
+
+ /* Disable BT ASRC */
+ dOppMode32 = DOPPMODE32_OPP50;
+
+ if (abe_port[id].format.f == 8000) {
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_8_FIR_OPP100);
+ smem1 = BT_DL_8k_opp100_labelID;
+ } else {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_8_FIR);
+ smem1 = BT_DL_8k_labelID;
+ }
+#if 0
+ if ((abe_port[OMAP_ABE_BT_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_BT_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is BT_VX_DL_PORT
+ * both BT_VX_DL ASRC and BT_VX_UL ASRC will add/remove sample
+ * referring to BT_VX_DL flow_counter */
+ abe->MultiFrame[TASK_ASRC_BT_DL_SLT][TASK_ASRC_BT_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_DL_8);
+ abe->MultiFrame[TASK_ASRC_BT_UL_SLT][TASK_ASRC_BT_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_UL_8_SIB);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+#endif
+ } else {
+ if (dOppMode32 == DOPPMODE32_OPP100) {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_16_OPP100);
+ smem1 = BT_DL_16k_opp100_labelID;
+ } else {
+ abe->MultiFrame[TASK_BT_DL_48_8_SLT][TASK_BT_DL_48_8_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_DL_48_16);
+ smem1 = BT_DL_16k_labelID;
+ }
+#if 0
+ if ((abe_port[OMAP_ABE_BT_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_BT_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is BT_VX_DL_PORT
+ * both BT_VX_DL ASRC and BT_VX_UL ASRC will add/remove sample
+ * referring to BT_VX_DL flow_counter */
+ abe->MultiFrame[TASK_ASRC_BT_DL_SLT][TASK_ASRC_BT_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_DL_16);
+ abe->MultiFrame[TASK_ASRC_BT_UL_SLT][TASK_ASRC_BT_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_UL_16_SIB);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+#endif
+ }
+ break;
+ case OMAP_ABE_BT_VX_UL_PORT:
+ /* check for 8kHz/16kHz */
+ /* set the SMEM buffer -- programming sequence */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MAXTASKBYTESINSLOT_ADDR, &dOppMode32,
+ sizeof(u32));
+
+ /* Disable BT ASRC */
+ dOppMode32 = DOPPMODE32_OPP50;
+
+ if (abe_port[id].format.f == 8000) {
+ abe->MultiFrame[TASK_BT_UL_8_48_SLT][TASK_BT_UL_8_48_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_UL_8_48);
+ if (dOppMode32 == DOPPMODE32_OPP100)
+ /* ASRC input buffer, size 40 */
+ smem1 = smem_bt_vx_ul_opp100;
+ else
+ /* at OPP 50 without ASRC */
+ smem1 = BT_UL_8k_labelID;
+#if 0
+ if ((abe_port[OMAP_ABE_BT_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_BT_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is BT_VX_UL_PORT */
+ /* both BT_VX_UL ASRC and BT_VX_DL ASRC will add/remove sample
+ referring to BT_VX_UL flow_counter */
+ abe->MultiFrame[TASK_ASRC_BT_UL_SLT][TASK_ASRC_BT_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_UL_8);
+ abe->MultiFrame[TASK_ASRC_BT_DL_SLT][TASK_ASRC_BT_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_DL_8_SIB);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+#endif
+ } else {
+ abe->MultiFrame[TASK_BT_UL_8_48_SLT][TASK_BT_UL_8_48_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_BT_UL_16_48);
+ if (dOppMode32 == DOPPMODE32_OPP100)
+ /* ASRC input buffer, size 40 */
+ smem1 = smem_bt_vx_ul_opp100;
+ else
+ /* at OPP 50 without ASRC */
+ smem1 = BT_UL_16k_labelID;
+#if 0
+ if ((abe_port[OMAP_ABE_BT_VX_UL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE) &&
+ (abe_port[OMAP_ABE_BT_VX_DL_PORT].status ==
+ OMAP_ABE_PORT_ACTIVITY_IDLE)) {
+ /* the 1st opened port is BT_VX_UL_PORT */
+ /* both BT_VX_UL ASRC and BT_VX_DL ASRC will add/remove sample
+ referring to BT_VX_UL flow_counter */
+ abe->MultiFrame[TASK_ASRC_BT_UL_SLT][TASK_ASRC_BT_UL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_UL_16);
+ abe->MultiFrame[TASK_ASRC_BT_DL_SLT][TASK_ASRC_BT_DL_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ASRC_BT_DL_16_SIB);
+ } else {
+ /* Do nothing, Scheduling Table has already been patched */
+ }
+#endif
+ }
+ break;
+ case OMAP_ABE_MM_DL_PORT:
+ /* check for CBPr / serial_port / Ping-pong access */
+ smem1 = smem_mm_dl;
+ break;
+ case OMAP_ABE_MM_EXT_IN_PORT:
+ /* set the SMEM buffer -- programming sequence */
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MAXTASKBYTESINSLOT_ADDR, &dOppMode32,
+ sizeof(u32));
+
+ /* Disable MM EXT ASRC */
+ dOppMode32 = DOPPMODE32_OPP50;
+
+ if (dOppMode32 == DOPPMODE32_OPP100)
+ /* ASRC input buffer, size 40 */
+ smem1 = smem_mm_ext_in_opp100;
+ else
+ /* at OPP 50 without ASRC */
+ smem1 = smem_mm_ext_in_opp50;
+
+ break;
+ case OMAP_ABE_MM_EXT_OUT_PORT:
+ break;
+ default:
+ break;
+ }
+
+ if (abe_port[id].protocol.direction == ABE_ATC_DIRECTION_IN)
+ direction = 0;
+ else
+ /* offset of the write pointer in the ATC descriptor */
+ direction = 3;
+
+ sio_desc.drift_ASRC = 0;
+ sio_desc.drift_io = 0;
+ sio_desc.io_type_idx = (u8) io_sub_id;
+ sio_desc.samp_size = (u8) datasize;
+ sio_desc.hw_ctrl_addr = (u16) (dmareq_addr << 2);
+ sio_desc.atc_irq_data = (u8) dmareq_field;
+ sio_desc.flow_counter = (u16) 0;
+ sio_desc.direction_rw = (u8) direction;
+ sio_desc.repeat_last_samp = (u8) io_flag;
+ sio_desc.nsamp = (u8) nsamp;
+ sio_desc.x_io = (u8) x_io;
+ /* set ATC ON */
+ sio_desc.on_off = 0x80;
+ sio_desc.split_addr1 = (u16) smem1;
+ sio_desc.split_addr2 = (u16) smem2;
+ sio_desc.split_addr3 = (u16) smem3;
+ sio_desc.before_f_index = (u8) before_func_index;
+ sio_desc.after_f_index = (u8) after_func_index;
+ sio_desc.smem_addr1 = (u16) smem1;
+ sio_desc.atc_address1 = (u16) atc_desc_address1;
+ sio_desc.atc_pointer_saved1 = (u16) atc_ptr_saved;
+ sio_desc.data_size1 = (u8) datasize;
+ sio_desc.copy_f_index1 = (u8) copy_func_index1;
+ sio_desc.smem_addr2 = (u16) smem2;
+ sio_desc.atc_address2 = (u16) atc_desc_address2;
+ sio_desc.atc_pointer_saved2 = (u16) atc_ptr_saved2;
+ sio_desc.data_size2 = (u8) datasize2;
+ sio_desc.copy_f_index2 = (u8) copy_func_index2;
+ sio_desc_address = OMAP_ABE_D_IODESCR_ADDR + (id *
+ sizeof(struct ABE_SIODescriptor));
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ sio_desc_address, (u32 *) &sio_desc,
+ sizeof(sio_desc));
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM,
+ OMAP_ABE_D_MULTIFRAME_ADDR, (u32 *) abe->MultiFrame,
+ sizeof(abe->MultiFrame));
+ }
+
+}
+
+/**
+ * omap_abe_select_main_port - Select the synchronization port for the Event generator.
+ * @id: audio port name
+ *
+ * tells the FW which is the reference stream for adjusting
+ * the processing on 23/24/25 slots
+ */
+int omap_abe_select_main_port(u32 id)
+{
+ u32 selection;
+
+ _log(ABE_ID_SELECT_MAIN_PORT, id, 0, 0);
+
+ /* flow control */
+ selection = OMAP_ABE_D_IODESCR_ADDR + id * sizeof(struct ABE_SIODescriptor) +
+ flow_counter_;
+ /* when the main port is a sink port from the AESS point of view,
+ the sign used in the firmware task analysis must be changed */
+ selection &= 0xFFFFL;
+ if (abe_port[id].protocol.direction == ABE_ATC_DIRECTION_IN)
+ selection |= 0x80000;
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_SLOT23_CTRL_ADDR,
+ &selection, 4);
+ return 0;
+}
+/**
+ * abe_decide_main_port - Select the synchronization port for the Event generator.
+ * @id: audio port name
+ *
+ * tells the FW which is the reference stream for adjusting
+ * the processing on 23/24/25 slots
+ *
+ * takes the first port in a list which is slave on the data interface
+ */
+u32 abe_valid_port_for_synchro(u32 id)
+{
+ if ((abe_port[id].protocol.protocol_switch ==
+ DMAREQ_PORT_PROT) ||
+ (abe_port[id].protocol.protocol_switch ==
+ PINGPONG_PORT_PROT) ||
+ (abe_port[id].status != OMAP_ABE_PORT_ACTIVITY_RUNNING))
+ return 0;
+ else
+ return 1;
+}
+void abe_decide_main_port(void)
+{
+ u32 id, id_not_found;
+ id_not_found = 1;
+ for (id = 0; id < LAST_PORT_ID - 1; id++) {
+ if (abe_valid_port_for_synchro(abe_port_priority[id])) {
+ id_not_found = 0;
+ break;
+ }
+ }
+ /* if no port is currently activated, the default one is PDM_DL */
+ if (id_not_found)
+ omap_abe_select_main_port(OMAP_ABE_PDM_DL_PORT);
+ else
+ omap_abe_select_main_port(abe_port_priority[id]);
+}
+/**
+ * abe_format_switch
+ * @f: port format
+ * @iter: port iteration
+ * @mulfac: multiplication factor
+ *
+ * translates the sampling and data length to ITER number for the DMA
+ * and the multiplier factor to apply during data move with DMEM
+ *
+ */
+void abe_format_switch(abe_data_format_t *f, u32 *iter, u32 *mulfac)
+{
+ u32 n_freq;
+#if FW_SCHED_LOOP_FREQ == 4000
+ switch (f->f) {
+ /* nb of samples processed by scheduling loop */
+ case 8000:
+ n_freq = 2;
+ break;
+ case 16000:
+ n_freq = 4;
+ break;
+ case 24000:
+ n_freq = 6;
+ break;
+ case 44100:
+ n_freq = 12;
+ break;
+ case 96000:
+ n_freq = 24;
+ break;
+ default/*case 48000 */ :
+ n_freq = 12;
+ break;
+ }
+#else
+ /* erroneous cases */
+ n_freq = 0;
+#endif
+ switch (f->samp_format) {
+ case MONO_MSB:
+ case MONO_RSHIFTED_16:
+ case STEREO_16_16:
+ *mulfac = 1;
+ break;
+ case STEREO_MSB:
+ case STEREO_RSHIFTED_16:
+ *mulfac = 2;
+ break;
+ case THREE_MSB:
+ *mulfac = 3;
+ break;
+ case FOUR_MSB:
+ *mulfac = 4;
+ break;
+ case FIVE_MSB:
+ *mulfac = 5;
+ break;
+ case SIX_MSB:
+ *mulfac = 6;
+ break;
+ case SEVEN_MSB:
+ *mulfac = 7;
+ break;
+ case EIGHT_MSB:
+ *mulfac = 8;
+ break;
+ case NINE_MSB:
+ *mulfac = 9;
+ break;
+ default:
+ *mulfac = 1;
+ break;
+ }
+ *iter = (n_freq * (*mulfac));
+}
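+/*
+ * Worked example of the mapping above (with FW_SCHED_LOOP_FREQ == 4000):
+ * a 48kHz STEREO_MSB stream gives n_freq = 12 samples per scheduling loop
+ * and mulfac = 2, hence iter = 12 * 2 = 24; an 8kHz MONO_MSB voice stream
+ * gives iter = 2 * 1 = 2.
+ */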
+/**
+ * abe_dma_port_iteration
+ * @f: port format
+ *
+ * translates the sampling and data length to ITER number for the DMA
+ */
+u32 abe_dma_port_iteration(abe_data_format_t *f)
+{
+ u32 iter, mulfac;
+ abe_format_switch(f, &iter, &mulfac);
+ return iter;
+}
+/**
+ * abe_dma_port_iter_factor
+ * @f: port format
+ *
+ * returns the multiplier factor to apply during data move with DMEM
+ */
+u32 abe_dma_port_iter_factor(abe_data_format_t *f)
+{
+ u32 iter, mulfac;
+ abe_format_switch(f, &iter, &mulfac);
+ return mulfac;
+}
+/**
+ * omap_abe_dma_port_iter_factor
+ * @f: port format
+ *
+ * returns the multiplier factor to apply during data move with DMEM
+ */
+u32 omap_abe_dma_port_iter_factor(struct omap_abe_data_format *f)
+{
+ u32 iter, mulfac;
+ abe_format_switch((abe_data_format_t *)f, &iter, &mulfac);
+ return mulfac;
+}
+/**
+ * abe_dma_port_copy_subroutine_id
+ *
+ * @port_id: ABE port ID
+ *
+ * returns the index of the function doing the copy in I/O tasks
+ */
+u32 abe_dma_port_copy_subroutine_id(u32 port_id)
+{
+ u32 sub_id;
+ if (abe_port[port_id].protocol.direction == ABE_ATC_DIRECTION_IN) {
+ switch (abe_port[port_id].format.samp_format) {
+ case MONO_MSB:
+ sub_id = D2S_MONO_MSB_CFPID;
+ break;
+ case MONO_RSHIFTED_16:
+ sub_id = D2S_MONO_RSHIFTED_16_CFPID;
+ break;
+ case STEREO_RSHIFTED_16:
+ sub_id = D2S_STEREO_RSHIFTED_16_CFPID;
+ break;
+ case STEREO_16_16:
+ sub_id = D2S_STEREO_16_16_CFPID;
+ break;
+ case STEREO_MSB:
+ sub_id = D2S_STEREO_MSB_CFPID;
+ break;
+ case SIX_MSB:
+ if (port_id == OMAP_ABE_DMIC_PORT) {
+ sub_id = COPY_DMIC_CFPID;
+ break;
+ }
+ default:
+ sub_id = NULL_COPY_CFPID;
+ break;
+ }
+ } else {
+ switch (abe_port[port_id].format.samp_format) {
+ case MONO_MSB:
+ sub_id = S2D_MONO_MSB_CFPID;
+ break;
+ case MONO_RSHIFTED_16:
+ sub_id = S2D_MONO_RSHIFTED_16_CFPID;
+ break;
+ case STEREO_RSHIFTED_16:
+ sub_id = S2D_STEREO_RSHIFTED_16_CFPID;
+ break;
+ case STEREO_16_16:
+ sub_id = S2D_STEREO_16_16_CFPID;
+ break;
+ case STEREO_MSB:
+ sub_id = S2D_STEREO_MSB_CFPID;
+ break;
+ case SIX_MSB:
+ if (port_id == OMAP_ABE_PDM_DL_PORT) {
+ sub_id = COPY_MCPDM_DL_CFPID;
+ break;
+ }
+ if (port_id == OMAP_ABE_MM_UL_PORT) {
+ sub_id = COPY_MM_UL_CFPID;
+ break;
+ }
+ case THREE_MSB:
+ case FOUR_MSB:
+ case FIVE_MSB:
+ case SEVEN_MSB:
+ case EIGHT_MSB:
+ case NINE_MSB:
+ sub_id = COPY_MM_UL_CFPID;
+ break;
+ default:
+ sub_id = NULL_COPY_CFPID;
+ break;
+ }
+ }
+ return sub_id;
+}
+
+/**
+ * abe_read_remaining_data
+ * @port: ABE port ID
+ * @n: pointer filled with the remaining number of 32-bit words
+ *
+ * computes the remaining amount of data in the buffer.
+ */
+abehal_status abe_read_remaining_data(u32 port, u32 *n)
+{
+ u32 sio_pp_desc_address;
+ struct ABE_SPingPongDescriptor desc_pp;
+
+ _log(ABE_ID_READ_REMAINING_DATA, port, 0, 0);
+
+ /*
+ * read the port SIO descriptor and extract the
+ * current pointer address after reading the counter
+ */
+ sio_pp_desc_address = OMAP_ABE_D_PINGPONGDESC_ADDR;
+ omap_abe_mem_read(abe, OMAP_ABE_DMEM, sio_pp_desc_address,
+ (u32 *) &desc_pp, sizeof(struct ABE_SPingPongDescriptor));
+ *n = desc_pp.workbuff_Samples;
+
+ return 0;
+}
+EXPORT_SYMBOL(abe_read_remaining_data);
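+
+/*
+ * Illustrative sketch only (assumed caller code): poll how many 32-bit
+ * words are still pending in the current ping-pong work buffer.
+ *
+ *   u32 remaining;
+ *
+ *   abe_read_remaining_data(OMAP_ABE_MM_DL_PORT, &remaining);
+ */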
+
+/**
+ * omap_abe_mono_mixer
+ * @id: name of the mixer (MIXDL1, MIXDL2 or MIXAUDUL)
+ * @on_off: enable/disable flag
+ *
+ * This API programs DL1Mixer, DL2Mixer or ULMixer to output mono data
+ * on both left and right data paths.
+ */
+int omap_abe_mono_mixer(struct omap_abe *abe, u32 id, u32 on_off)
+{
+ switch (id) {
+ case MIXDL1:
+ if (on_off)
+ abe->MultiFrame[TASK_DL1Mixer_SLT][TASK_DL1Mixer_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_DL1Mixer_dual_mono);
+ else
+ abe->MultiFrame[TASK_DL1Mixer_SLT][TASK_DL1Mixer_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_DL1Mixer);
+ break;
+ case MIXDL2:
+ if (on_off)
+ abe->MultiFrame[TASK_DL2Mixer_SLT][TASK_DL2Mixer_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_DL2Mixer_dual_mono);
+ else
+ abe->MultiFrame[TASK_DL2Mixer_SLT][TASK_DL2Mixer_IDX] =
+ ABE_TASK_ID(C_ABE_FW_TASK_DL2Mixer);
+ break;
+ case MIXAUDUL:
+ if (on_off)
+ abe->MultiFrame[12][4] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ULMixer_dual_mono);
+ else
+ abe->MultiFrame[12][4] =
+ ABE_TASK_ID(C_ABE_FW_TASK_ULMixer);
+ break;
+ default:
+ break;
+ }
+
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_MULTIFRAME_ADDR,
+ (u32 *) abe->MultiFrame, sizeof(abe->MultiFrame));
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_mono_mixer);
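+
+/*
+ * Illustrative sketch only (assumed caller code): duplicate a mono source
+ * on both channels of the DL1 path, then restore the stereo mixer task.
+ *
+ *   omap_abe_mono_mixer(abe, MIXDL1, 1);
+ *   ... play mono content ...
+ *   omap_abe_mono_mixer(abe, MIXDL1, 0);
+ */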
+/**
+ * abe_write_pdmdl_offset - write the desired offset on the DL1/DL2 paths
+ *
+ * Parameters:
+ * path: 1 for the DL1 ABE path, 2 for the DL2 ABE path
+ * offset_left: integer value that will be added on all PDM left samples
+ * offset_right: integer value that will be added on all PDM right samples
+ *
+ */
+void abe_write_pdmdl_offset(u32 path, u32 offset_left, u32 offset_right)
+{
+ switch (path) {
+ case 1:
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, OMAP_ABE_S_DC_HS_ADDR + 4,
+ &offset_left, sizeof(u32));
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, OMAP_ABE_S_DC_HS_ADDR,
+ &offset_right, sizeof(u32));
+ break;
+ case 2:
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, OMAP_ABE_S_DC_HF_ADDR + 4,
+ &offset_left, sizeof(u32));
+ omap_abe_mem_write(abe, OMAP_ABE_SMEM, OMAP_ABE_S_DC_HF_ADDR,
+ &offset_right, sizeof(u32));
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(abe_write_pdmdl_offset);
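+
+/*
+ * Illustrative sketch only (assumed caller code): apply a DC offset on the
+ * left and right PDM samples of the DL1 (headset) path. The value 0x400 is
+ * an arbitrary number chosen for the example.
+ *
+ *   abe_write_pdmdl_offset(1, 0x400, 0x400);
+ */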
+
diff --git a/sound/soc/omap/abe/abe_port.h b/sound/soc/omap/abe/abe_port.h
new file mode 100644
index 0000000..290f8b5
--- /dev/null
+++ b/sound/soc/omap/abe/abe_port.h
@@ -0,0 +1,161 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_PORT_H_
+#define _ABE_PORT_H_
+
+struct omap_abe_data_format {
+ /* Sampling frequency of the stream */
+ u32 f;
+ /* Sample format type */
+ u32 samp_format;
+};
+
+struct omap_abe_port_protocol {
+ /* Direction=0 means input from AESS point of view */
+ u32 direction;
+ /* Protocol type (switch) during the data transfers */
+ u32 protocol_switch;
+ union {
+ /* McBSP/McASP peripheral connected to ATC */
+ struct {
+ /* Address of the ATC McBSP/McASP descriptor in bytes */
+ u32 desc_addr;
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ } serial;
+ /* DMIC peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* Number of activated DMIC */
+ u32 nbchan;
+ } dmic;
+ /* McPDMDL peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes */
+ u32 buf_size;
+ /* Control allowed on McPDM DL */
+ u32 control;
+ } mcpdmdl;
+ /* McPDMUL peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ } mcpdmul;
+ /* Ping-Pong interface to the Host using cache-flush */
+ struct {
+ /* Address of the ATC descriptor */
+ u32 desc_addr;
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes for each ping and pong buffers */
+ u32 buf_size;
+ /* IRQ address (either DMA (0) MCU (1) or DSP(2)) */
+ u32 irq_addr;
+ /* IRQ data content loaded in the AESS IRQ register */
+ u32 irq_data;
+ /* Call-back function upon IRQ reception */
+ u32 callback;
+ } pingpong;
+ /* DMAreq line to CBPr */
+ struct {
+ /* Address of the ATC descriptor */
+ u32 desc_addr;
+ /* DMEM buffer address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } dmareq;
+ /* Circular buffer - direct addressing to DMEM */
+ struct {
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } circular_buffer;
+ } port;
+};
+
+extern const abe_port_t abe_port_init[];
+extern abe_port_t abe_port[];
+extern const u32 abe_port_priority[];
+
+int omap_abe_select_main_port(u32 id);
+u32 omap_abe_dma_port_iter_factor(struct omap_abe_data_format *f);
+
+#endif /* _ABE_PORT_H_ */
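For context, a minimal sketch of how a port could be described with the two structures above, using the CBPr DMAreq protocol. The numeric field values are placeholders, not taken from the driver; DMAREQ_PORT_PROT and STEREO_MSB come from abe_typ.h further down in this patch.

	struct omap_abe_data_format fmt = {
		.f = 48000,			/* 48 kHz stream */
		.samp_format = STEREO_MSB,	/* two MSB-aligned samples per frame */
	};

	struct omap_abe_port_protocol proto = {
		.direction = 0,				/* input, from the AESS point of view */
		.protocol_switch = DMAREQ_PORT_PROT,	/* DMAreq line to CBPr */
		.port.dmareq = {
			.desc_addr = 0x0,	/* placeholder ATC descriptor address */
			.buf_addr = 0x0,	/* placeholder DMEM buffer address */
			.buf_size = 0x80,	/* placeholder DMEM buffer size in bytes */
			.iter = 2,		/* samples moved per DMA request */
			.dma_addr = 0x0,	/* placeholder DMAreq address */
			.dma_data = 1 << 0,	/* 1 << DMA request line number */
		},
	};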
diff --git a/sound/soc/omap/abe/abe_ref.h b/sound/soc/omap/abe/abe_ref.h
new file mode 100644
index 0000000..4c8a9bb
--- /dev/null
+++ b/sound/soc/omap/abe/abe_ref.h
@@ -0,0 +1,152 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_REF_H_
+#define _ABE_REF_H_
+
+#include "abe_api.h"
+
+/*
+ * 'ABE_PRO.H' all non-API prototypes for INI, IRQ, SEQ ...
+ */
+/*
+ * HAL EXTERNAL API
+ */
+/*
+ * HAL INTERNAL API
+ */
+void abe_decide_main_port(void);
+void abe_reset_all_ports(void);
+void abe_reset_all_fifo(void);
+void abe_reset_all_sequence(void);
+u32 abe_dma_port_iteration(abe_data_format_t *format);
+void abe_read_sys_clock(u32 *time);
+void abe_enable_atc(u32 id);
+void abe_disable_atc(u32 id);
+void abe_init_io_tasks(u32 id, abe_data_format_t *format,
+ abe_port_protocol_t *prot);
+void abe_init_dma_t(u32 id, abe_port_protocol_t *prot);
+u32 abe_dma_port_iter_factor(abe_data_format_t *f);
+u32 abe_dma_port_copy_subroutine_id(u32 i);
+void abe_call_subroutine(u32 idx, u32 p1, u32 p2, u32 p3, u32 p4);
+void abe_monitoring(void);
+void abe_add_subroutine(u32 *id, abe_subroutine2 f, u32 nparam, u32 *params);
+abehal_status abe_read_next_ping_pong_buffer(u32 port, u32 *p, u32 *n);
+void abe_irq_ping_pong(void);
+void abe_irq_check_for_sequences(u32 seq_info);
+void abe_default_irq_pingpong_player(void);
+void abe_default_irq_pingpong_player_32bits(void);
+void abe_rshifted16_irq_pingpong_player_32bits(void);
+void abe_1616_irq_pingpong_player_1616bits(void);
+void abe_default_irq_aps_adaptation(void);
+void abe_irq_aps(u32 aps_info);
+void abe_dbg_error_log(u32 x);
+void abe_init_asrc_vx_dl(s32 dppm);
+void abe_init_asrc_vx_ul(s32 dppm);
+void abe_init_asrc_mm_ext_in(s32 dppm);
+void abe_init_asrc_bt_ul(s32 dppm);
+void abe_init_asrc_bt_dl(s32 dppm);
+
+void omap_abe_hw_configuration(struct omap_abe *abe);
+void omap_abe_gain_offset(struct omap_abe *abe, u32 id, u32 *mixer_offset);
+int omap_abe_use_compensated_gain(struct omap_abe *abe, int on_off);
+
+/*
+ * HAL INTERNAL DATA
+ */
+extern const u32 abe_port_priority[LAST_PORT_ID - 1];
+extern const u32 abe_firmware_array[ABE_FIRMWARE_MAX_SIZE];
+extern const u32 abe_atc_srcid[];
+extern const u32 abe_atc_dstid[];
+extern const abe_port_t abe_port_init[];
+extern const abe_seq_t all_sequence_init[];
+extern const abe_router_t abe_router_ul_table_preset
+ [NBROUTE_CONFIG][NBROUTE_UL];
+extern const abe_sequence_t seq_null;
+
+extern abe_port_t abe_port[];
+extern abe_seq_t all_sequence[];
+extern abe_router_t abe_router_ul_table[NBROUTE_CONFIG_MAX][NBROUTE_UL];
+/* table of new subroutines called in the sequence */
+extern abe_subroutine2 abe_all_subsubroutine[MAXNBSUBROUTINE];
+/* number of parameters per call */
+extern u32 abe_all_subsubroutine_nparam[MAXNBSUBROUTINE];
+extern u32 abe_subroutine_id[MAXNBSUBROUTINE];
+extern u32 *abe_all_subroutine_params[MAXNBSUBROUTINE];
+extern u32 abe_subroutine_write_pointer;
+extern abe_sequence_t abe_all_sequence[MAXNBSEQUENCE];
+extern u32 abe_sequence_write_pointer;
+/* current number of pending sequences (avoids looking in the table) */
+extern u32 abe_nb_pending_sequences;
+/* pending sequences due to resource collision */
+extern u32 abe_pending_sequences[MAXNBSEQUENCE];
+/* mask of unshareable resources among other sequences */
+extern u32 abe_global_sequence_mask;
+/* table of active sequences */
+extern abe_seq_t abe_active_sequence[MAXACTIVESEQUENCE][MAXSEQUENCESTEPS];
+/* index of the plugged subroutine doing ping-pong cache-flush
+ DMEM accesses */
+extern u32 abe_irq_aps_adaptation_id;
+/* base addresses of the ping pong buffers */
+extern u32 abe_base_address_pingpong[MAX_PINGPONG_BUFFERS];
+/* size of each ping/pong buffers */
+extern u32 abe_size_pingpong;
+/* number of ping/pong buffer being used */
+extern u32 abe_nb_pingpong;
+
+#endif /* _ABE_REF_H_ */
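As a rough illustration of the ping-pong helpers declared above, the next-buffer query might be used as below; what the two output parameters carry is only inferred from the prototype and surrounding names (DMEM address and size of the next half), so treat that as an assumption.

	u32 addr, bytes;

	/* ask the HAL which ping/pong half should be filled next; the meaning
	   of addr/bytes (DMEM address and size in bytes) is assumed here */
	abe_read_next_ping_pong_buffer(OMAP_ABE_MM_DL_PORT, &addr, &bytes);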
diff --git a/sound/soc/omap/abe/abe_seq.c b/sound/soc/omap/abe/abe_seq.c
new file mode 100644
index 0000000..6ae2aa5
--- /dev/null
+++ b/sound/soc/omap/abe/abe_seq.c
@@ -0,0 +1,308 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "abe_legacy.h"
+
+#include "abe_mem.h"
+
+extern struct omap_abe *abe;
+extern u32 abe_irq_pingpong_player_id;
+
+/**
+ * abe_null_subroutine
+ *
+ */
+void abe_null_subroutine_0(void)
+{
+}
+void abe_null_subroutine_2(u32 a, u32 b)
+{
+}
+void abe_null_subroutine_4(u32 a, u32 b, u32 c, u32 d)
+{
+}
+/**
+ * abe_init_subroutine_table - initializes the default table of pointers
+ * to subroutines
+ *
+ * initializes the default table of pointers to subroutines
+ *
+ */
+void abe_init_subroutine_table(void)
+{
+ u32 id;
+ /* reset the table's pointers */
+ abe_subroutine_write_pointer = 0;
+ /* the first index is the NULL task */
+ abe_add_subroutine(&id, (abe_subroutine2) abe_null_subroutine_2,
+ SUB_0_PARAM, (u32 *) 0);
+ /* write mixer has 4 parameters */
+ abe_add_subroutine(&(abe_subroutine_id[SUB_WRITE_MIXER]),
+ (abe_subroutine2) abe_write_mixer, SUB_4_PARAM,
+ (u32 *) 0);
+ /* ping-pong player IRQ */
+ abe_add_subroutine(&abe_irq_pingpong_player_id,
+ (abe_subroutine2) abe_null_subroutine_0, SUB_0_PARAM,
+ (u32 *) 0);
+}
+/**
+ * abe_add_subroutine
+ * @id: ABE port id
+ * @f: pointer to the subroutines
+ * @nparam: number of parameters
+ * @params: pointer to the parameters
+ *
+ * adds one more function pointer and returns its index
+ */
+void abe_add_subroutine(u32 *id, abe_subroutine2 f, u32 nparam, u32 *params)
+{
+ u32 i, i_found;
+ if ((abe_subroutine_write_pointer >= MAXNBSUBROUTINE) ||
+ ((u32) f == 0)) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_SEQ,
+ ABE_PARAMETER_OVERFLOW);
+ } else {
+ /* search if this subroutine address was not already
+ * declared, then return the previous index
+ */
+ for (i_found = abe_subroutine_write_pointer, i = 0;
+ i < abe_subroutine_write_pointer; i++) {
+ if (f == abe_all_subsubroutine[i])
+ i_found = i;
+ }
+ if (i_found == abe_subroutine_write_pointer) {
+ *id = abe_subroutine_write_pointer;
+ abe_all_subsubroutine
+ [abe_subroutine_write_pointer] = (f);
+ abe_all_subroutine_params
+ [abe_subroutine_write_pointer] = params;
+ abe_all_subsubroutine_nparam
+ [abe_subroutine_write_pointer] = nparam;
+ abe_subroutine_write_pointer++;
+ } else {
+ abe_all_subroutine_params[i_found] = params;
+ *id = i_found;
+ }
+ }
+}
+/**
+ * abe_add_sequence
+ * @id: returned sequence index after plugging a new sequence
+ * (index in the tables)
+ * @s: sequence to be inserted
+ *
+ * Loads a time-sequenced set of operations.
+ */
+void abe_add_sequence(u32 *id, abe_sequence_t *s)
+{
+ abe_seq_t *seq_src, *seq_dst;
+ u32 i, no_end_of_sequence_found;
+ seq_src = &(s->seq1);
+ seq_dst = &((abe_all_sequence[abe_sequence_write_pointer]).seq1);
+ if ((abe_sequence_write_pointer >= MAXNBSEQUENCE) || ((u32) s == 0)) {
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_SEQ,
+ ABE_PARAMETER_OVERFLOW);
+ } else {
+ *id = abe_sequence_write_pointer;
+ /* copy the mask */
+ (abe_all_sequence[abe_sequence_write_pointer]).mask = s->mask;
+ for (no_end_of_sequence_found = 1, i = 0; i < MAXSEQUENCESTEPS;
+ i++, seq_src++, seq_dst++) {
+ /* sequence copied line by line */
+ (*seq_dst) = (*seq_src);
+ /* stop when the line starts with time=(-1) */
+ if ((*(s32 *) seq_src) == (-1)) {
+ no_end_of_sequence_found = 0;
+ break;
+ }
+ }
+ abe_sequence_write_pointer++;
+ if (no_end_of_sequence_found)
+ omap_abe_dbg_error(abe, OMAP_ABE_ERR_API,
+ ABE_SEQTOOLONG);
+ }
+}
+/**
+ * abe_reset_one_sequence
+ * @id: sequence ID
+ *
+ * load default configuration for that sequence
+ * kill running activities
+ */
+void abe_reset_one_sequence(u32 id)
+{
+}
+/**
+ * omap_abe_reset_all_sequence
+ *
+ * load default configuration for all sequences
+ * kill any running activities
+ */
+void omap_abe_reset_all_sequence(struct omap_abe *abe)
+{
+ u32 i;
+ abe_init_subroutine_table();
+ /* make the first sequence (index 0) the NULL operation sequence */
+ abe_add_sequence(&i, (abe_sequence_t *) &seq_null);
+ /* reset the collision protection mask */
+ abe_global_sequence_mask = 0;
+ /* reset the pending sequences list */
+ for (abe_nb_pending_sequences = i = 0; i < MAXNBSEQUENCE; i++)
+ abe_pending_sequences[i] = 0;
+}
+/**
+ * abe_call_subroutine
+ * @idx: index to the table of all registered Call-backs and subroutines
+ *
+ * run and log a subroutine
+ */
+void abe_call_subroutine(u32 idx, u32 p1, u32 p2, u32 p3, u32 p4)
+{
+ abe_subroutine0 f0;
+ abe_subroutine1 f1;
+ abe_subroutine2 f2;
+ abe_subroutine3 f3;
+ abe_subroutine4 f4;
+ u32 *params;
+ if (idx >= MAXNBSUBROUTINE)
+ return;
+ switch (idx) {
+ /* call the subroutines defined at compilation time
+ (const .. sequences) */
+#if 0
+ case SUB_WRITE_MIXER_DL1:
+ abe_write_mixer_dl1(p1, p2, p3);
+ abe_fprintf("write_mixer");
+ break;
+#endif
+ /* call the subroutines defined at execution time
+ (dynamic sequences) */
+ default:
+ switch (abe_all_subsubroutine_nparam[idx]) {
+ case SUB_0_PARAM:
+ f0 = (abe_subroutine0) abe_all_subsubroutine[idx];
+ (*f0) ();
+ break;
+ case SUB_1_PARAM:
+ f1 = (abe_subroutine1) abe_all_subsubroutine[idx];
+ params = abe_all_subroutine_params
+ [abe_irq_pingpong_player_id];
+ if (params != (u32 *) 0)
+ p1 = params[0];
+ (*f1) (p1);
+ break;
+ case SUB_2_PARAM:
+ f2 = abe_all_subsubroutine[idx];
+ params = abe_all_subroutine_params
+ [abe_irq_pingpong_player_id];
+ if (params != (u32 *) 0) {
+ p1 = params[0];
+ p2 = params[1];
+ }
+ (*f2) (p1, p2);
+ break;
+ case SUB_3_PARAM:
+ f3 = (abe_subroutine3) abe_all_subsubroutine[idx];
+ params = abe_all_subroutine_params
+ [abe_irq_pingpong_player_id];
+ if (params != (u32 *) 0) {
+ p1 = params[0];
+ p2 = params[1];
+ p3 = params[2];
+ }
+ (*f3) (p1, p2, p3);
+ break;
+ case SUB_4_PARAM:
+ f4 = (abe_subroutine4) abe_all_subsubroutine[idx];
+ params = abe_all_subroutine_params
+ [abe_irq_pingpong_player_id];
+ if (params != (u32 *) 0) {
+ p1 = params[0];
+ p2 = params[1];
+ p3 = params[2];
+ p4 = params[3];
+ }
+ (*f4) (p1, p2, p3, p4);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * abe_set_sequence_time_accuracy
+ * @fast: fast counter
+ * @slow: slow counter
+ *
+ */
+abehal_status abe_set_sequence_time_accuracy(u32 fast, u32 slow)
+{
+ u32 data;
+ _log(ABE_ID_SET_SEQUENCE_TIME_ACCURACY, fast, slow, 0);
+ data = minimum(MAX_UINT16, fast / FW_SCHED_LOOP_FREQ_DIV1000);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_FASTCOUNTER_ADDR,
+ &data, sizeof(data));
+ data = minimum(MAX_UINT16, slow / FW_SCHED_LOOP_FREQ_DIV1000);
+ omap_abe_mem_write(abe, OMAP_ABE_DMEM, OMAP_ABE_D_SLOWCOUNTER_ADDR,
+ &data, sizeof(data));
+ return 0;
+}
+EXPORT_SYMBOL(abe_set_sequence_time_accuracy);
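To make the registration/dispatch flow in abe_seq.c concrete, here is a caller-side sketch. my_route_update is a hypothetical callback (not part of the driver), and note that abe_call_subroutine may substitute the parameters captured at registration time when a parameter block was attached.

	/* hypothetical two-parameter callback */
	static void my_route_update(u32 port, u32 on)
	{
	}

	u32 sub_id;

	/* register it; no fixed parameter block is attached (last argument NULL) */
	abe_add_subroutine(&sub_id, (abe_subroutine2) my_route_update,
			   SUB_2_PARAM, (u32 *) 0);

	/* dispatch later by index; the unused p3/p4 slots are passed as 0 */
	abe_call_subroutine(sub_id, OMAP_ABE_MM_DL_PORT, 1, 0, 0);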
diff --git a/sound/soc/omap/abe/abe_seq.h b/sound/soc/omap/abe/abe_seq.h
new file mode 100644
index 0000000..e5047ad
--- /dev/null
+++ b/sound/soc/omap/abe/abe_seq.h
@@ -0,0 +1,64 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_SEQ_H_
+#define _ABE_SEQ_H_
+
+void omap_abe_reset_all_sequence(struct omap_abe *abe);
+
+#endif /* _ABE_SEQ_H_ */
diff --git a/sound/soc/omap/abe/abe_sm_addr.h b/sound/soc/omap/abe/abe_sm_addr.h
new file mode 100644
index 0000000..514ed0c
--- /dev/null
+++ b/sound/soc/omap/abe/abe_sm_addr.h
@@ -0,0 +1,363 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#define OMAP_ABE_INIT_SM_ADDR 0x0
+#define OMAP_ABE_INIT_SM_SIZE 0xC80
+#define OMAP_ABE_S_DATA0_ADDR 0xC80
+#define OMAP_ABE_S_DATA0_SIZE 0x8
+#define OMAP_ABE_S_TEMP_ADDR 0xC88
+#define OMAP_ABE_S_TEMP_SIZE 0x8
+#define OMAP_ABE_S_PHOENIXOFFSET_ADDR 0xC90
+#define OMAP_ABE_S_PHOENIXOFFSET_SIZE 0x8
+#define OMAP_ABE_S_GTARGET1_ADDR 0xC98
+#define OMAP_ABE_S_GTARGET1_SIZE 0x38
+#define OMAP_ABE_S_GTARGET_DL1_ADDR 0xCD0
+#define OMAP_ABE_S_GTARGET_DL1_SIZE 0x10
+#define OMAP_ABE_S_GTARGET_DL2_ADDR 0xCE0
+#define OMAP_ABE_S_GTARGET_DL2_SIZE 0x10
+#define OMAP_ABE_S_GTARGET_ECHO_ADDR 0xCF0
+#define OMAP_ABE_S_GTARGET_ECHO_SIZE 0x8
+#define OMAP_ABE_S_GTARGET_SDT_ADDR 0xCF8
+#define OMAP_ABE_S_GTARGET_SDT_SIZE 0x8
+#define OMAP_ABE_S_GTARGET_VXREC_ADDR 0xD00
+#define OMAP_ABE_S_GTARGET_VXREC_SIZE 0x10
+#define OMAP_ABE_S_GTARGET_UL_ADDR 0xD10
+#define OMAP_ABE_S_GTARGET_UL_SIZE 0x10
+#define OMAP_ABE_S_GTARGET_BTUL_ADDR 0xD20
+#define OMAP_ABE_S_GTARGET_BTUL_SIZE 0x8
+#define OMAP_ABE_S_GCURRENT_ADDR 0xD28
+#define OMAP_ABE_S_GCURRENT_SIZE 0x90
+#define OMAP_ABE_S_GAIN_ONE_ADDR 0xDB8
+#define OMAP_ABE_S_GAIN_ONE_SIZE 0x8
+#define OMAP_ABE_S_TONES_ADDR 0xDC0
+#define OMAP_ABE_S_TONES_SIZE 0x60
+#define OMAP_ABE_S_VX_DL_ADDR 0xE20
+#define OMAP_ABE_S_VX_DL_SIZE 0x60
+#define OMAP_ABE_S_MM_UL2_ADDR 0xE80
+#define OMAP_ABE_S_MM_UL2_SIZE 0x60
+#define OMAP_ABE_S_MM_DL_ADDR 0xEE0
+#define OMAP_ABE_S_MM_DL_SIZE 0x60
+#define OMAP_ABE_S_DL1_M_OUT_ADDR 0xF40
+#define OMAP_ABE_S_DL1_M_OUT_SIZE 0x60
+#define OMAP_ABE_S_DL2_M_OUT_ADDR 0xFA0
+#define OMAP_ABE_S_DL2_M_OUT_SIZE 0x60
+#define OMAP_ABE_S_ECHO_M_OUT_ADDR 0x1000
+#define OMAP_ABE_S_ECHO_M_OUT_SIZE 0x60
+#define OMAP_ABE_S_SDT_M_OUT_ADDR 0x1060
+#define OMAP_ABE_S_SDT_M_OUT_SIZE 0x60
+#define OMAP_ABE_S_VX_UL_ADDR 0x10C0
+#define OMAP_ABE_S_VX_UL_SIZE 0x60
+#define OMAP_ABE_S_VX_UL_M_ADDR 0x1120
+#define OMAP_ABE_S_VX_UL_M_SIZE 0x60
+#define OMAP_ABE_S_BT_DL_ADDR 0x1180
+#define OMAP_ABE_S_BT_DL_SIZE 0x60
+#define OMAP_ABE_S_BT_UL_ADDR 0x11E0
+#define OMAP_ABE_S_BT_UL_SIZE 0x60
+#define OMAP_ABE_S_BT_DL_8K_ADDR 0x1240
+#define OMAP_ABE_S_BT_DL_8K_SIZE 0x18
+#define OMAP_ABE_S_BT_DL_16K_ADDR 0x1258
+#define OMAP_ABE_S_BT_DL_16K_SIZE 0x28
+#define OMAP_ABE_S_BT_UL_8K_ADDR 0x1280
+#define OMAP_ABE_S_BT_UL_8K_SIZE 0x10
+#define OMAP_ABE_S_BT_UL_16K_ADDR 0x1290
+#define OMAP_ABE_S_BT_UL_16K_SIZE 0x20
+#define OMAP_ABE_S_SDT_F_ADDR 0x12B0
+#define OMAP_ABE_S_SDT_F_SIZE 0x60
+#define OMAP_ABE_S_SDT_F_DATA_ADDR 0x1310
+#define OMAP_ABE_S_SDT_F_DATA_SIZE 0x48
+#define OMAP_ABE_S_MM_DL_OSR_ADDR 0x1358
+#define OMAP_ABE_S_MM_DL_OSR_SIZE 0xC0
+#define OMAP_ABE_S_24_ZEROS_ADDR 0x1418
+#define OMAP_ABE_S_24_ZEROS_SIZE 0xC0
+#define OMAP_ABE_S_DMIC1_ADDR 0x14D8
+#define OMAP_ABE_S_DMIC1_SIZE 0x60
+#define OMAP_ABE_S_DMIC2_ADDR 0x1538
+#define OMAP_ABE_S_DMIC2_SIZE 0x60
+#define OMAP_ABE_S_DMIC3_ADDR 0x1598
+#define OMAP_ABE_S_DMIC3_SIZE 0x60
+#define OMAP_ABE_S_AMIC_ADDR 0x15F8
+#define OMAP_ABE_S_AMIC_SIZE 0x60
+#define OMAP_ABE_S_DMIC1_L_ADDR 0x1658
+#define OMAP_ABE_S_DMIC1_L_SIZE 0x60
+#define OMAP_ABE_S_DMIC1_R_ADDR 0x16B8
+#define OMAP_ABE_S_DMIC1_R_SIZE 0x60
+#define OMAP_ABE_S_DMIC2_L_ADDR 0x1718
+#define OMAP_ABE_S_DMIC2_L_SIZE 0x60
+#define OMAP_ABE_S_DMIC2_R_ADDR 0x1778
+#define OMAP_ABE_S_DMIC2_R_SIZE 0x60
+#define OMAP_ABE_S_DMIC3_L_ADDR 0x17D8
+#define OMAP_ABE_S_DMIC3_L_SIZE 0x60
+#define OMAP_ABE_S_DMIC3_R_ADDR 0x1838
+#define OMAP_ABE_S_DMIC3_R_SIZE 0x60
+#define OMAP_ABE_S_BT_UL_L_ADDR 0x1898
+#define OMAP_ABE_S_BT_UL_L_SIZE 0x60
+#define OMAP_ABE_S_BT_UL_R_ADDR 0x18F8
+#define OMAP_ABE_S_BT_UL_R_SIZE 0x60
+#define OMAP_ABE_S_AMIC_L_ADDR 0x1958
+#define OMAP_ABE_S_AMIC_L_SIZE 0x60
+#define OMAP_ABE_S_AMIC_R_ADDR 0x19B8
+#define OMAP_ABE_S_AMIC_R_SIZE 0x60
+#define OMAP_ABE_S_ECHOREF_L_ADDR 0x1A18
+#define OMAP_ABE_S_ECHOREF_L_SIZE 0x60
+#define OMAP_ABE_S_ECHOREF_R_ADDR 0x1A78
+#define OMAP_ABE_S_ECHOREF_R_SIZE 0x60
+#define OMAP_ABE_S_MM_DL_L_ADDR 0x1AD8
+#define OMAP_ABE_S_MM_DL_L_SIZE 0x60
+#define OMAP_ABE_S_MM_DL_R_ADDR 0x1B38
+#define OMAP_ABE_S_MM_DL_R_SIZE 0x60
+#define OMAP_ABE_S_MM_UL_ADDR 0x1B98
+#define OMAP_ABE_S_MM_UL_SIZE 0x3C0
+#define OMAP_ABE_S_AMIC_96K_ADDR 0x1F58
+#define OMAP_ABE_S_AMIC_96K_SIZE 0xC0
+#define OMAP_ABE_S_DMIC0_96K_ADDR 0x2018
+#define OMAP_ABE_S_DMIC0_96K_SIZE 0xC0
+#define OMAP_ABE_S_DMIC1_96K_ADDR 0x20D8
+#define OMAP_ABE_S_DMIC1_96K_SIZE 0xC0
+#define OMAP_ABE_S_DMIC2_96K_ADDR 0x2198
+#define OMAP_ABE_S_DMIC2_96K_SIZE 0xC0
+#define OMAP_ABE_S_UL_VX_UL_48_8K_ADDR 0x2258
+#define OMAP_ABE_S_UL_VX_UL_48_8K_SIZE 0x60
+#define OMAP_ABE_S_UL_VX_UL_48_16K_ADDR 0x22B8
+#define OMAP_ABE_S_UL_VX_UL_48_16K_SIZE 0x60
+#define OMAP_ABE_S_UL_MIC_48K_ADDR 0x2318
+#define OMAP_ABE_S_UL_MIC_48K_SIZE 0x60
+#define OMAP_ABE_S_VOICE_8K_UL_ADDR 0x2378
+#define OMAP_ABE_S_VOICE_8K_UL_SIZE 0x18
+#define OMAP_ABE_S_VOICE_8K_DL_ADDR 0x2390
+#define OMAP_ABE_S_VOICE_8K_DL_SIZE 0x10
+#define OMAP_ABE_S_MCPDM_OUT1_ADDR 0x23A0
+#define OMAP_ABE_S_MCPDM_OUT1_SIZE 0xC0
+#define OMAP_ABE_S_MCPDM_OUT2_ADDR 0x2460
+#define OMAP_ABE_S_MCPDM_OUT2_SIZE 0xC0
+#define OMAP_ABE_S_MCPDM_OUT3_ADDR 0x2520
+#define OMAP_ABE_S_MCPDM_OUT3_SIZE 0xC0
+#define OMAP_ABE_S_VOICE_16K_UL_ADDR 0x25E0
+#define OMAP_ABE_S_VOICE_16K_UL_SIZE 0x28
+#define OMAP_ABE_S_VOICE_16K_DL_ADDR 0x2608
+#define OMAP_ABE_S_VOICE_16K_DL_SIZE 0x20
+#define OMAP_ABE_S_XINASRC_DL_VX_ADDR 0x2628
+#define OMAP_ABE_S_XINASRC_DL_VX_SIZE 0x140
+#define OMAP_ABE_S_XINASRC_UL_VX_ADDR 0x2768
+#define OMAP_ABE_S_XINASRC_UL_VX_SIZE 0x140
+#define OMAP_ABE_S_XINASRC_MM_EXT_IN_ADDR 0x28A8
+#define OMAP_ABE_S_XINASRC_MM_EXT_IN_SIZE 0x140
+#define OMAP_ABE_S_VX_REC_ADDR 0x29E8
+#define OMAP_ABE_S_VX_REC_SIZE 0x60
+#define OMAP_ABE_S_VX_REC_L_ADDR 0x2A48
+#define OMAP_ABE_S_VX_REC_L_SIZE 0x60
+#define OMAP_ABE_S_VX_REC_R_ADDR 0x2AA8
+#define OMAP_ABE_S_VX_REC_R_SIZE 0x60
+#define OMAP_ABE_S_DL2_M_L_ADDR 0x2B08
+#define OMAP_ABE_S_DL2_M_L_SIZE 0x60
+#define OMAP_ABE_S_DL2_M_R_ADDR 0x2B68
+#define OMAP_ABE_S_DL2_M_R_SIZE 0x60
+#define OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR 0x2BC8
+#define OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE 0xC8
+#define OMAP_ABE_S_DL1_M_EQ_DATA_ADDR 0x2C90
+#define OMAP_ABE_S_DL1_M_EQ_DATA_SIZE 0xC8
+#define OMAP_ABE_S_EARP_48_96_LP_DATA_ADDR 0x2D58
+#define OMAP_ABE_S_EARP_48_96_LP_DATA_SIZE 0x78
+#define OMAP_ABE_S_IHF_48_96_LP_DATA_ADDR 0x2DD0
+#define OMAP_ABE_S_IHF_48_96_LP_DATA_SIZE 0x78
+#define OMAP_ABE_S_VX_UL_8_TEMP_ADDR 0x2E48
+#define OMAP_ABE_S_VX_UL_8_TEMP_SIZE 0x10
+#define OMAP_ABE_S_VX_UL_16_TEMP_ADDR 0x2E58
+#define OMAP_ABE_S_VX_UL_16_TEMP_SIZE 0x20
+#define OMAP_ABE_S_VX_DL_8_48_LP_DATA_ADDR 0x2E78
+#define OMAP_ABE_S_VX_DL_8_48_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_VX_DL_8_48_HP_DATA_ADDR 0x2EE0
+#define OMAP_ABE_S_VX_DL_8_48_HP_DATA_SIZE 0x38
+#define OMAP_ABE_S_VX_DL_16_48_LP_DATA_ADDR 0x2F18
+#define OMAP_ABE_S_VX_DL_16_48_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_VX_DL_16_48_HP_DATA_ADDR 0x2F80
+#define OMAP_ABE_S_VX_DL_16_48_HP_DATA_SIZE 0x28
+#define OMAP_ABE_S_VX_UL_48_8_LP_DATA_ADDR 0x2FA8
+#define OMAP_ABE_S_VX_UL_48_8_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_VX_UL_48_8_HP_DATA_ADDR 0x3010
+#define OMAP_ABE_S_VX_UL_48_8_HP_DATA_SIZE 0x38
+#define OMAP_ABE_S_VX_UL_48_16_LP_DATA_ADDR 0x3048
+#define OMAP_ABE_S_VX_UL_48_16_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_VX_UL_48_16_HP_DATA_ADDR 0x30B0
+#define OMAP_ABE_S_VX_UL_48_16_HP_DATA_SIZE 0x28
+#define OMAP_ABE_S_BT_UL_8_48_LP_DATA_ADDR 0x30D8
+#define OMAP_ABE_S_BT_UL_8_48_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_BT_UL_8_48_HP_DATA_ADDR 0x3140
+#define OMAP_ABE_S_BT_UL_8_48_HP_DATA_SIZE 0x38
+#define OMAP_ABE_S_BT_UL_16_48_LP_DATA_ADDR 0x3178
+#define OMAP_ABE_S_BT_UL_16_48_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_BT_UL_16_48_HP_DATA_ADDR 0x31E0
+#define OMAP_ABE_S_BT_UL_16_48_HP_DATA_SIZE 0x28
+#define OMAP_ABE_S_BT_DL_48_8_LP_DATA_ADDR 0x3208
+#define OMAP_ABE_S_BT_DL_48_8_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_BT_DL_48_8_HP_DATA_ADDR 0x3270
+#define OMAP_ABE_S_BT_DL_48_8_HP_DATA_SIZE 0x38
+#define OMAP_ABE_S_BT_DL_48_16_LP_DATA_ADDR 0x32A8
+#define OMAP_ABE_S_BT_DL_48_16_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_BT_DL_48_16_HP_DATA_ADDR 0x3310
+#define OMAP_ABE_S_BT_DL_48_16_HP_DATA_SIZE 0x28
+#define OMAP_ABE_S_ECHO_REF_48_8_LP_DATA_ADDR 0x3338
+#define OMAP_ABE_S_ECHO_REF_48_8_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_ECHO_REF_48_8_HP_DATA_ADDR 0x33A0
+#define OMAP_ABE_S_ECHO_REF_48_8_HP_DATA_SIZE 0x38
+#define OMAP_ABE_S_ECHO_REF_48_16_LP_DATA_ADDR 0x33D8
+#define OMAP_ABE_S_ECHO_REF_48_16_LP_DATA_SIZE 0x68
+#define OMAP_ABE_S_ECHO_REF_48_16_HP_DATA_ADDR 0x3440
+#define OMAP_ABE_S_ECHO_REF_48_16_HP_DATA_SIZE 0x28
+#define OMAP_ABE_S_XINASRC_ECHO_REF_ADDR 0x3468
+#define OMAP_ABE_S_XINASRC_ECHO_REF_SIZE 0x140
+#define OMAP_ABE_S_ECHO_REF_16K_ADDR 0x35A8
+#define OMAP_ABE_S_ECHO_REF_16K_SIZE 0x28
+#define OMAP_ABE_S_ECHO_REF_8K_ADDR 0x35D0
+#define OMAP_ABE_S_ECHO_REF_8K_SIZE 0x18
+#define OMAP_ABE_S_DL1_EQ_ADDR 0x35E8
+#define OMAP_ABE_S_DL1_EQ_SIZE 0x60
+#define OMAP_ABE_S_DL2_EQ_ADDR 0x3648
+#define OMAP_ABE_S_DL2_EQ_SIZE 0x60
+#define OMAP_ABE_S_DL1_GAIN_OUT_ADDR 0x36A8
+#define OMAP_ABE_S_DL1_GAIN_OUT_SIZE 0x60
+#define OMAP_ABE_S_DL2_GAIN_OUT_ADDR 0x3708
+#define OMAP_ABE_S_DL2_GAIN_OUT_SIZE 0x60
+#define OMAP_ABE_S_DC_HS_ADDR 0x3768
+#define OMAP_ABE_S_DC_HS_SIZE 0x8
+#define OMAP_ABE_S_DC_HF_ADDR 0x3770
+#define OMAP_ABE_S_DC_HF_SIZE 0x8
+#define OMAP_ABE_S_VIBRA_ADDR 0x3778
+#define OMAP_ABE_S_VIBRA_SIZE 0x30
+#define OMAP_ABE_S_VIBRA2_IN_ADDR 0x37A8
+#define OMAP_ABE_S_VIBRA2_IN_SIZE 0x30
+#define OMAP_ABE_S_VIBRA2_ADDR_ADDR 0x37D8
+#define OMAP_ABE_S_VIBRA2_ADDR_SIZE 0x8
+#define OMAP_ABE_S_VIBRACTRL_FORRIGHTSM_ADDR 0x37E0
+#define OMAP_ABE_S_VIBRACTRL_FORRIGHTSM_SIZE 0xC0
+#define OMAP_ABE_S_RNOISE_MEM_ADDR 0x38A0
+#define OMAP_ABE_S_RNOISE_MEM_SIZE 0x8
+#define OMAP_ABE_S_CTRL_ADDR 0x38A8
+#define OMAP_ABE_S_CTRL_SIZE 0x90
+#define OMAP_ABE_S_VIBRA1_IN_ADDR 0x3938
+#define OMAP_ABE_S_VIBRA1_IN_SIZE 0x30
+#define OMAP_ABE_S_VIBRA1_TEMP_ADDR 0x3968
+#define OMAP_ABE_S_VIBRA1_TEMP_SIZE 0xC0
+#define OMAP_ABE_S_VIBRACTRL_FORLEFTSM_ADDR 0x3A28
+#define OMAP_ABE_S_VIBRACTRL_FORLEFTSM_SIZE 0xC0
+#define OMAP_ABE_S_VIBRA1_MEM_ADDR 0x3AE8
+#define OMAP_ABE_S_VIBRA1_MEM_SIZE 0x58
+#define OMAP_ABE_S_VIBRACTRL_STEREO_ADDR 0x3B40
+#define OMAP_ABE_S_VIBRACTRL_STEREO_SIZE 0xC0
+#define OMAP_ABE_S_AMIC_96_48_DATA_ADDR 0x3C00
+#define OMAP_ABE_S_AMIC_96_48_DATA_SIZE 0x98
+#define OMAP_ABE_S_DMIC0_96_48_DATA_ADDR 0x3C98
+#define OMAP_ABE_S_DMIC0_96_48_DATA_SIZE 0x98
+#define OMAP_ABE_S_DMIC1_96_48_DATA_ADDR 0x3D30
+#define OMAP_ABE_S_DMIC1_96_48_DATA_SIZE 0x98
+#define OMAP_ABE_S_DMIC2_96_48_DATA_ADDR 0x3DC8
+#define OMAP_ABE_S_DMIC2_96_48_DATA_SIZE 0x98
+#define OMAP_ABE_S_DBG_8K_PATTERN_ADDR 0x3E60
+#define OMAP_ABE_S_DBG_8K_PATTERN_SIZE 0x10
+#define OMAP_ABE_S_DBG_16K_PATTERN_ADDR 0x3E70
+#define OMAP_ABE_S_DBG_16K_PATTERN_SIZE 0x20
+#define OMAP_ABE_S_DBG_24K_PATTERN_ADDR 0x3E90
+#define OMAP_ABE_S_DBG_24K_PATTERN_SIZE 0x30
+#define OMAP_ABE_S_DBG_48K_PATTERN_ADDR 0x3EC0
+#define OMAP_ABE_S_DBG_48K_PATTERN_SIZE 0x60
+#define OMAP_ABE_S_DBG_96K_PATTERN_ADDR 0x3F20
+#define OMAP_ABE_S_DBG_96K_PATTERN_SIZE 0xC0
+#define OMAP_ABE_S_MM_EXT_IN_ADDR 0x3FE0
+#define OMAP_ABE_S_MM_EXT_IN_SIZE 0x60
+#define OMAP_ABE_S_MM_EXT_IN_L_ADDR 0x4040
+#define OMAP_ABE_S_MM_EXT_IN_L_SIZE 0x60
+#define OMAP_ABE_S_MM_EXT_IN_R_ADDR 0x40A0
+#define OMAP_ABE_S_MM_EXT_IN_R_SIZE 0x60
+#define OMAP_ABE_S_MIC4_ADDR 0x4100
+#define OMAP_ABE_S_MIC4_SIZE 0x60
+#define OMAP_ABE_S_MIC4_L_ADDR 0x4160
+#define OMAP_ABE_S_MIC4_L_SIZE 0x60
+#define OMAP_ABE_S_SATURATION_7FFF_ADDR 0x41C0
+#define OMAP_ABE_S_SATURATION_7FFF_SIZE 0x8
+#define OMAP_ABE_S_SATURATION_ADDR 0x41C8
+#define OMAP_ABE_S_SATURATION_SIZE 0x8
+#define OMAP_ABE_S_XINASRC_BT_UL_ADDR 0x41D0
+#define OMAP_ABE_S_XINASRC_BT_UL_SIZE 0x140
+#define OMAP_ABE_S_XINASRC_BT_DL_ADDR 0x4310
+#define OMAP_ABE_S_XINASRC_BT_DL_SIZE 0x140
+#define OMAP_ABE_S_BT_DL_8K_TEMP_ADDR 0x4450
+#define OMAP_ABE_S_BT_DL_8K_TEMP_SIZE 0x10
+#define OMAP_ABE_S_BT_DL_16K_TEMP_ADDR 0x4460
+#define OMAP_ABE_S_BT_DL_16K_TEMP_SIZE 0x20
+#define OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_ADDR 0x4480
+#define OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_SIZE 0xE0
+#define OMAP_ABE_S_BT_UL_8_48_OSR_LP_DATA_ADDR 0x4560
+#define OMAP_ABE_S_BT_UL_8_48_OSR_LP_DATA_SIZE 0xE0
+#define OMAP_ABE_S_MM_DL_44P1_ADDR 0x4640
+#define OMAP_ABE_S_MM_DL_44P1_SIZE 0x300
+#define OMAP_ABE_S_TONES_44P1_ADDR 0x4940
+#define OMAP_ABE_S_TONES_44P1_SIZE 0x300
+#define OMAP_ABE_S_MM_DL_44P1_XK_ADDR 0x4C40
+#define OMAP_ABE_S_MM_DL_44P1_XK_SIZE 0x10
+#define OMAP_ABE_S_TONES_44P1_XK_ADDR 0x4C50
+#define OMAP_ABE_S_TONES_44P1_XK_SIZE 0x10
+#define OMAP_ABE_S_SRC_44P1_MULFAC1_ADDR 0x4C60
+#define OMAP_ABE_S_SRC_44P1_MULFAC1_SIZE 0x8
+#define OMAP_ABE_S_SATURATION_EQ_ADDR 0x4C68
+#define OMAP_ABE_S_SATURATION_EQ_SIZE 0x8
+#define OMAP_ABE_S_BT_DL_48_8_LP_NEW_DATA_ADDR 0x4C70
+#define OMAP_ABE_S_BT_DL_48_8_LP_NEW_DATA_SIZE 0x88
+#define OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_ADDR 0x4CF8
+#define OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_SIZE 0x3C8
+#define OMAP_ABE_S_VX_UL_48_8_LP_NEW_DATA_ADDR 0x50C0
+#define OMAP_ABE_S_VX_UL_48_8_LP_NEW_DATA_SIZE 0x88
+#define OMAP_ABE_S_VX_UL_8_48_OSR_LP_DATA_ADDR 0x5148
+#define OMAP_ABE_S_VX_UL_8_48_OSR_LP_DATA_SIZE 0x3C8
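Each _ADDR/_SIZE pair above describes one SMEM window; a write would follow the same omap_abe_mem_write() pattern used for DMEM in abe_seq.c. The OMAP_ABE_SMEM bank identifier is an assumption here, and the all-zero payload is purely illustrative.

	u32 gains[OMAP_ABE_S_GTARGET_DL1_SIZE / sizeof(u32)] = { 0 };

	/* assumed bank id OMAP_ABE_SMEM; payload content is illustrative only */
	omap_abe_mem_write(abe, OMAP_ABE_SMEM, OMAP_ABE_S_GTARGET_DL1_ADDR,
			   gains, sizeof(gains));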
diff --git a/sound/soc/omap/abe/abe_taskid.h b/sound/soc/omap/abe/abe_taskid.h
new file mode 100644
index 0000000..abf31f3
--- /dev/null
+++ b/sound/soc/omap/abe/abe_taskid.h
@@ -0,0 +1,196 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _ABE_TASKID_H_
+#define _ABE_TASKID_H_
+#define C_ABE_FW_TASK_ASRC_VX_DL_8 0
+#define C_ABE_FW_TASK_ASRC_VX_DL_16 1
+#define C_ABE_FW_TASK_ASRC_VX_DL_8_SIB 2
+#define C_ABE_FW_TASK_ASRC_VX_DL_16_SIB 3
+#define C_ABE_FW_TASK_ASRC_MM_EXT_IN 4
+#define C_ABE_FW_TASK_ASRC_VX_UL_8 5
+#define C_ABE_FW_TASK_ASRC_VX_UL_16 6
+#define C_ABE_FW_TASK_ASRC_VX_UL_8_SIB 7
+#define C_ABE_FW_TASK_ASRC_VX_UL_16_SIB 8
+#define C_ABE_FW_TASK_VX_UL_48_8_DEC 9
+#define C_ABE_FW_TASK_VX_UL_48_16_DEC 10
+#define C_ABE_FW_TASK_BT_DL_48_8_DEC 11
+#define C_ABE_FW_TASK_BT_DL_48_16_DEC 12
+#define C_ABE_FW_TASK_ECHO_REF_48_8_DEC 13
+#define C_ABE_FW_TASK_ECHO_REF_48_16_DEC 14
+#define C_ABE_FW_TASK_DL2_EQ 15
+#define C_ABE_FW_TASK_ECHO_REF_48_16 16
+#define C_ABE_FW_TASK_ECHO_REF_48_8 17
+#define C_ABE_FW_TASK_GAIN_UPDATE 18
+#define C_ABE_FW_TASK_SideTone 19
+#define C_ABE_FW_TASK_VX_DL_8_48_LP 20
+#define C_ABE_FW_TASK_VX_DL_8_48_HP 21
+#define C_ABE_FW_TASK_VX_DL_16_48_LP 22
+#define C_ABE_FW_TASK_VX_DL_16_48_HP 23
+#define C_ABE_FW_TASK_VX_UL_48_8_LP 24
+#define C_ABE_FW_TASK_VX_UL_48_8_HP 25
+#define C_ABE_FW_TASK_VX_UL_48_16_LP 26
+#define C_ABE_FW_TASK_VX_UL_48_16_HP 27
+#define C_ABE_FW_TASK_BT_UL_8_48_LP 28
+#define C_ABE_FW_TASK_BT_UL_8_48_HP 29
+#define C_ABE_FW_TASK_BT_UL_16_48_LP 30
+#define C_ABE_FW_TASK_BT_UL_16_48_HP 31
+#define C_ABE_FW_TASK_BT_DL_48_8_LP 32
+#define C_ABE_FW_TASK_BT_DL_48_8_HP 33
+#define C_ABE_FW_TASK_BT_DL_48_16_LP 34
+#define C_ABE_FW_TASK_BT_DL_48_16_HP 35
+#define C_ABE_FW_TASK_ECHO_REF_48_8_LP 36
+#define C_ABE_FW_TASK_ECHO_REF_48_8_HP 37
+#define C_ABE_FW_TASK_ECHO_REF_48_16_LP 38
+#define C_ABE_FW_TASK_ECHO_REF_48_16_HP 39
+#define C_ABE_FW_TASK_DL1_EQ 40
+#define C_ABE_FW_TASK_IHF_48_96_LP 41
+#define C_ABE_FW_TASK_EARP_48_96_LP 42
+#define C_ABE_FW_TASK_DL1_GAIN 43
+#define C_ABE_FW_TASK_DL2_GAIN 44
+#define C_ABE_FW_TASK_IO_PING_PONG 45
+#define C_ABE_FW_TASK_IO_DMIC 46
+#define C_ABE_FW_TASK_IO_PDM_UL 47
+#define C_ABE_FW_TASK_IO_BT_VX_UL 48
+#define C_ABE_FW_TASK_IO_MM_UL 49
+#define C_ABE_FW_TASK_IO_MM_UL2 50
+#define C_ABE_FW_TASK_IO_VX_UL 51
+#define C_ABE_FW_TASK_IO_MM_DL 52
+#define C_ABE_FW_TASK_IO_VX_DL 53
+#define C_ABE_FW_TASK_IO_TONES_DL 54
+#define C_ABE_FW_TASK_IO_VIB_DL 55
+#define C_ABE_FW_TASK_IO_BT_VX_DL 56
+#define C_ABE_FW_TASK_IO_PDM_DL 57
+#define C_ABE_FW_TASK_IO_MM_EXT_OUT 58
+#define C_ABE_FW_TASK_IO_MM_EXT_IN 59
+#define C_ABE_FW_TASK_DEBUG_IRQFIFO 60
+#define C_ABE_FW_TASK_EchoMixer 61
+#define C_ABE_FW_TASK_SDTMixer 62
+#define C_ABE_FW_TASK_DL1Mixer 63
+#define C_ABE_FW_TASK_DL2Mixer 64
+#define C_ABE_FW_TASK_DL1Mixer_dual_mono 65
+#define C_ABE_FW_TASK_DL2Mixer_dual_mono 66
+#define C_ABE_FW_TASK_VXRECMixer 67
+#define C_ABE_FW_TASK_ULMixer 68
+#define C_ABE_FW_TASK_ULMixer_dual_mono 69
+#define C_ABE_FW_TASK_VIBRA_PACK 70
+#define C_ABE_FW_TASK_VX_DL_8_48_0SR 71
+#define C_ABE_FW_TASK_VX_DL_16_48_0SR 72
+#define C_ABE_FW_TASK_BT_UL_8_48_0SR 73
+#define C_ABE_FW_TASK_BT_UL_16_48_0SR 74
+#define C_ABE_FW_TASK_IHF_48_96_0SR 75
+#define C_ABE_FW_TASK_EARP_48_96_0SR 76
+#define C_ABE_FW_TASK_AMIC_SPLIT 77
+#define C_ABE_FW_TASK_DMIC1_SPLIT 78
+#define C_ABE_FW_TASK_DMIC2_SPLIT 79
+#define C_ABE_FW_TASK_DMIC3_SPLIT 80
+#define C_ABE_FW_TASK_VXREC_SPLIT 81
+#define C_ABE_FW_TASK_BT_UL_SPLIT 82
+#define C_ABE_FW_TASK_MM_SPLIT 83
+#define C_ABE_FW_TASK_VIBRA_SPLIT 84
+#define C_ABE_FW_TASK_MM_EXT_IN_SPLIT 85
+#define C_ABE_FW_TASK_ECHO_REF_SPLIT 86
+#define C_ABE_FW_TASK_UNUSED_1 87
+#define C_ABE_FW_TASK_VX_UL_ROUTING 88
+#define C_ABE_FW_TASK_MM_UL2_ROUTING 89
+#define C_ABE_FW_TASK_VIBRA1 90
+#define C_ABE_FW_TASK_VIBRA2 91
+#define C_ABE_FW_TASK_BT_UL_16_48 92
+#define C_ABE_FW_TASK_BT_UL_8_48 93
+#define C_ABE_FW_TASK_BT_DL_48_16 94
+#define C_ABE_FW_TASK_BT_DL_48_8 95
+#define C_ABE_FW_TASK_VX_DL_16_48 96
+#define C_ABE_FW_TASK_VX_DL_8_48 97
+#define C_ABE_FW_TASK_VX_UL_48_16 98
+#define C_ABE_FW_TASK_VX_UL_48_8 99
+#define C_ABE_FW_TASK_DBG_SYNC 100
+#define C_ABE_FW_TASK_AMIC_96_48_LP 101
+#define C_ABE_FW_TASK_DMIC1_96_48_LP 102
+#define C_ABE_FW_TASK_DMIC2_96_48_LP 103
+#define C_ABE_FW_TASK_DMIC3_96_48_LP 104
+#define C_ABE_FW_TASK_INIT_FW_MEMORY 105
+#define C_ABE_FW_TASK_DEBUGTRACE_VX_ASRCs 106
+#define C_ABE_FW_TASK_ASRC_BT_UL_8 107
+#define C_ABE_FW_TASK_ASRC_BT_UL_16 108
+#define C_ABE_FW_TASK_ASRC_BT_UL_8_SIB 109
+#define C_ABE_FW_TASK_ASRC_BT_UL_16_SIB 110
+#define C_ABE_FW_TASK_ASRC_BT_DL_8 111
+#define C_ABE_FW_TASK_ASRC_BT_DL_16 112
+#define C_ABE_FW_TASK_ASRC_BT_DL_8_SIB 113
+#define C_ABE_FW_TASK_ASRC_BT_DL_16_SIB 114
+#define C_ABE_FW_TASK_BT_DL_48_8_HP_OPP100 115
+#define C_ABE_FW_TASK_BT_DL_48_16_HP_OPP100 116
+#define C_ABE_FW_TASK_BT_DL_48_8_OPP100 117
+#define C_ABE_FW_TASK_BT_DL_48_16_OPP100 118
+#define C_ABE_FW_TASK_VX_DL_8_48_OSR_LP 119
+#define C_ABE_FW_TASK_VX_DL_8_48_FIR 120
+#define C_ABE_FW_TASK_BT_UL_8_48_OSR_LP 121
+#define C_ABE_FW_TASK_BT_UL_8_48_FIR 122
+#define C_ABE_FW_TASK_SRC44P1_MMDL 123
+#define C_ABE_FW_TASK_SRC44P1_TONES 124
+#define C_ABE_FW_TASK_SRC44P1_MMDL_1211 125
+#define C_ABE_FW_TASK_SRC44P1_TONES_1211 126
+#define C_ABE_FW_TASK_SRC44P1_MMDL_PP 127
+#define C_ABE_FW_TASK_SRC44P1_MMDL_1211_PP 128
+#define C_ABE_FW_TASK_CHECK_IIR_LEFT 129
+#define C_ABE_FW_TASK_CHECK_IIR_RIGHT 130
+#define C_ABE_FW_TASK_BT_DL_48_8_LP_FIR 131
+#define C_ABE_FW_TASK_BT_DL_48_8_FIR 132
+#define C_ABE_FW_TASK_BT_DL_48_8_FIR_OPP100 133
+#define C_ABE_FW_TASK_VX_UL_48_8_FIR 134
+#define C_ABE_FW_TASK_VX_UL_48_8_LP_FIR 135
+#endif /* _ABE_TASKID_H_ */
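The constants above identify individual firmware tasks. A small name table keyed by a few of them, purely for host-side tracing and not part of the firmware interface, could look like this:

	static const char * const abe_task_name[] = {
		[C_ABE_FW_TASK_DL1Mixer]  = "DL1 mixer",
		[C_ABE_FW_TASK_DL2Mixer]  = "DL2 mixer",
		[C_ABE_FW_TASK_SDTMixer]  = "side-tone mixer",
		[C_ABE_FW_TASK_EchoMixer] = "echo mixer",
		[C_ABE_FW_TASK_IO_PDM_DL] = "McPDM downlink I/O",
	};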
diff --git a/sound/soc/omap/abe/abe_typ.h b/sound/soc/omap/abe/abe_typ.h
new file mode 100644
index 0000000..650d043
--- /dev/null
+++ b/sound/soc/omap/abe/abe_typ.h
@@ -0,0 +1,654 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "abe_def.h"
+#include "abe_initxxx_labels.h"
+
+#ifndef _ABE_TYP_H_
+#define _ABE_TYP_H_
+/*
+ * BASIC TYPES
+ */
+#define MAX_UINT8 ((((1L << 7) - 1) << 1) + 1)
+#define MAX_UINT16 ((((1L << 15) - 1) << 1) + 1)
+#define MAX_UINT32 ((((1L << 31) - 1) << 1) + 1)
+#define s8 char
+#define u8 unsigned char
+#define s16 short
+#define u16 unsigned short
+#define s32 int
+#define u32 unsigned int
+/* returned status from HAL APIs */
+#define abehal_status u32
+/* 4-byte bit field indicating the type of information to be traced */
+typedef u32 abe_dbg_mask_t;
+/* scheduling task loops (250 us / 272 us at 48 kHz / 44.1 kHz
+ respectively on Phoenix) */
+typedef u32 abe_dbg_t;
+/* Index to the table of sequences */
+typedef u32 abe_seq_code_t;
+/* Index to the table of subroutines called in the sequence */
+typedef u32 abe_sub_code_t;
+/* subroutine with no parameter */
+typedef void (*abe_subroutine0) (void);
+/* subroutine with one parameter */
+typedef void (*abe_subroutine1) (u32);
+typedef void (*abe_subroutine2) (u32, u32);
+typedef void (*abe_subroutine3) (u32, u32, u32);
+typedef void (*abe_subroutine4) (u32, u32, u32, u32);
+/*
+ * CODE PORTABILITY - FUTURE PATCHES
+ *
+ * 32-bit field for keeping the code compatible with future revisions of
+ * the hardware (audio integration) or evolution of the software
+ * partitioning. Used for the highest level APIs (launch_sequences)
+ */
+typedef u32 abe_patch_rev;
+/*
+ * ENUMS
+ */
+/*
+ * MEMORY CONFIG TYPE
+ *
+ * Audio use-case identifiers (ABE_LAST_USE_CASE marks the end of the list)
+ */
+#define ABE_AUDIO_PLAYER_ON_HEADSET_OR_EARPHONE 1
+#define ABE_DRIFT_MANAGEMENT_FOR_AUDIO_PLAYER 2
+#define ABE_DRIFT_MANAGEMENT_FOR_VOICE_CALL 3
+#define ABE_VOICE_CALL_ON_HEADSET_OR_EARPHONE_OR_BT 4
+#define ABE_MULTIMEDIA_AUDIO_RECORDER 5
+#define ABE_VIBRATOR_OR_HAPTICS 6
+#define ABE_VOICE_CALL_ON_HANDS_FREE_SPEAKER 7
+#define ABE_RINGER_TONES 8
+#define ABE_VOICE_CALL_WITH_EARPHONE_ACTIVE_NOISE_CANCELLER 9
+#define ABE_LAST_USE_CASE 10
+/*
+ * OPP TYPE
+ *
+ * 0: Ultra Lowest power consumption audio player
+ * 1: OPP 25% (simple multimedia features)
+ * 2: OPP 50% (multimedia and voice calls)
+ * 3: OPP100% (multimedia complex use-cases)
+ */
+#define ABE_OPP0 0
+#define ABE_OPP25 1
+#define ABE_OPP50 2
+#define ABE_OPP100 3
+/*
+ * DMIC DECIMATION RATIO
+ *
+ */
+#define ABE_DEC16 16
+#define ABE_DEC25 25
+#define ABE_DEC32 32
+#define ABE_DEC40 40
+/*
+ * SAMPLES TYPE
+ *
+ * mono 16 bit sample LSB aligned, 16 MSB bits are unused;
+ * mono right shifted to 16bits LSBs on a 32bits DMEM FIFO for McBSP
+ * TX purpose;
+ * mono sample MSB aligned (16/24/32bits);
+ * two successive mono samples in one 32bits container;
+ * Two L/R 16bits samples in a 32bits container;
+ * Two channels defined with two MSB aligned samples;
+ * Three channels defined with three MSB aligned samples (MIC);
+ * Four channels defined with four MSB aligned samples (MIC);
+ * . . .
+ * Eight channels defined with eight MSB aligned samples (MIC);
+ */
+#define MONO_MSB 1
+#define MONO_RSHIFTED_16 2
+#define STEREO_RSHIFTED_16 3
+#define STEREO_16_16 4
+#define STEREO_MSB 5
+#define THREE_MSB 6
+#define FOUR_MSB 7
+#define FIVE_MSB 8
+#define SIX_MSB 9
+#define SEVEN_MSB 10
+#define EIGHT_MSB 11
+#define NINE_MSB 12
+#define TEN_MSB 13
+/*
+ * PORT PROTOCOL TYPE - abe_port_protocol_switch_id
+ */
+#define SLIMBUS_PORT_PROT 1
+#define SERIAL_PORT_PROT 2
+#define TDM_SERIAL_PORT_PROT 3
+#define DMIC_PORT_PROT 4
+#define MCPDMDL_PORT_PROT 5
+#define MCPDMUL_PORT_PROT 6
+#define PINGPONG_PORT_PROT 7
+#define DMAREQ_PORT_PROT 8
+/*
+ * PORT IDs, this list is aligned with the FW data mapping
+ */
+#define OMAP_ABE_DMIC_PORT 0
+#define OMAP_ABE_PDM_UL_PORT 1
+#define OMAP_ABE_BT_VX_UL_PORT 2
+#define OMAP_ABE_MM_UL_PORT 3
+#define OMAP_ABE_MM_UL2_PORT 4
+#define OMAP_ABE_VX_UL_PORT 5
+#define OMAP_ABE_MM_DL_PORT 6
+#define OMAP_ABE_VX_DL_PORT 7
+#define OMAP_ABE_TONES_DL_PORT 8
+#define OMAP_ABE_VIB_DL_PORT 9
+#define OMAP_ABE_BT_VX_DL_PORT 10
+#define OMAP_ABE_PDM_DL_PORT 11
+#define OMAP_ABE_MM_EXT_OUT_PORT 12
+#define OMAP_ABE_MM_EXT_IN_PORT 13
+#define TDM_DL_PORT 14
+#define TDM_UL_PORT 15
+#define DEBUG_PORT 16
+#define LAST_PORT_ID 17
+/* definitions for the compatibility with HAL05xx */
+#define PDM_DL1_PORT 18
+#define PDM_DL2_PORT 19
+#define PDM_VIB_PORT 20
+/* There is only one DMIC port, always used with 6 samples
+ per 96 kHz period */
+#define DMIC_PORT1 DMIC_PORT
+#define DMIC_PORT2 DMIC_PORT
+#define DMIC_PORT3 DMIC_PORT
+/*
+ * ABE_DL_SRC_ID source of samples
+ */
+#define SRC_DL1_MIXER_OUTPUT DL1_M_labelID
+#define SRC_SDT_MIXER_OUTPUT SDT_M_labelID
+#define SRC_DL1_GAIN_OUTPUT DL1_GAIN_out_labelID
+#define SRC_DL1_EQ_OUTPUT DL1_EQ_labelID
+#define SRC_DL2_GAIN_OUTPUT DL2_GAIN_out_labelID
+#define SRC_DL2_EQ_OUTPUT DL2_EQ_labelID
+#define SRC_MM_DL MM_DL_labelID
+#define SRC_TONES_DL Tones_labelID
+#define SRC_VX_DL VX_DL_labelID
+#define SRC_VX_UL VX_UL_labelID
+#define SRC_MM_UL2 MM_UL2_labelID
+#define SRC_MM_UL MM_UL_labelID
+/*
+ * abe_patched_pattern_id
+ * selection of the audio engine signal to
+ * replace by a precomputed pattern
+ */
+#define DBG_PATCH_AMIC 1
+#define DBG_PATCH_DMIC1 2
+#define DBG_PATCH_DMIC2 3
+#define DBG_PATCH_DMIC3 4
+#define DBG_PATCH_VX_REC 5
+#define DBG_PATCH_BT_UL 6
+#define DBG_PATCH_MM_DL 7
+#define DBG_PATCH_DL2_EQ 8
+#define DBG_PATCH_VIBRA 9
+#define DBG_PATCH_MM_EXT_IN 10
+#define DBG_PATCH_EANC_FBK_Out 11
+#define DBG_PATCH_MIC4 12
+#define DBG_PATCH_MM_DL_MIXDL1 13
+#define DBG_PATCH_MM_DL_MIXDL2 14
+/*
+ * Signal processing module names - EQ APS MIX ROUT
+ */
+/* equalizer downlink path headset + earphone */
+#define FEAT_EQ1 1
+/* equalizer downlink path integrated handsfree LEFT */
+#define FEAT_EQ2L (FEAT_EQ1+1)
+/* equalizer downlink path integrated handsfree RIGHT */
+#define FEAT_EQ2R (FEAT_EQ2L+1)
+/* equalizer downlink path side-tone */
+#define FEAT_EQSDT (FEAT_EQ2R+1)
+/* equalizer uplink path AMIC */
+#define FEAT_EQAMIC (FEAT_EQSDT+1)
+/* equalizer uplink path DMIC */
+#define FEAT_EQDMIC (FEAT_EQAMIC+1)
+/* Acoustic protection for headset */
+#define FEAT_APS1 (FEAT_EQDMIC+1)
+/* acoustic protection high-pass filter for handsfree "Left" */
+#define FEAT_APS2 (FEAT_APS1+1)
+/* acoustic protection high-pass filter for handsfree "Right" */
+#define FEAT_APS3 (FEAT_APS2+1)
+/* asynchronous sample-rate-converter for the downlink voice path */
+#define FEAT_ASRC1 (FEAT_APS3+1)
+/* asynchronous sample-rate-converter for the uplink voice path */
+#define FEAT_ASRC2 (FEAT_ASRC1+1)
+/* asynchronous sample-rate-converter for the multimedia player */
+#define FEAT_ASRC3 (FEAT_ASRC2+1)
+/* asynchronous sample-rate-converter for the echo reference */
+#define FEAT_ASRC4 (FEAT_ASRC3+1)
+/* mixer of the headset and earphone path */
+#define FEAT_MIXDL1 (FEAT_ASRC4+1)
+/* mixer of the hands-free path */
+#define FEAT_MIXDL2 (FEAT_MIXDL1+1)
+/* mixer for audio being sent on the voice_ul path */
+#define FEAT_MIXAUDUL (FEAT_MIXDL2+1)
+/* mixer for voice communication recording */
+#define FEAT_MIXVXREC (FEAT_MIXAUDUL+1)
+/* mixer for side-tone */
+#define FEAT_MIXSDT (FEAT_MIXVXREC+1)
+/* mixer for echo reference */
+#define FEAT_MIXECHO (FEAT_MIXSDT+1)
+/* router of the uplink path */
+#define FEAT_UPROUTE (FEAT_MIXECHO+1)
+/* all gains */
+#define FEAT_GAINS (FEAT_UPROUTE+1)
+#define FEAT_GAINS_DMIC1 (FEAT_GAINS+1)
+#define FEAT_GAINS_DMIC2 (FEAT_GAINS_DMIC1+1)
+#define FEAT_GAINS_DMIC3 (FEAT_GAINS_DMIC2+1)
+#define FEAT_GAINS_AMIC (FEAT_GAINS_DMIC3+1)
+#define FEAT_GAINS_SPLIT (FEAT_GAINS_AMIC+1)
+#define FEAT_GAINS_DL1 (FEAT_GAINS_SPLIT+1)
+#define FEAT_GAINS_DL2 (FEAT_GAINS_DL1+1)
+#define FEAT_GAIN_BTUL (FEAT_GAINS_DL2+1)
+/* sequencing queue of micro tasks */
+#define FEAT_SEQ (FEAT_GAIN_BTUL+1)
+/* Phoenix control queue through McPDM */
+#define FEAT_CTL (FEAT_SEQ+1)
+/* list of features of the firmware -------------------------------*/
+#define MAXNBFEATURE FEAT_CTL
+/* abe_equ_id */
+/* equalizer downlink path headset + earphone */
+#define EQ1 FEAT_EQ1
+/* equalizer downlink path integrated handsfree LEFT */
+#define EQ2L FEAT_EQ2L
+#define EQ2R FEAT_EQ2R
+/* equalizer downlink path side-tone */
+#define EQSDT FEAT_EQSDT
+#define EQAMIC FEAT_EQAMIC
+#define EQDMIC FEAT_EQDMIC
+/* abe_aps_id */
+/* Acoustic protection for headset */
+#define APS1 FEAT_APS1
+#define APS2L FEAT_APS2
+#define APS2R FEAT_APS3
+/* abe_asrc_id */
+/* asynchronous sample-rate-converter for the downlink voice path */
+#define ASRC1 FEAT_ASRC1
+/* asynchronous sample-rate-converter for the uplink voice path */
+#define ASRC2 FEAT_ASRC2
+/* asynchronous sample-rate-converter for the multimedia player */
+#define ASRC3 FEAT_ASRC3
+/* asynchronous sample-rate-converter for the voice uplink echo_reference */
+#define ASRC4 FEAT_ASRC4
+/* abe_mixer_id */
+#define MIXDL1 FEAT_MIXDL1
+#define MIXDL2 FEAT_MIXDL2
+#define MIXSDT FEAT_MIXSDT
+#define MIXECHO FEAT_MIXECHO
+#define MIXAUDUL FEAT_MIXAUDUL
+#define MIXVXREC FEAT_MIXVXREC
+/* abe_router_id */
+/* there is only one router up to now */
+#define UPROUTE FEAT_UPROUTE
+/*
+ * GAIN IDs
+ */
+#define GAINS_DMIC1 FEAT_GAINS_DMIC1
+#define GAINS_DMIC2 FEAT_GAINS_DMIC2
+#define GAINS_DMIC3 FEAT_GAINS_DMIC3
+#define GAINS_AMIC FEAT_GAINS_AMIC
+#define GAINS_SPLIT FEAT_GAINS_SPLIT
+#define GAINS_DL1 FEAT_GAINS_DL1
+#define GAINS_DL2 FEAT_GAINS_DL2
+#define GAINS_BTUL FEAT_GAIN_BTUL
+/*
+ * EVENT GENERATORS - abe_event_id
+ */
+#define EVENT_TIMER 0
+#define EVENT_44100 1
+/*
+ * SERIAL PORTS IDs - abe_mcbsp_id
+ */
+#define MCBSP1_TX MCBSP1_DMA_TX
+#define MCBSP1_RX MCBSP1_DMA_RX
+#define MCBSP2_TX MCBSP2_DMA_TX
+#define MCBSP2_RX MCBSP2_DMA_RX
+#define MCBSP3_TX MCBSP3_DMA_TX
+#define MCBSP3_RX MCBSP3_DMA_RX
+/*
+ * SERIAL PORTS IDs - abe_slimbus_id;
+ */
+#define SLIMBUS1_TX0 SLIMBUS1_DMA_TX0
+#define SLIMBUS1_TX1 SLIMBUS1_DMA_TX1
+#define SLIMBUS1_TX2 SLIMBUS1_DMA_TX2
+#define SLIMBUS1_TX3 SLIMBUS1_DMA_TX3
+#define SLIMBUS1_TX4 SLIMBUS1_DMA_TX4
+#define SLIMBUS1_TX5 SLIMBUS1_DMA_TX5
+#define SLIMBUS1_TX6 SLIMBUS1_DMA_TX6
+#define SLIMBUS1_TX7 SLIMBUS1_DMA_TX7
+#define SLIMBUS1_RX0 SLIMBUS1_DMA_RX0
+#define SLIMBUS1_RX1 SLIMBUS1_DMA_RX1
+#define SLIMBUS1_RX2 SLIMBUS1_DMA_RX2
+#define SLIMBUS1_RX3 SLIMBUS1_DMA_RX3
+#define SLIMBUS1_RX4 SLIMBUS1_DMA_RX4
+#define SLIMBUS1_RX5 SLIMBUS1_DMA_RX5
+#define SLIMBUS1_RX6 SLIMBUS1_DMA_RX6
+#define SLIMBUS1_RX7 SLIMBUS1_DMA_RX7
+#define SLIMBUS_UNUSED _DUMMY_FIFO_
+/*
+ * ----------------- TYPES USED FOR APIS ---------------
+ */
+
+/*
+ * EQU_T
+ *
+ * coefficients of the equalizer
+ */
+typedef struct {
+ /* type of filter */
+ u32 equ_type;
+ /* filter length */
+ u32 equ_length;
+ union {
+ /* parameters are the direct and recursive coefficients in */
+ /* Q6.26 integer fixed-point format. */
+ s32 type1[NBEQ1];
+ struct {
+ /* center frequency of the band [Hz] */
+ s32 freq[NBEQ2];
+ /* gain of each band. [dB] */
+ s32 gain[NBEQ2];
+ /* Q factor of this band [dB] */
+ s32 q[NBEQ2];
+ } type2;
+ } coef;
+ s32 equ_param3;
+} abe_equ_t;
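+
+/*
+ * Illustrative sketch only, not part of the ABE HAL API: filling a
+ * type-2 (band) equalizer descriptor. The band count, the meaning of
+ * equ_type and equ_param3, and the call that finally consumes the
+ * structure are assumptions; only the abe_equ_t layout above is taken
+ * from this header.
+ *
+ *	abe_equ_t eq;
+ *
+ *	eq.equ_type = 2;		(assumed: selects the band form)
+ *	eq.equ_length = 1;		(assumed: one band programmed)
+ *	eq.coef.type2.freq[0] = 1000;	(center frequency in Hz)
+ *	eq.coef.type2.gain[0] = 3;	(gain in dB)
+ *	eq.coef.type2.q[0] = 2;		(Q factor)
+ *	eq.equ_param3 = 0;
+ */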
+
+/*
+ * APS_T
+ *
+ * coefficients of the Acoustics Protection and Safety
+ */
+struct abe_aps_t {
+ s32 coef1[NBAPS1];
+ s32 coef2[NBAPS2];
+};
+
+struct abe_aps_energy_t {
+ /* structure of two energy_t estimation for coil and membrane */
+ u32 e1;
+ u32 e2;
+};
+/*
+ * ROUTER_T
+ *
+ * table of indexes in unsigned bytes
+ */
+typedef u16 abe_router_t;
+/*
+ * DATA_FORMAT_T
+ *
+ * used in port declaration
+ */
+typedef struct {
+ /* Sampling frequency of the stream */
+ u32 f;
+ /* Sample format type */
+ u32 samp_format;
+} abe_data_format_t;
+/*
+ * PORT_PROTOCOL_T
+ *
+ * port declaration
+ */
+typedef struct {
+ /* Direction=0 means input from AESS point of view */
+ u32 direction;
+ /* Protocol type (switch) during the data transfers */
+ u32 protocol_switch;
+ union {
+ /* Slimbus peripheral connected to ATC */
+ struct {
+ /* Address of ATC Slimbus descriptor's index */
+ u32 desc_addr1;
+ /* DMEM address 1 in bytes */
+ u32 buf_addr1;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ /* Second ATC index for SlimBus reception (or NULL) */
+ u32 desc_addr2;
+ /* DMEM address 2 in bytes */
+ u32 buf_addr2;
+ } prot_slimbus;
+ /* McBSP/McASP peripheral connected to ATC */
+ struct {
+ /* Address of ATC McBSP/McASP descriptor's in bytes */
+ u32 desc_addr;
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ } prot_serial;
+ /* DMIC peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* Number of activated DMIC */
+ u32 nbchan;
+ } prot_dmic;
+ /* McPDMDL peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes */
+ u32 buf_size;
+ /* Control allowed on McPDM DL */
+ u32 control;
+ } prot_mcpdmdl;
+ /* McPDMUL peripheral connected to ATC */
+ struct {
+ /* DMEM address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ } prot_mcpdmul;
+ /* Ping-Pong interface to the Host using cache-flush */
+ struct {
+ /* Address of ATC descriptor's */
+ u32 desc_addr;
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM size in bytes for each ping and pong buffers */
+ u32 buf_size;
+ /* IRQ address (either DMA (0) MCU (1) or DSP(2)) */
+ u32 irq_addr;
+ /* IRQ data content loaded in the AESS IRQ register */
+ u32 irq_data;
+ /* Call-back function upon IRQ reception */
+ u32 callback;
+ } prot_pingpong;
+ /* DMAreq line to CBPr */
+ struct {
+ /* Address of ATC descriptor's */
+ u32 desc_addr;
+ /* DMEM buffer address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* ITERation on each DMAreq signals */
+ u32 iter;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } prot_dmareq;
+ /* Circular buffer - direct addressing to DMEM */
+ struct {
+ /* DMEM buffer base address in bytes */
+ u32 buf_addr;
+ /* DMEM buffer size in bytes */
+ u32 buf_size;
+ /* DMAreq address */
+ u32 dma_addr;
+ /* DMA/AESS = 1 << #DMA */
+ u32 dma_data;
+ } prot_circular_buffer;
+ } p;
+} abe_port_protocol_t;
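+
+/*
+ * Illustrative sketch, not a definitive port declaration: describing a
+ * DMAreq/CBPr port with the structure above. The numeric values are
+ * placeholders and DMA_TRIG is an assumed protocol_switch identifier;
+ * only the field layout comes from abe_port_protocol_t itself.
+ *
+ *	abe_port_protocol_t prot = {
+ *		.direction = 0,			(input from AESS point of view)
+ *		.protocol_switch = DMA_TRIG,	(assumed identifier)
+ *		.p.prot_dmareq = {
+ *			.desc_addr = 0,		(placeholder ATC descriptor)
+ *			.buf_addr = 0x1000,	(placeholder DMEM offset)
+ *			.buf_size = 480,
+ *			.iter = 120,
+ *			.dma_addr = 0,		(placeholder DMAreq address)
+ *			.dma_data = 1 << 0,	(DMA/AESS = 1 << #DMA)
+ *		},
+ *	};
+ */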
+/*
+ * DMA_T
+ *
+ * dma structure for easing programming
+ */
+typedef struct {
+ /* OCP L3 pointer to the first address of the destination buffer
+ * (either DMA or Ping-Pong read/write pointers) */
+ void *data;
+ /* address L3 when addressing the DMEM buffer instead of CBPr */
+ void *l3_dmem;
+ /* address L3 translated to L4 in the ARM memory space */
+ void *l4_dmem;
+ /* number of iterations for the DMA data moves. */
+ u32 iter;
+} abe_dma_t;
+
+typedef struct {
+ /* Offset to the first address of the destination buffer */
+ u32 data;
+ /* number of iterations for the DMA data moves. */
+ u32 iter;
+} abe_dma_t_offset;
+/*
+ * SEQ_T
+ *
+ * struct {
+ * micros_t time; Waiting time before executing next line
+ * seq_code_t code Subroutine index interpreted in the HAL
+ * and translated to FW subroutine codes
+ * in case of ABE tasks
+ * int32 param[2] Two parameters
+ * } seq_t
+ *
+ */
+typedef struct {
+ u32 delta_time;
+ u32 code;
+ u32 param[4];
+ u8 tag;
+} abe_seq_t;
+
+typedef struct {
+ u32 mask;
+ abe_seq_t seq1;
+ abe_seq_t seq2;
+} abe_sequence_t;
+/*
+ * DRIFT_T abe_drift_t = s32
+ *
+ * ASRC drift parameter in [ppm] value
+ */
+/*
+ * -------------------- INTERNAL DATA TYPES ---------------------
+ */
+/*
+ * ABE_IRQ_DATA_T
+ *
+ * IRQ FIFO content declaration
+ * APS interrupts : IRQ_FIFO[31:28] = IRQtag_APS,
+ * IRQ_FIFO[27:16] = APS_IRQs, IRQ_FIFO[15:0] = loopCounter
+ * SEQ interrupts : IRQ_FIFO[31:28] IRQtag_COUNT,
+ * IRQ_FIFO[27:16] = Count_IRQs, IRQ_FIFO[15:0] = loopCounter
+ * Ping-Pong Interrupts : IRQ_FIFO[31:28] = IRQtag_PP,
+ * IRQ_FIFO[27:16] = PP_MCU_IRQ, IRQ_FIFO[15:0] = loopCounter
+ */
+typedef struct {
+ unsigned int counter:16;
+ unsigned int data:12;
+ unsigned int tag:4;
+} abe_irq_data_t;
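+
+/*
+ * Worked example (illustrative only): decoding one 32-bit IRQ FIFO word
+ * with the bit-field layout above. For a ping-pong interrupt the word is
+ * [31:28] = IRQtag_PP, [27:16] = PP_MCU_IRQ, [15:0] = loopCounter, so a
+ * raw FIFO entry copied into the structure splits as:
+ *
+ *	u32 raw;			(one entry read from the IRQ FIFO)
+ *	abe_irq_data_t ev;
+ *
+ *	memcpy(&ev, &raw, sizeof(ev));
+ *	ev.counter == (raw & 0xffff)
+ *	ev.data    == ((raw >> 16) & 0xfff)
+ *	ev.tag     == (raw >> 28)
+ *
+ * This assumes the little-endian bit-field layout used by the ARM EABI,
+ * where the first bit-field occupies the least significant bits.
+ */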
+/*
+ * ABE_PORT_T status / format / sampling / protocol(call_back) /
+ * features / gain / name ..
+ *
+ */
+typedef struct {
+ /* running / idled */
+ u16 status;
+ /* Sample format type */
+ abe_data_format_t format;
+ /* API : for ASRC */
+ s32 drift;
+ /* optional call-back index for errors and ack */
+ u16 callback;
+ /* IO tasks buffers */
+ u16 smem_buffer1;
+ u16 smem_buffer2;
+ abe_port_protocol_t protocol;
+ /* pointer and iteration counter of the xDMA */
+ abe_dma_t_offset dma;
+ /* list of features associated to a port (EQ, APS, ... , ends with 0) */
+ u16 feature_index[MAXFEATUREPORT];
+ char name[NBCHARPORTNAME];
+} abe_port_t;
+/*
+ * ABE_SUBROUTINE_T
+ *
+ */
+typedef struct {
+ u32 sub_id;
+ s32 param[4];
+} abe_subroutine_t;
+
+#endif /* ifndef _ABE_TYP_H_ */
diff --git a/sound/soc/omap/abe/abe_typedef.h b/sound/soc/omap/abe/abe_typedef.h
new file mode 100644
index 0000000..943968e
--- /dev/null
+++ b/sound/soc/omap/abe/abe_typedef.h
@@ -0,0 +1,240 @@
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2010-2011 Texas Instruments Incorporated,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _ABE_TYPEDEF_H_
+#define _ABE_TYPEDEF_H_
+
+#include "abe_define.h"
+#include "abe_typ.h"
+
+/*
+ * Basic types definition
+ */
+/*
+ * Commonly used structures
+ */
+typedef struct abetaskTag {
+ /* 0 ... Index of called function */
+ u16 iF;
+ /* 2 ... for INITPTR of A0 */
+ u16 A0;
+ /* 4 ... for INITPTR of A1 */
+ u16 A1;
+ /* 6 ... for INITPTR of A2 & A3 */
+ u16 A2_3;
+ /* 8 ... for INITPTR of A4 & A5 */
+ u16 A4_5;
+ /* 10 ... for INITREG of R0, R1, R2, R3 */
+ u16 R;
+ /* 12 */
+ u16 misc0;
+ /* 14 */
+ u16 misc1;
+} ABE_STask;
+typedef ABE_STask *pABE_STask;
+typedef ABE_STask **ppABE_STask;
+
+struct ABE_SIODescriptor {
+ /* 0 */
+ u16 drift_ASRC;
+ /* 2 */
+ u16 drift_io;
+ /* 4 "Function index" of XLS sheet "Functions" */
+ u8 io_type_idx;
+ /* 5 1 = MONO or Stereo1616, 2= STEREO, ... */
+ u8 samp_size;
+ /* 6 drift "issues" for ASRC */
+ s16 flow_counter;
+ /* 8 address for IRQ or DMArequests */
+ u16 hw_ctrl_addr;
+ /* 10 DMA request bit-field or IRQ (DSP/MCU) */
+ u8 atc_irq_data;
+ /* 11 0 = Read, 3 = Write */
+ u8 direction_rw;
+ /* 12 */
+ u8 repeat_last_samp;
+ /* 13 12 at 48kHz, ... */
+ u8 nsamp;
+ /* 14 nsamp x samp_size */
+ u8 x_io;
+ /* 15 ON = 0x80, OFF = 0x00 */
+ u8 on_off;
+ /* 16 For Slimbus and TDM purpose */
+ u16 split_addr1;
+ /* 18 */
+ u16 split_addr2;
+ /* 20 */
+ u16 split_addr3;
+ /* 22 */
+ u8 before_f_index;
+ /* 23 */
+ u8 after_f_index;
+ /* 24 SM/CM INITPTR field */
+ u16 smem_addr1;
+ /* 26 in bytes */
+ u16 atc_address1;
+ /* 28 DMIC_ATC_PTR, MCPDM_UL_ATC_PTR, ... */
+ u16 atc_pointer_saved1;
+ /* 30 samp_size (except in TDM or Slimbus) */
+ u8 data_size1;
+ /* 31 "Function index" of XLS sheet "Functions" */
+ u8 copy_f_index1;
+ /* 32 For Slimbus and TDM purpose */
+ u16 smem_addr2;
+ /* 34 */
+ u16 atc_address2;
+ /* 36 */
+ u16 atc_pointer_saved2;
+ /* 38 */
+ u8 data_size2;
+ /* 39 */
+ u8 copy_f_index2;
+};
+
+/* [w] asrc output used for the next asrc call (+/- 1 / 0) */
+#define drift_asrc_ 0
+/* [w] asrc output used for controlling the number of samples to be
+ exchanged (+/- 1 / 0) */
+#define drift_io_ 2
+/* address of the IO subroutine */
+#define io_type_idx_ 4
+#define samp_size_ 5
+/* flow error counter */
+#define flow_counter_ 6
+/* dmareq address or host irq buffer address (atc address) */
+#define hw_ctrl_addr_ 8
+/* data content to be loaded to "hw_ctrl_addr" */
+#define atc_irq_data_ 10
+/* read dmem =0, write dmem =3 (atc offset of the access pointer) */
+#define direction_rw_ 11
+/* flag set to allow repeating the last sample on downlink paths */
+#define repeat_last_samp_ 12
+/* number of samples (either mono stereo...) */
+#define nsamp_ 13
+/* x number of raw DMEM data moved */
+#define x_io_ 14
+#define on_off_ 15
+/* internal smem buffer initptr pointer index */
+#define split_addr1_ 16
+/* internal smem buffer initptr pointer index */
+#define split_addr2_ 18
+/* internal smem buffer initptr pointer index */
+#define split_addr3_ 20
+/* index of the copy subroutine */
+#define before_f_index_ 22
+/* index of the copy subroutine */
+#define after_f_index_ 23
+#define minidesc1_ 24
+/* internal smem buffer initptr pointer index */
+#define rel_smem_ 0
+/* atc descriptor address (byte address x4) */
+#define rel_atc_ 2
+/* location of the saved ATC pointer (+debug info) */
+#define rel_atc_saved 4
+/* size of each sample (1:mono/1616 2:stereo ... ) */
+#define rel_size_ 6
+/* index of the copy subroutine */
+#define rel_f_ 7
+#define s_mem_mm_ul 24
+#define s_mm_ul_size 30
+#define minidesc2_ 32
+#define Struct_Size 40
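+
+/*
+ * Illustrative note: the byte offsets above mirror the field layout of
+ * struct ABE_SIODescriptor, so host-side code that patches a descriptor
+ * image already written to DMEM could address a single field, e.g.
+ * (abe_block_copy() and its COPY_FROM_HOST_TO_ABE/ABE_DMEM arguments are
+ * named here for illustration only):
+ *
+ *	u32 on = 0x80;			(ON value for the on_off field)
+ *	abe_block_copy(COPY_FROM_HOST_TO_ABE, ABE_DMEM,
+ *		       sio_desc_dmem_base + on_off_, &on, 1);
+ *
+ * where sio_desc_dmem_base is the DMEM byte address of the descriptor.
+ */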
+
+struct ABE_SPingPongDescriptor {
+ /* 0: [W] asrc output used for the next ASRC call (+/- 1 / 0) */
+ u16 drift_ASRC;
+ /* 2: [W] asrc output used for controlling the number of
+ samples to be exchanged (+/- 1 / 0) */
+ u16 drift_io;
+ /* 4: DMAReq address or HOST IRQ buffer address (ATC ADDRESS) */
+ u16 hw_ctrl_addr;
+ /* 6: index of the copy subroutine */
+ u8 copy_func_index;
+ /* 7: X number of SMEM samples to move */
+ u8 x_io;
+ /* 8: 0 for mono data, 1 for stereo data */
+ u8 data_size;
+ /* 9: internal SMEM buffer INITPTR pointer index */
+ u8 smem_addr;
+ /* 10: data content to be loaded to "hw_ctrl_addr" */
+ u8 atc_irq_data;
+ /* 11: ping/pong buffer flag */
+ u8 counter;
+ /* 12: reserved */
+ u16 dummy1;
+ /* 14: reserved */
+ u16 dummy2;
+ /* 16: For 12/11 in case of 44.1 mode (same address as SIO desc) */
+ u16 split_addr1;
+ /* 18: reserved */
+ u16 dummy3;
+ /* 20: current Base address of the working buffer */
+ u16 workbuff_BaseAddr;
+ /* 22: samples left in the working buffer */
+ u16 workbuff_Samples;
+ /* 24: Base address of the ping/pong buffer 0 */
+ u16 nextbuff0_BaseAddr;
+ /* 26: samples available in the ping/pong buffer 0 */
+ u16 nextbuff0_Samples;
+ /* 28: Base address of the ping/pong buffer 1 */
+ u16 nextbuff1_BaseAddr;
+ /* 30: samples available in the ping/pong buffer 1 */
+ u16 nextbuff1_Samples;
+};
+
+#endif /* _ABE_TYPEDEF_H_ */
diff --git a/sound/soc/omap/abe/port_mgr.c b/sound/soc/omap/abe/port_mgr.c
new file mode 100644
index 0000000..ef9d7a6
--- /dev/null
+++ b/sound/soc/omap/abe/port_mgr.c
@@ -0,0 +1,345 @@
+/*
+ * ALSA SoC OMAP ABE port manager
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+//#define DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include "port_mgr.h"
+#include "abe_main.h"
+
+#ifdef CONFIG_DEBUG_FS
+/* this must match logical ID numbers in port_mgr.h */
+static const char *lport_name[] = {
+ "dmic0", "dmic1", "dmic2", "pdmdl1", "pdmdl2", "pdmvib",
+ "pdmul1", "bt_vx_dl", "bt_vx_ul", "mm_ext_ul", "mm_ext_dl",
+ "mm_dl1", "mm_ul1", "mm_ul2", "vx_dl", "vx_ul", "vib", "tones"
+};
+#endif
+
+static DEFINE_MUTEX(port_mgr_mutex);
+static struct abe *the_abe = NULL;
+static int users = 0;
+
+/*
+ * Get the Physical port ID based on the logical port ID
+ *
+ * FE and BE ports have unique ID's within the driver but share
+ * ID's within the ABE. This maps a driver port ID to an ABE port ID.
+ */
+static int get_physical_id(int logical_id)
+{
+ switch (logical_id) {
+ /* backend ports */
+ case OMAP_ABE_BE_PORT_DMIC0:
+ case OMAP_ABE_BE_PORT_DMIC1:
+ case OMAP_ABE_BE_PORT_DMIC2:
+ return DMIC_PORT;
+ case OMAP_ABE_BE_PORT_PDM_DL1:
+ case OMAP_ABE_BE_PORT_PDM_DL2:
+ return PDM_DL_PORT;
+ case OMAP_ABE_BE_PORT_PDM_VIB:
+ return VIB_DL_PORT;
+ case OMAP_ABE_BE_PORT_PDM_UL1:
+ return PDM_UL_PORT;
+ case OMAP_ABE_BE_PORT_BT_VX_DL:
+ return BT_VX_DL_PORT;
+ case OMAP_ABE_BE_PORT_BT_VX_UL:
+ return BT_VX_UL_PORT;
+ case OMAP_ABE_BE_PORT_MM_EXT_UL:
+ return MM_EXT_OUT_PORT;
+ case OMAP_ABE_BE_PORT_MM_EXT_DL:
+ return MM_EXT_IN_PORT;
+ /* front end ports */
+ case OMAP_ABE_FE_PORT_MM_DL1:
+ return MM_DL_PORT;
+ case OMAP_ABE_FE_PORT_MM_UL1:
+ return MM_UL_PORT;
+ case OMAP_ABE_FE_PORT_MM_UL2:
+ return MM_UL2_PORT;
+ case OMAP_ABE_FE_PORT_VX_DL:
+ return VX_DL_PORT;
+ case OMAP_ABE_FE_PORT_VX_UL:
+ return VX_UL_PORT;
+ case OMAP_ABE_FE_PORT_VIB:
+ return VIB_DL_PORT;
+ case OMAP_ABE_FE_PORT_TONES:
+ return TONES_DL_PORT;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Get the number of enabled users of the physical port shared by this client.
+ * Locks held by callers.
+ */
+static int port_get_num_users(struct abe *abe, struct omap_abe_port *port)
+{
+ struct omap_abe_port *p;
+ int users = 0;
+
+ list_for_each_entry(p, &abe->ports, list) {
+ if (p->physical_id == port->physical_id && p->state == PORT_ENABLED)
+ users++;
+ }
+ return users;
+}
+
+static int port_is_open(struct abe *abe, int phy_port)
+{
+ struct omap_abe_port *p;
+
+ list_for_each_entry(p, &abe->ports, list) {
+ if (p->physical_id == phy_port && p->state == PORT_ENABLED)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Check whether the physical port is enabled for this PHY port ID.
+ * Locks held by callers.
+ */
+int omap_abe_port_is_enabled(struct abe *abe, struct omap_abe_port *port)
+{
+ struct omap_abe_port *p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&abe->lock, flags);
+
+ list_for_each_entry(p, &abe->ports, list) {
+ if (p->physical_id == port->physical_id && p->state == PORT_ENABLED) {
+ spin_unlock_irqrestore(&abe->lock, flags);
+ return 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&abe->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_port_is_enabled);
+
+/*
+ * omap_abe_port_enable - enable ABE logical port
+ *
+ * @abe - ABE.
+ * @port - logical ABE port to be enabled.
+ */
+int omap_abe_port_enable(struct abe *abe, struct omap_abe_port *port)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ /* only enable the physical port iff it is disabled */
+ pr_debug("port %s increment count %d\n",
+ lport_name[port->logical_id], port->users);
+
+ spin_lock_irqsave(&abe->lock, flags);
+ if (port->users == 0 && port_get_num_users(abe, port) == 0) {
+
+ /* enable the physical port */
+ pr_debug("port %s phy port %d enabled\n",
+ lport_name[port->logical_id], port->physical_id);
+ abe_enable_data_transfer(port->physical_id);
+ }
+
+ port->state = PORT_ENABLED;
+ port->users++;
+ spin_unlock_irqrestore(&abe->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(omap_abe_port_enable);
+
+/*
+ * omap_abe_port_disable - disable ABE logical port
+ *
+ * @abe - ABE.
+ * @port - logical ABE port to be disabled.
+ */
+int omap_abe_port_disable(struct abe *abe, struct omap_abe_port *port)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ /* only disable the port iff no other users are using it */
+ pr_debug("port %s decrement count %d\n",
+ lport_name[port->logical_id], port->users);
+
+ spin_lock_irqsave(&abe->lock, flags);
+
+ WARN(!port->users, "port %s phy port %d is already disabled\n",
+ lport_name[port->logical_id], port->physical_id);
+
+ if (port->users == 1 && port_get_num_users(abe, port) == 1) {
+ /* disable the physical port */
+ pr_debug("port %s phy port %d disabled\n",
+ lport_name[port->logical_id], port->physical_id);
+
+ abe_disable_data_transfer(port->physical_id);
+ }
+
+ port->state = PORT_DISABLED;
+ port->users--;
+ spin_unlock_irqrestore(&abe->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(omap_abe_port_disable);
+
+/*
+ * omap_abe_port_open - open ABE logical port
+ *
+ * @abe - ABE.
+ * @logical_id - logical ABE port ID to be opened.
+ */
+struct omap_abe_port *omap_abe_port_open(struct abe *abe, int logical_id)
+{
+ struct omap_abe_port *port;
+ unsigned long flags;
+
+#ifdef CONFIG_DEBUG_FS
+ char debug_fs_name[32];
+#endif
+
+ if (logical_id < 0 || logical_id > OMAP_ABE_MAX_PORT_ID)
+ return NULL;
+
+ if (port_is_open(abe, logical_id))
+ return NULL;
+
+ port = kzalloc(sizeof(struct omap_abe_port), GFP_KERNEL);
+ if (port == NULL)
+ return NULL;
+
+ port->logical_id = logical_id;
+ port->physical_id = get_physical_id(logical_id);
+ port->state = PORT_DISABLED;
+ port->abe = abe;
+
+ spin_lock_irqsave(&abe->lock, flags);
+ list_add(&port->list, &abe->ports);
+ spin_unlock_irqrestore(&abe->lock, flags);
+ port->physical_users = port_get_num_users(abe, port);
+
+#ifdef CONFIG_DEBUG_FS
+ sprintf(debug_fs_name, "%s_state", lport_name[logical_id]);
+ port->debugfs_lstate = debugfs_create_u32(debug_fs_name, 0644,
+ abe->debugfs_root, &port->state);
+ sprintf(debug_fs_name, "%s_phy", lport_name[logical_id]);
+ port->debugfs_lphy = debugfs_create_u32(debug_fs_name, 0644,
+ abe->debugfs_root, &port->physical_id);
+ sprintf(debug_fs_name, "%s_users", lport_name[logical_id]);
+ port->debugfs_lusers = debugfs_create_u32(debug_fs_name, 0644,
+ abe->debugfs_root, &port->users);
+#endif
+
+ pr_debug("opened port %s\n", lport_name[logical_id]);
+ return port;
+}
+EXPORT_SYMBOL(omap_abe_port_open);
+
+/*
+ * omap_abe_port_close - close ABE logical port
+ *
+ * @port - logical ABE port to be closed (and disabled).
+ */
+void omap_abe_port_close(struct abe *abe, struct omap_abe_port *port)
+{
+ unsigned long flags;
+
+ /* disable the port */
+ omap_abe_port_disable(abe, port);
+
+ spin_lock_irqsave(&abe->lock, flags);
+ list_del(&port->list);
+ spin_unlock_irqrestore(&abe->lock, flags);
+
+ pr_debug("closed port %s\n", lport_name[port->logical_id]);
+ kfree(port);
+}
+EXPORT_SYMBOL(omap_abe_port_close);
+
+static struct abe *omap_abe_port_mgr_init(void)
+{
+ struct abe *abe;
+
+ abe = kzalloc(sizeof(struct abe), GFP_KERNEL);
+ if (abe == NULL)
+ return NULL;
+
+ spin_lock_init(&abe->lock);
+
+ INIT_LIST_HEAD(&abe->ports);
+ the_abe = abe;
+
+#ifdef CONFIG_DEBUG_FS
+ abe->debugfs_root = debugfs_create_dir("abe_port", NULL);
+ if (!abe->debugfs_root) {
+ pr_debug("Failed to create port manager debugfs directory\n");
+ }
+#endif
+ return abe;
+}
+
+static void omap_abe_port_mgr_free(struct abe *abe)
+{
+ debugfs_remove_recursive(abe->debugfs_root);
+ kfree(abe);
+ the_abe = NULL;
+}
+
+struct abe *omap_abe_port_mgr_get(void)
+{
+ struct abe *abe;
+
+ mutex_lock(&port_mgr_mutex);
+
+ if (the_abe)
+ abe = the_abe;
+ else
+ abe = omap_abe_port_mgr_init();
+
+ users++;
+ mutex_unlock(&port_mgr_mutex);
+ return abe;
+}
+EXPORT_SYMBOL(omap_abe_port_mgr_get);
+
+void omap_abe_port_mgr_put(struct abe *abe)
+{
+ mutex_lock(&port_mgr_mutex);
+
+ if (users == 0)
+ goto out;
+
+ if (--users == 0)
+ omap_abe_port_mgr_free(abe);
+
+out:
+ mutex_unlock(&port_mgr_mutex);
+}
+EXPORT_SYMBOL(omap_abe_port_mgr_put);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/abe/port_mgr.h b/sound/soc/omap/abe/port_mgr.h
new file mode 100644
index 0000000..a65b0d3
--- /dev/null
+++ b/sound/soc/omap/abe/port_mgr.h
@@ -0,0 +1,98 @@
+/*
+ * ABE Port manager
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_SOC_OMAP_PORT_MGR_H
+#define __LINUX_SND_SOC_OMAP_PORT_MGR_H
+
+#include <linux/debugfs.h>
+
+/*
+ * TODO: These structures, enums and port ID macros should be moved to the
+ * new public ABE API header.
+ */
+
+/* Logical PORT IDs - Backend */
+#define OMAP_ABE_BE_PORT_DMIC0 0
+#define OMAP_ABE_BE_PORT_DMIC1 1
+#define OMAP_ABE_BE_PORT_DMIC2 2
+#define OMAP_ABE_BE_PORT_PDM_DL1 3
+#define OMAP_ABE_BE_PORT_PDM_DL2 4
+#define OMAP_ABE_BE_PORT_PDM_VIB 5
+#define OMAP_ABE_BE_PORT_PDM_UL1 6
+#define OMAP_ABE_BE_PORT_BT_VX_DL 7
+#define OMAP_ABE_BE_PORT_BT_VX_UL 8
+#define OMAP_ABE_BE_PORT_MM_EXT_UL 9
+#define OMAP_ABE_BE_PORT_MM_EXT_DL 10
+
+/* Logical PORT IDs - Frontend */
+#define OMAP_ABE_FE_PORT_MM_DL1 11
+#define OMAP_ABE_FE_PORT_MM_UL1 12
+#define OMAP_ABE_FE_PORT_MM_UL2 13
+#define OMAP_ABE_FE_PORT_VX_DL 14
+#define OMAP_ABE_FE_PORT_VX_UL 15
+#define OMAP_ABE_FE_PORT_VIB 16
+#define OMAP_ABE_FE_PORT_TONES 17
+
+#define OMAP_ABE_MAX_PORT_ID OMAP_ABE_FE_PORT_TONES
+
+/* ports can either be enabled or disabled */
+enum port_state {
+ PORT_DISABLED = 0,
+ PORT_ENABLED,
+};
+
+/* structure used for client port info */
+struct omap_abe_port {
+
+ /* logical and physical port IDs that correspond to this port */
+ int logical_id;
+ int physical_id;
+ int physical_users;
+
+ /* enabled or disabled */
+ enum port_state state;
+
+ /* logical port ref count */
+ int users;
+
+ struct list_head list;
+ struct abe *abe;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_lstate;
+ struct dentry *debugfs_lphy;
+ struct dentry *debugfs_lusers;
+#endif
+};
+
+/* main ABE structure */
+struct abe {
+
+ /* List of open ABE logical ports */
+ struct list_head ports;
+
+ /* spinlock */
+ spinlock_t lock;
+
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+};
+
+struct omap_abe_port *omap_abe_port_open(struct abe *abe, int logical_id);
+void omap_abe_port_close(struct abe *abe, struct omap_abe_port *port);
+int omap_abe_port_enable(struct abe *abe, struct omap_abe_port *port);
+int omap_abe_port_disable(struct abe *abe, struct omap_abe_port *port);
+int omap_abe_port_is_enabled(struct abe *abe, struct omap_abe_port *port);
+struct abe *omap_abe_port_mgr_get(void);
+void omap_abe_port_mgr_put(struct abe *abe);
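+
+/*
+ * Illustrative usage sketch (error handling trimmed): a client takes a
+ * reference on the port manager, opens a logical port and enables it
+ * around stream activity. OMAP_ABE_FE_PORT_MM_DL1 is just an example
+ * port; the call sequence follows the declarations above.
+ *
+ *	struct abe *abe = omap_abe_port_mgr_get();
+ *	struct omap_abe_port *port;
+ *
+ *	port = omap_abe_port_open(abe, OMAP_ABE_FE_PORT_MM_DL1);
+ *	if (port) {
+ *		omap_abe_port_enable(abe, port);	(stream start)
+ *		...
+ *		omap_abe_port_disable(abe, port);	(stream stop)
+ *		omap_abe_port_close(abe, port);
+ *	}
+ *	omap_abe_port_mgr_put(abe);
+ */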
+
+#endif /* __LINUX_SND_SOC_OMAP_PORT_MGR_H */
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c
deleted file mode 100644
index 928f037..0000000
--- a/sound/soc/omap/mcpdm.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * mcpdm.c -- McPDM interface driver
- *
- * Author: Jorge Eduardo Candelaria <x0107209@ti.com>
- * Copyright (C) 2009 - Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-
-#include "mcpdm.h"
-
-static struct omap_mcpdm *mcpdm;
-
-static inline void omap_mcpdm_write(u16 reg, u32 val)
-{
- __raw_writel(val, mcpdm->io_base + reg);
-}
-
-static inline int omap_mcpdm_read(u16 reg)
-{
- return __raw_readl(mcpdm->io_base + reg);
-}
-
-static void omap_mcpdm_reg_dump(void)
-{
- dev_dbg(mcpdm->dev, "***********************\n");
- dev_dbg(mcpdm->dev, "IRQSTATUS_RAW: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQSTATUS_RAW));
- dev_dbg(mcpdm->dev, "IRQSTATUS: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQSTATUS));
- dev_dbg(mcpdm->dev, "IRQENABLE_SET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQENABLE_SET));
- dev_dbg(mcpdm->dev, "IRQENABLE_CLR: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQENABLE_CLR));
- dev_dbg(mcpdm->dev, "IRQWAKE_EN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQWAKE_EN));
- dev_dbg(mcpdm->dev, "DMAENABLE_SET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAENABLE_SET));
- dev_dbg(mcpdm->dev, "DMAENABLE_CLR: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAENABLE_CLR));
- dev_dbg(mcpdm->dev, "DMAWAKEEN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAWAKEEN));
- dev_dbg(mcpdm->dev, "CTRL: 0x%04x\n",
- omap_mcpdm_read(MCPDM_CTRL));
- dev_dbg(mcpdm->dev, "DN_DATA: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DN_DATA));
- dev_dbg(mcpdm->dev, "UP_DATA: 0x%04x\n",
- omap_mcpdm_read(MCPDM_UP_DATA));
- dev_dbg(mcpdm->dev, "FIFO_CTRL_DN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_FIFO_CTRL_DN));
- dev_dbg(mcpdm->dev, "FIFO_CTRL_UP: 0x%04x\n",
- omap_mcpdm_read(MCPDM_FIFO_CTRL_UP));
- dev_dbg(mcpdm->dev, "DN_OFFSET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DN_OFFSET));
- dev_dbg(mcpdm->dev, "***********************\n");
-}
-
-/*
- * Takes the McPDM module in and out of reset state.
- * Uplink and downlink can be reset individually.
- */
-static void omap_mcpdm_reset_capture(int reset)
-{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
-
- if (reset)
- ctrl |= SW_UP_RST;
- else
- ctrl &= ~SW_UP_RST;
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-}
-
-static void omap_mcpdm_reset_playback(int reset)
-{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
-
- if (reset)
- ctrl |= SW_DN_RST;
- else
- ctrl &= ~SW_DN_RST;
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-}
-
-/*
- * Enables the transfer through the PDM interface to/from the Phoenix
- * codec by enabling the corresponding UP or DN channels.
- */
-void omap_mcpdm_start(int stream)
-{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
-
- if (stream)
- ctrl |= mcpdm->up_channels;
- else
- ctrl |= mcpdm->dn_channels;
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-}
-
-/*
- * Disables the transfer through the PDM interface to/from the Phoenix
- * codec by disabling the corresponding UP or DN channels.
- */
-void omap_mcpdm_stop(int stream)
-{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
-
- if (stream)
- ctrl &= ~mcpdm->up_channels;
- else
- ctrl &= ~mcpdm->dn_channels;
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-}
-
-/*
- * Configures McPDM uplink for audio recording.
- * This function should be called before omap_mcpdm_start.
- */
-int omap_mcpdm_capture_open(struct omap_mcpdm_link *uplink)
-{
- int irq_mask = 0;
- int ctrl;
-
- if (!uplink)
- return -EINVAL;
-
- mcpdm->uplink = uplink;
-
- /* Enable irq request generation */
- irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
-
- /* Configure uplink threshold */
- if (uplink->threshold > UP_THRES_MAX)
- uplink->threshold = UP_THRES_MAX;
-
- omap_mcpdm_write(MCPDM_FIFO_CTRL_UP, uplink->threshold);
-
- /* Configure DMA controller */
- omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_UP_ENABLE);
-
- /* Set pdm out format */
- ctrl = omap_mcpdm_read(MCPDM_CTRL);
- ctrl &= ~PDMOUTFORMAT;
- ctrl |= uplink->format & PDMOUTFORMAT;
-
- /* Uplink channels */
- mcpdm->up_channels = uplink->channels & (PDM_UP_MASK | PDM_STATUS_MASK);
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-
- return 0;
-}
-
-/*
- * Configures McPDM downlink for audio playback.
- * This function should be called before omap_mcpdm_start.
- */
-int omap_mcpdm_playback_open(struct omap_mcpdm_link *downlink)
-{
- int irq_mask = 0;
- int ctrl;
-
- if (!downlink)
- return -EINVAL;
-
- mcpdm->downlink = downlink;
-
- /* Enable irq request generation */
- irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
-
- /* Configure uplink threshold */
- if (downlink->threshold > DN_THRES_MAX)
- downlink->threshold = DN_THRES_MAX;
-
- omap_mcpdm_write(MCPDM_FIFO_CTRL_DN, downlink->threshold);
-
- /* Enable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_DN_ENABLE);
-
- /* Set pdm out format */
- ctrl = omap_mcpdm_read(MCPDM_CTRL);
- ctrl &= ~PDMOUTFORMAT;
- ctrl |= downlink->format & PDMOUTFORMAT;
-
- /* Downlink channels */
- mcpdm->dn_channels = downlink->channels & (PDM_DN_MASK | PDM_CMD_MASK);
-
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
-
- return 0;
-}
-
-/*
- * Cleans McPDM uplink configuration.
- * This function should be called when the stream is closed.
- */
-int omap_mcpdm_capture_close(struct omap_mcpdm_link *uplink)
-{
- int irq_mask = 0;
-
- if (!uplink)
- return -EINVAL;
-
- /* Disable irq request generation */
- irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
-
- /* Disable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_UP_ENABLE);
-
- /* Clear Downlink channels */
- mcpdm->up_channels = 0;
-
- mcpdm->uplink = NULL;
-
- return 0;
-}
-
-/*
- * Cleans McPDM downlink configuration.
- * This function should be called when the stream is closed.
- */
-int omap_mcpdm_playback_close(struct omap_mcpdm_link *downlink)
-{
- int irq_mask = 0;
-
- if (!downlink)
- return -EINVAL;
-
- /* Disable irq request generation */
- irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
-
- /* Disable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_DN_ENABLE);
-
- /* clear Downlink channels */
- mcpdm->dn_channels = 0;
-
- mcpdm->downlink = NULL;
-
- return 0;
-}
-
-static irqreturn_t omap_mcpdm_irq_handler(int irq, void *dev_id)
-{
- struct omap_mcpdm *mcpdm_irq = dev_id;
- int irq_status;
-
- irq_status = omap_mcpdm_read(MCPDM_IRQSTATUS);
-
- /* Acknowledge irq event */
- omap_mcpdm_write(MCPDM_IRQSTATUS, irq_status);
-
- if (irq & MCPDM_DN_IRQ_FULL) {
- dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
- omap_mcpdm_reset_playback(1);
- omap_mcpdm_playback_open(mcpdm_irq->downlink);
- omap_mcpdm_reset_playback(0);
- }
-
- if (irq & MCPDM_DN_IRQ_EMPTY) {
- dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
- omap_mcpdm_reset_playback(1);
- omap_mcpdm_playback_open(mcpdm_irq->downlink);
- omap_mcpdm_reset_playback(0);
- }
-
- if (irq & MCPDM_DN_IRQ) {
- dev_dbg(mcpdm_irq->dev, "DN write request\n");
- }
-
- if (irq & MCPDM_UP_IRQ_FULL) {
- dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
- omap_mcpdm_reset_capture(1);
- omap_mcpdm_capture_open(mcpdm_irq->uplink);
- omap_mcpdm_reset_capture(0);
- }
-
- if (irq & MCPDM_UP_IRQ_EMPTY) {
- dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
- omap_mcpdm_reset_capture(1);
- omap_mcpdm_capture_open(mcpdm_irq->uplink);
- omap_mcpdm_reset_capture(0);
- }
-
- if (irq & MCPDM_UP_IRQ) {
- dev_dbg(mcpdm_irq->dev, "UP write request\n");
- }
-
- return IRQ_HANDLED;
-}
-
-int omap_mcpdm_request(void)
-{
- int ret;
-
- clk_enable(mcpdm->clk);
-
- spin_lock(&mcpdm->lock);
-
- if (!mcpdm->free) {
- dev_err(mcpdm->dev, "McPDM interface is in use\n");
- spin_unlock(&mcpdm->lock);
- ret = -EBUSY;
- goto err;
- }
- mcpdm->free = 0;
-
- spin_unlock(&mcpdm->lock);
-
- /* Disable lines while request is ongoing */
- omap_mcpdm_write(MCPDM_CTRL, 0x00);
-
- ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler,
- 0, "McPDM", (void *)mcpdm);
- if (ret) {
- dev_err(mcpdm->dev, "Request for McPDM IRQ failed\n");
- goto err;
- }
-
- return 0;
-
-err:
- clk_disable(mcpdm->clk);
- return ret;
-}
-
-void omap_mcpdm_free(void)
-{
- spin_lock(&mcpdm->lock);
- if (mcpdm->free) {
- dev_err(mcpdm->dev, "McPDM interface is already free\n");
- spin_unlock(&mcpdm->lock);
- return;
- }
- mcpdm->free = 1;
- spin_unlock(&mcpdm->lock);
-
- clk_disable(mcpdm->clk);
-
- free_irq(mcpdm->irq, (void *)mcpdm);
-}
-
-/* Enable/disable DC offset cancelation for the analog
- * headset path (PDM channels 1 and 2).
- */
-int omap_mcpdm_set_offset(int offset1, int offset2)
-{
- int offset;
-
- if ((offset1 > DN_OFST_MAX) || (offset2 > DN_OFST_MAX))
- return -EINVAL;
-
- offset = (offset1 << DN_OFST_RX1) | (offset2 << DN_OFST_RX2);
-
- /* offset cancellation for channel 1 */
- if (offset1)
- offset |= DN_OFST_RX1_EN;
- else
- offset &= ~DN_OFST_RX1_EN;
-
- /* offset cancellation for channel 2 */
- if (offset2)
- offset |= DN_OFST_RX2_EN;
- else
- offset &= ~DN_OFST_RX2_EN;
-
- omap_mcpdm_write(MCPDM_DN_OFFSET, offset);
-
- return 0;
-}
-
-int __devinit omap_mcpdm_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int ret = 0;
-
- mcpdm = kzalloc(sizeof(struct omap_mcpdm), GFP_KERNEL);
- if (!mcpdm) {
- ret = -ENOMEM;
- goto exit;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no resource\n");
- goto err_resource;
- }
-
- spin_lock_init(&mcpdm->lock);
- mcpdm->free = 1;
- mcpdm->io_base = ioremap(res->start, resource_size(res));
- if (!mcpdm->io_base) {
- ret = -ENOMEM;
- goto err_resource;
- }
-
- mcpdm->irq = platform_get_irq(pdev, 0);
-
- mcpdm->clk = clk_get(&pdev->dev, "pdm_ck");
- if (IS_ERR(mcpdm->clk)) {
- ret = PTR_ERR(mcpdm->clk);
- dev_err(&pdev->dev, "unable to get pdm_ck: %d\n", ret);
- goto err_clk;
- }
-
- mcpdm->dev = &pdev->dev;
- platform_set_drvdata(pdev, mcpdm);
-
- return 0;
-
-err_clk:
- iounmap(mcpdm->io_base);
-err_resource:
- kfree(mcpdm);
-exit:
- return ret;
-}
-
-int __devexit omap_mcpdm_remove(struct platform_device *pdev)
-{
- struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- clk_put(mcpdm_ptr->clk);
-
- iounmap(mcpdm_ptr->io_base);
-
- mcpdm_ptr->clk = NULL;
- mcpdm_ptr->free = 0;
- mcpdm_ptr->dev = NULL;
-
- kfree(mcpdm_ptr);
-
- return 0;
-}
-
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h
deleted file mode 100644
index df3e16f..0000000
--- a/sound/soc/omap/mcpdm.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * mcpdm.h -- Defines for McPDM driver
- *
- * Author: Jorge Eduardo Candelaria <x0107209@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-/* McPDM registers */
-
-#define MCPDM_REVISION 0x00
-#define MCPDM_SYSCONFIG 0x10
-#define MCPDM_IRQSTATUS_RAW 0x24
-#define MCPDM_IRQSTATUS 0x28
-#define MCPDM_IRQENABLE_SET 0x2C
-#define MCPDM_IRQENABLE_CLR 0x30
-#define MCPDM_IRQWAKE_EN 0x34
-#define MCPDM_DMAENABLE_SET 0x38
-#define MCPDM_DMAENABLE_CLR 0x3C
-#define MCPDM_DMAWAKEEN 0x40
-#define MCPDM_CTRL 0x44
-#define MCPDM_DN_DATA 0x48
-#define MCPDM_UP_DATA 0x4C
-#define MCPDM_FIFO_CTRL_DN 0x50
-#define MCPDM_FIFO_CTRL_UP 0x54
-#define MCPDM_DN_OFFSET 0x58
-
-/*
- * MCPDM_IRQ bit fields
- * IRQSTATUS_RAW, IRQSTATUS, IRQENABLE_SET, IRQENABLE_CLR
- */
-
-#define MCPDM_DN_IRQ (1 << 0)
-#define MCPDM_DN_IRQ_EMPTY (1 << 1)
-#define MCPDM_DN_IRQ_ALMST_EMPTY (1 << 2)
-#define MCPDM_DN_IRQ_FULL (1 << 3)
-
-#define MCPDM_UP_IRQ (1 << 8)
-#define MCPDM_UP_IRQ_EMPTY (1 << 9)
-#define MCPDM_UP_IRQ_ALMST_FULL (1 << 10)
-#define MCPDM_UP_IRQ_FULL (1 << 11)
-
-#define MCPDM_DOWNLINK_IRQ_MASK 0x00F
-#define MCPDM_UPLINK_IRQ_MASK 0xF00
-
-/*
- * MCPDM_DMAENABLE bit fields
- */
-
-#define DMA_DN_ENABLE 0x1
-#define DMA_UP_ENABLE 0x2
-
-/*
- * MCPDM_CTRL bit fields
- */
-
-#define PDM_UP1_EN 0x0001
-#define PDM_UP2_EN 0x0002
-#define PDM_UP3_EN 0x0004
-#define PDM_DN1_EN 0x0008
-#define PDM_DN2_EN 0x0010
-#define PDM_DN3_EN 0x0020
-#define PDM_DN4_EN 0x0040
-#define PDM_DN5_EN 0x0080
-#define PDMOUTFORMAT 0x0100
-#define CMD_INT 0x0200
-#define STATUS_INT 0x0400
-#define SW_UP_RST 0x0800
-#define SW_DN_RST 0x1000
-#define PDM_UP_MASK 0x007
-#define PDM_DN_MASK 0x0F8
-#define PDM_CMD_MASK 0x200
-#define PDM_STATUS_MASK 0x400
-
-
-#define PDMOUTFORMAT_LJUST (0 << 8)
-#define PDMOUTFORMAT_RJUST (1 << 8)
-
-/*
- * MCPDM_FIFO_CTRL bit fields
- */
-
-#define UP_THRES_MAX 0xF
-#define DN_THRES_MAX 0xF
-
-/*
- * MCPDM_DN_OFFSET bit fields
- */
-
-#define DN_OFST_RX1_EN 0x0001
-#define DN_OFST_RX2_EN 0x0100
-
-#define DN_OFST_RX1 1
-#define DN_OFST_RX2 9
-#define DN_OFST_MAX 0x1F
-
-#define MCPDM_UPLINK 1
-#define MCPDM_DOWNLINK 2
-
-struct omap_mcpdm_link {
- int irq_mask;
- int threshold;
- int format;
- int channels;
-};
-
-struct omap_mcpdm_platform_data {
- unsigned long phys_base;
- u16 irq;
-};
-
-struct omap_mcpdm {
- struct device *dev;
- unsigned long phys_base;
- void __iomem *io_base;
- u8 free;
- int irq;
-
- spinlock_t lock;
- struct omap_mcpdm_platform_data *pdata;
- struct clk *clk;
- struct omap_mcpdm_link *downlink;
- struct omap_mcpdm_link *uplink;
- struct completion irq_completion;
-
- int dn_channels;
- int up_channels;
-};
-
-extern void omap_mcpdm_start(int stream);
-extern void omap_mcpdm_stop(int stream);
-extern int omap_mcpdm_capture_open(struct omap_mcpdm_link *uplink);
-extern int omap_mcpdm_playback_open(struct omap_mcpdm_link *downlink);
-extern int omap_mcpdm_capture_close(struct omap_mcpdm_link *uplink);
-extern int omap_mcpdm_playback_close(struct omap_mcpdm_link *downlink);
-extern int omap_mcpdm_request(void);
-extern void omap_mcpdm_free(void);
-extern int omap_mcpdm_set_offset(int offset1, int offset2);
-int __devinit omap_mcpdm_probe(struct platform_device *pdev);
-int __devexit omap_mcpdm_remove(struct platform_device *pdev);
diff --git a/sound/soc/omap/omap-abe-dsp.c b/sound/soc/omap/omap-abe-dsp.c
new file mode 100644
index 0000000..b66db25
--- /dev/null
+++ b/sound/soc/omap/omap-abe-dsp.c
@@ -0,0 +1,2876 @@
+/*
+ * omap-abe-dsp.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Authors: Liam Girdwood <lrg@ti.com>
+ * Misael Lopez Cruz <misael.lopez@ti.com>
+ * Sebastien Guiriec <s-guiriec@ti.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/i2c/twl.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/firmware.h>
+#include <linux/debugfs.h>
+#include <linux/opp.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+#include <plat/dma.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/omap-abe-dsp.h>
+
+#include "omap-abe-dsp.h"
+#include "omap-abe.h"
+#include "abe/abe_main.h"
+#include "abe/port_mgr.h"
+
+#define OMAP_ABE_HS_DC_OFFSET_STEP (1800 / 8)
+#define OMAP_ABE_HF_DC_OFFSET_STEP (4600 / 8)
+
+static const char *abe_memory_bank[5] = {
+ "dmem",
+ "cmem",
+ "smem",
+ "pmem",
+ "mpu"
+};
+
+
+/*
+ * ABE loadable coefficients.
+ * The coefficients and their mixer configurations are loaded with the firmware
+ * blob during probe().
+ */
+
+struct coeff_config {
+ char name[ABE_COEFF_NAME_SIZE];
+ u32 count;
+ u32 coeff;
+ char texts[ABE_COEFF_NUM_TEXTS][ABE_COEFF_TEXT_SIZE];
+};
+
+/*
+ * ABE Firmware Header.
+ * The ABE firmware blob has a header that describes each data section. This
+ * way we can store coefficients etc in the firmware.
+ */
+struct fw_header {
+ u32 magic; /* magic number */
+ u32 crc; /* optional crc */
+ u32 firmware_size; /* payload size */
+ u32 coeff_size; /* coefficient payload size */
+ u32 coeff_version; /* coefficient version */
+ u32 firmware_version; /* min version of ABE firmware required */
+ u32 num_equ; /* number of equalizers */
+};
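+
+/*
+ * Illustrative sketch of how probe() could walk the firmware blob once
+ * request_firmware() has returned it in fw. The section order assumed
+ * here (header, equalizer texts, coefficient data, DSP code) and any
+ * padding are properties of the tool that generates the blob, not of
+ * this structure, so treat the offsets below as assumptions.
+ *
+ *	const u8 *data = fw->data;
+ *	const struct fw_header *hdr = (const struct fw_header *)data;
+ *	const struct coeff_config *texts =
+ *		(const struct coeff_config *)(data + sizeof(*hdr));
+ *	const s32 *coeffs = (const s32 *)((const u8 *)texts +
+ *					  hdr->num_equ * sizeof(*texts));
+ *	const u32 *code = (const u32 *)((const u8 *)coeffs + hdr->coeff_size);
+ */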
+
+struct abe_opp_req {
+ struct device *dev;
+ struct list_head node;
+ int opp;
+};
+
+/*
+ * ABE private data.
+ */
+struct abe_data {
+ struct omap4_abe_dsp_pdata *abe_pdata;
+ struct device *dev;
+ struct snd_soc_platform *platform;
+ struct delayed_work delayed_work;
+ struct mutex mutex;
+ struct mutex opp_mutex;
+ struct mutex opp_req_mutex;
+ struct clk *clk;
+ void __iomem *io_base[5];
+ int irq;
+ int opp;
+ unsigned long opp_freqs[OMAP_ABE_OPP_COUNT];
+
+ /* DC offset cancellation */
+ int power_mode;
+ u32 dc_hsl;
+ u32 dc_hsr;
+ u32 dc_hfl;
+ u32 dc_hfr;
+
+ int active;
+
+ /* coefficients */
+ struct fw_header hdr;
+ u32 *firmware;
+ s32 *equ[ABE_MAX_EQU];
+ int equ_profile[ABE_MAX_EQU];
+ struct soc_enum equalizer_enum[ABE_MAX_EQU];
+ struct snd_kcontrol_new equalizer_control[ABE_MAX_EQU];
+ struct coeff_config *equ_texts;
+
+ int mono_mix[ABE_NUM_MONO_MIXERS];
+
+ /* DAPM mixer config - TODO: some of this can be replaced with HAL update */
+ u32 widget_opp[ABE_NUM_DAPM_REG + 1];
+
+ struct list_head opp_req;
+ int opp_req_count;
+
+ u16 router[16];
+
+ struct snd_pcm_substream *ping_pong_substream;
+ int first_irq;
+
+ struct snd_pcm_substream *psubs;
+
+#ifdef CONFIG_DEBUG_FS
+ /* ABE runtime debug config */
+
+ /* it's intended that we can switch on/off individual debug items */
+ u32 dbg_format1; /* TODO: match flag names here to debug format flags */
+ u32 dbg_format2;
+ u32 dbg_format3;
+
+ u32 dbg_buffer_bytes;
+ u32 dbg_circular;
+ u32 dbg_buffer_msecs; /* size of buffer in msecs */
+ u32 dbg_elem_bytes;
+ dma_addr_t dbg_buffer_addr;
+ wait_queue_head_t wait;
+ int dbg_reader_offset;
+ int dbg_dma_offset;
+ int dbg_complete;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_fmt1;
+ struct dentry *debugfs_fmt2;
+ struct dentry *debugfs_fmt3;
+ struct dentry *debugfs_size;
+ struct dentry *debugfs_data;
+ struct dentry *debugfs_circ;
+ struct dentry *debugfs_elem_bytes;
+ struct dentry *debugfs_opp_level;
+ char *dbg_buffer;
+ struct omap_pcm_dma_data *dma_data;
+ int dma_ch;
+ int dma_req;
+#endif
+};
+
+static struct abe_data *the_abe;
+
+static int aess_set_runtime_opp_level(struct abe_data *abe);
+
+// TODO: map to the new version of HAL
+static unsigned int abe_dsp_read(struct snd_soc_platform *platform,
+ unsigned int reg)
+{
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+
+ BUG_ON(reg > ABE_NUM_DAPM_REG);
+ return abe->widget_opp[reg];
+}
+
+static int abe_dsp_write(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val)
+{
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+
+ BUG_ON(reg > ABE_NUM_DAPM_REG);
+ abe->widget_opp[reg] = val;
+ return 0;
+}
+
+static void abe_irq_pingpong_subroutine(u32 *data)
+{
+ u32 dst, n_bytes;
+
+ abe_read_next_ping_pong_buffer(MM_DL_PORT, &dst, &n_bytes);
+ abe_set_ping_pong_buffer(MM_DL_PORT, n_bytes);
+
+ /* Do not call ALSA function for first IRQ */
+ if (the_abe->first_irq) {
+ the_abe->first_irq = 0;
+ } else {
+ if (the_abe->ping_pong_substream)
+ snd_pcm_period_elapsed(the_abe->ping_pong_substream);
+ }
+}
+
+static irqreturn_t abe_irq_handler(int irq, void *dev_id)
+{
+ struct abe_data *abe = dev_id;
+
+ /* TODO: handle underruns/overruns/errors */
+ pm_runtime_get_sync(abe->dev);
+ abe_clear_irq(); // TODO: why is IRQ not cleared after processing ?
+ abe_irq_processing();
+ pm_runtime_put_sync_suspend(abe->dev);
+ return IRQ_HANDLED;
+}
+
+// TODO: these should really be called internally since we will know the McPDM state
+void abe_dsp_pm_get(void)
+{
+ pm_runtime_get_sync(the_abe->dev);
+}
+EXPORT_SYMBOL_GPL(abe_dsp_pm_get);
+
+void abe_dsp_pm_put(void)
+{
+ pm_runtime_put_sync(the_abe->dev);
+}
+EXPORT_SYMBOL_GPL(abe_dsp_pm_put);
+
+void abe_dsp_shutdown(void)
+{
+ struct omap4_abe_dsp_pdata *pdata = the_abe->abe_pdata;
+ int ret;
+
+ if (!the_abe->active && !abe_check_activity()) {
+ abe_set_opp_processing(ABE_OPP25);
+ the_abe->opp = 25;
+ abe_stop_event_generator();
+ udelay(250);
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(the_abe->dev, the_abe->dev,
+ the_abe->opp_freqs[0]);
+ if (ret)
+ dev_err(the_abe->dev,
+ "failed to scale to lowest OPP\n");
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(abe_dsp_shutdown);
+
+void abe_dsp_set_hs_offset(int left, int right, int mult)
+{
+ /* TODO: do not use abe global structure */
+ if (the_abe == NULL)
+ return;
+
+ if (left >= 8)
+ left -= 16;
+ the_abe->dc_hsl = OMAP_ABE_HS_DC_OFFSET_STEP * left * mult;
+
+ if (right >= 8)
+ right -= 16;
+ the_abe->dc_hsr = OMAP_ABE_HS_DC_OFFSET_STEP * right * mult;
+}
+EXPORT_SYMBOL(abe_dsp_set_hs_offset);
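+
+/*
+ * Worked example (illustrative): the left/right arguments are 4-bit
+ * codes, so values 8..15 wrap to -8..-1 before scaling. With
+ * OMAP_ABE_HS_DC_OFFSET_STEP = 1800 / 8 = 225, a call such as
+ *
+ *	abe_dsp_set_hs_offset(12, 3, 2);
+ *
+ * yields dc_hsl = 225 * (12 - 16) * 2 = -1800 and
+ * dc_hsr = 225 * 3 * 2 = 1350. The physical unit of the step (assumed
+ * to be microvolts of headset DC offset) is not defined in this file.
+ */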
+
+void abe_dsp_set_hf_offset(int left, int right)
+{
+ /* TODO: do not use abe global structure */
+ if (the_abe == NULL)
+ return;
+
+ if (left >= 8)
+ left -= 16;
+ the_abe->dc_hfl = OMAP_ABE_HF_DC_OFFSET_STEP * left;
+
+ if (right >= 8)
+ right -= 16;
+ the_abe->dc_hfr = OMAP_ABE_HF_DC_OFFSET_STEP * right;
+}
+EXPORT_SYMBOL(abe_dsp_set_hf_offset);
+
+void abe_dsp_set_power_mode(int mode)
+{
+ if (the_abe == NULL)
+ return;
+
+ /* TODO: do not use abe global structure */
+ the_abe->power_mode = mode;
+}
+EXPORT_SYMBOL(abe_dsp_set_power_mode);
+
+/*
+ * These TLV settings will need to be fine-tuned for each individual control
+ */
+
+/* Media DL1 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(mm_dl1_tlv, -12000, 100, 3000);
+
+/* Tones DL1 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(tones_dl1_tlv, -12000, 100, 3000);
+
+/* Voice DL1 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(voice_dl1_tlv, -12000, 100, 3000);
+
+/* Capture DL1 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(capture_dl1_tlv, -12000, 100, 3000);
+
+/* Media DL2 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(mm_dl2_tlv, -12000, 100, 3000);
+
+/* Tones DL2 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(tones_dl2_tlv, -12000, 100, 3000);
+
+/* Voice DL2 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(voice_dl2_tlv, -12000, 100, 3000);
+
+/* Capture DL2 volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(capture_dl2_tlv, -12000, 100, 3000);
+
+/* SDT volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(sdt_ul_tlv, -12000, 100, 3000);
+
+/* SDT volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(sdt_dl_tlv, -12000, 100, 3000);
+
+/* AUDUL volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(audul_mm_tlv, -12000, 100, 3000);
+
+/* AUDUL volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(audul_tones_tlv, -12000, 100, 3000);
+
+/* AUDUL volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(audul_vx_ul_tlv, -12000, 100, 3000);
+
+/* AUDUL volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(audul_vx_dl_tlv, -12000, 100, 3000);
+
+/* VXREC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(vxrec_mm_dl_tlv, -12000, 100, 3000);
+
+/* VXREC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(vxrec_tones_tlv, -12000, 100, 3000);
+
+/* VXREC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(vxrec_vx_dl_tlv, -12000, 100, 3000);
+
+/* VXREC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(vxrec_vx_ul_tlv, -12000, 100, 3000);
+
+/* DMIC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(dmic_tlv, -12000, 100, 3000);
+
+/* BT UL volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(btul_tlv, -12000, 100, 3000);
+
+/* AMIC volume control from -120 to 30 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(amic_tlv, -12000, 100, 3000);
+
+//TODO: we have to use the shift value atm to represent register id due to current HAL
+static int dl1_put_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ // TODO: optimise all of these to call HAL abe_enable_gain(mixer, enable)
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ abe_enable_gain(MIXDL1, mc->reg);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ abe_disable_gain(MIXDL1, mc->reg);
+ }
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int dl2_put_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ abe_enable_gain(MIXDL2, mc->reg);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ abe_disable_gain(MIXDL2, mc->reg);
+ }
+
+ pm_runtime_put_sync(the_abe->dev);
+ return 1;
+}
+
+static int audio_ul_put_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ abe_enable_gain(MIXAUDUL, mc->reg);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ abe_disable_gain(MIXAUDUL, mc->reg);
+ }
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int vxrec_put_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ abe_enable_gain(MIXVXREC, mc->reg);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ abe_disable_gain(MIXVXREC, mc->reg);
+ }
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int sdt_put_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ abe_enable_gain(MIXSDT, mc->reg);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ abe_disable_gain(MIXSDT, mc->reg);
+ }
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int abe_get_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ ucontrol->value.integer.value[0] = the_abe->widget_opp[mc->shift];
+ return 0;
+}
+
+static int abe_dsp_set_mono_mixer(int id, int enable)
+{
+ int mixer;
+
+ switch (id) {
+ case MIX_DL1_MONO:
+ mixer = MIXDL1;
+ break;
+ case MIX_DL2_MONO:
+ mixer = MIXDL2;
+ break;
+ case MIX_AUDUL_MONO:
+ mixer = MIXAUDUL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_mono_mixer(mixer, enable);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int abe_put_mono_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int id = mc->shift - MIX_DL1_MONO;
+
+ the_abe->mono_mix[id] = ucontrol->value.integer.value[0];
+ abe_dsp_set_mono_mixer(mc->shift, the_abe->mono_mix[id]);
+
+ return 1;
+}
+
+static int abe_get_mono_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int id = mc->shift - MIX_DL1_MONO;
+
+ ucontrol->value.integer.value[0] = the_abe->mono_mix[id];
+ return 0;
+}
+
+/* router IDs that match our mixer strings */
+static const abe_router_t router[] = {
+ ZERO_labelID, /* strangely this is not 0 */
+ DMIC1_L_labelID, DMIC1_R_labelID,
+ DMIC2_L_labelID, DMIC2_R_labelID,
+ DMIC3_L_labelID, DMIC3_R_labelID,
+ BT_UL_L_labelID, BT_UL_R_labelID,
+ MM_EXT_IN_L_labelID, MM_EXT_IN_R_labelID,
+ AMIC_L_labelID, AMIC_R_labelID,
+ VX_REC_L_labelID, VX_REC_R_labelID,
+};
+
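+ /*
+ * Write the selected source label into the shadow router table and push
+ * the complete upload route configuration to the ABE.
+ */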
+static int ul_mux_put_route(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int mux = ucontrol->value.enumerated.item[0];
+ int reg = e->reg - ABE_MUX(0);
+
+ if (mux > ABE_ROUTES_UL)
+ return 0;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ /* TODO: get all this via firmware */
+ if (reg < 8) {
+ /* 0 .. 9 = MM_UL */
+ the_abe->router[reg] = router[mux];
+ } else if (reg < 12) {
+ /* 10 .. 11 = MM_UL2 */
+ /* 12 .. 13 = VX_UL */
+ the_abe->router[reg + 2] = router[mux];
+ }
+
+ /* 2nd arg here is unused */
+ abe_set_router_configuration(UPROUTE, 0, (u32 *)the_abe->router);
+
+ if (router[mux] != ZERO_labelID)
+ the_abe->widget_opp[e->reg] = e->shift_l;
+ else
+ the_abe->widget_opp[e->reg] = 0;
+
+ snd_soc_dapm_mux_update_power(widget, kcontrol, 1, mux, e);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int ul_mux_get_route(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *e =
+ (struct soc_enum *)kcontrol->private_value;
+ int reg = e->reg - ABE_MUX(0), i, rval = 0;
+
+ /* TODO: get all this via firmware */
+ if (reg < 8) {
+ /* 0 .. 9 = MM_UL */
+ rval = the_abe->router[reg];
+ } else if (reg < 12) {
+ /* 10 .. 11 = MM_UL2 */
+ /* 12 .. 13 = VX_UL */
+ rval = the_abe->router[reg + 2];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(router); i++) {
+ if (router[i] == rval) {
+ ucontrol->value.integer.value[0] = i;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+static int abe_put_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ if (ucontrol->value.integer.value[0]) {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ } else {
+ the_abe->widget_opp[mc->shift] = ucontrol->value.integer.value[0];
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ }
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+
+static int volume_put_sdt_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+
+ abe_write_mixer(MIXSDT, abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->reg);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int volume_put_audul_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_mixer(MIXAUDUL, abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->reg);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int volume_put_vxrec_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_mixer(MIXVXREC, abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->reg);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int volume_put_dl1_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_mixer(MIXDL1, abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->reg);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int volume_put_dl2_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_mixer(MIXDL2, abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->reg);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
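+ /*
+ * Stereo gain control: the left value goes through abe_val_to_gain(),
+ * the right value is mapped from its 0..149 index straight to millibels.
+ */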
+static int volume_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_gain(mc->reg,
+ abe_val_to_gain(ucontrol->value.integer.value[0]),
+ RAMP_2MS, mc->shift);
+ abe_write_gain(mc->reg,
+ -12000 + (ucontrol->value.integer.value[1] * 100),
+ RAMP_2MS, mc->rshift);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 1;
+}
+
+static int volume_get_dl1_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_mixer(MIXDL1, &val, mc->reg);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int volume_get_dl2_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_mixer(MIXDL2, &val, mc->reg);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int volume_get_audul_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_mixer(MIXAUDUL, &val, mc->reg);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int volume_get_vxrec_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_mixer(MIXVXREC, &val, mc->reg);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int volume_get_sdt_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_mixer(MIXSDT, &val, mc->reg);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int volume_get_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ u32 val;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_read_gain(mc->reg, &val, mc->shift);
+ ucontrol->value.integer.value[0] = abe_gain_to_val(val);
+ abe_read_gain(mc->reg, &val, mc->rshift);
+ ucontrol->value.integer.value[1] = abe_gain_to_val(val);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
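+ /* Copy the selected coefficient profile from the firmware-provided table into the ABE equalizer. */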
+static int abe_dsp_set_equalizer(unsigned int id, unsigned int profile)
+{
+ abe_equ_t equ_params;
+ int len;
+
+ if (id >= the_abe->hdr.num_equ)
+ return -EINVAL;
+
+ if (profile >= the_abe->equ_texts[id].count)
+ return -EINVAL;
+
+ len = the_abe->equ_texts[id].coeff;
+ equ_params.equ_length = len;
+ memcpy(equ_params.coef.type1, the_abe->equ[id] + profile * len,
+ len * sizeof(u32));
+ the_abe->equ_profile[id] = profile;
+
+ pm_runtime_get_sync(the_abe->dev);
+ abe_write_equalizer(id + 1, &equ_params);
+ pm_runtime_put_sync(the_abe->dev);
+
+ return 0;
+}
+
+static int abe_get_equalizer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *eqc = (struct soc_enum *)kcontrol->private_value;
+
+ ucontrol->value.integer.value[0] = the_abe->equ_profile[eqc->reg];
+ return 0;
+}
+
+static int abe_put_equalizer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *eqc = (struct soc_enum *)kcontrol->private_value;
+ u16 val = ucontrol->value.enumerated.item[0];
+ int ret;
+
+ ret = abe_dsp_set_equalizer(eqc->reg, val);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+}
+
+int snd_soc_info_enum_ext1(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = e->max;
+
+ if (uinfo->value.enumerated.item > e->max - 1)
+ uinfo->value.enumerated.item = e->max - 1;
+ strcpy(uinfo->value.enumerated.name,
+ snd_soc_get_enum_text(e, uinfo->value.enumerated.item));
+
+ return 0;
+}
+
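+ /* These strings must stay in the same order as the router[] label table above. */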
+static const char *route_ul_texts[] = {
+ "None", "DMic0L", "DMic0R", "DMic1L", "DMic1R", "DMic2L", "DMic2R",
+ "BT Left", "BT Right", "MMExt Left", "MMExt Right", "AMic0", "AMic1",
+ "VX Left", "VX Right"
+};
+
+/* ROUTE_UL Mux table */
+static const struct soc_enum abe_enum[] = {
+ SOC_ENUM_SINGLE(MUX_MM_UL10, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL11, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL12, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL13, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL14, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL15, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL16, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL17, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL20, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_MM_UL21, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_VX_UL0, 0, 15, route_ul_texts),
+ SOC_ENUM_SINGLE(MUX_VX_UL1, 0, 15, route_ul_texts),
+};
+
+static const struct snd_kcontrol_new mm_ul00_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[0],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul01_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[1],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul02_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[2],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul03_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[3],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul04_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[4],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul05_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[5],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul06_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[6],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul07_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[7],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul10_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[8],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_ul11_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[9],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_vx0_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[10],
+ ul_mux_get_route, ul_mux_put_route);
+
+static const struct snd_kcontrol_new mm_vx1_control =
+ SOC_DAPM_ENUM_EXT("Route", abe_enum[11],
+ ul_mux_get_route, ul_mux_put_route);
+
+/* DL1 mixer paths */
+static const struct snd_kcontrol_new dl1_mixer_controls[] = {
+ SOC_SINGLE_EXT("Tones", MIX_DL1_INPUT_TONES, MIX_DL1_TONES, 1, 0,
+ abe_get_mixer, dl1_put_mixer),
+ SOC_SINGLE_EXT("Voice", MIX_DL1_INPUT_VX_DL, MIX_DL1_VOICE, 1, 0,
+ abe_get_mixer, dl1_put_mixer),
+ SOC_SINGLE_EXT("Capture", MIX_DL1_INPUT_MM_UL2, MIX_DL1_CAPTURE, 1, 0,
+ abe_get_mixer, dl1_put_mixer),
+ SOC_SINGLE_EXT("Multimedia", MIX_DL1_INPUT_MM_DL, MIX_DL1_MEDIA, 1, 0,
+ abe_get_mixer, dl1_put_mixer),
+};
+
+/* DL2 mixer paths */
+static const struct snd_kcontrol_new dl2_mixer_controls[] = {
+ SOC_SINGLE_EXT("Tones", MIX_DL2_INPUT_TONES, MIX_DL2_TONES, 1, 0,
+ abe_get_mixer, dl2_put_mixer),
+ SOC_SINGLE_EXT("Voice", MIX_DL2_INPUT_VX_DL, MIX_DL2_VOICE, 1, 0,
+ abe_get_mixer, dl2_put_mixer),
+ SOC_SINGLE_EXT("Capture", MIX_DL2_INPUT_MM_UL2, MIX_DL2_CAPTURE, 1, 0,
+ abe_get_mixer, dl2_put_mixer),
+ SOC_SINGLE_EXT("Multimedia", MIX_DL2_INPUT_MM_DL, MIX_DL2_MEDIA, 1, 0,
+ abe_get_mixer, dl2_put_mixer),
+};
+
+/* AUDUL ("Voice Capture Mixer") mixer paths */
+static const struct snd_kcontrol_new audio_ul_mixer_controls[] = {
+ SOC_SINGLE_EXT("Tones Playback", MIX_AUDUL_INPUT_TONES, MIX_AUDUL_TONES, 1, 0,
+ abe_get_mixer, audio_ul_put_mixer),
+ SOC_SINGLE_EXT("Media Playback", MIX_AUDUL_INPUT_MM_DL, MIX_AUDUL_MEDIA, 1, 0,
+ abe_get_mixer, audio_ul_put_mixer),
+ SOC_SINGLE_EXT("Capture", MIX_AUDUL_INPUT_UPLINK, MIX_AUDUL_CAPTURE, 1, 0,
+ abe_get_mixer, audio_ul_put_mixer),
+};
+
+/* VXREC ("Capture Mixer") mixer paths */
+static const struct snd_kcontrol_new vx_rec_mixer_controls[] = {
+ SOC_SINGLE_EXT("Tones", MIX_VXREC_INPUT_TONES, MIX_VXREC_TONES, 1, 0,
+ abe_get_mixer, vxrec_put_mixer),
+ SOC_SINGLE_EXT("Voice Playback", MIX_VXREC_INPUT_VX_DL,
+ MIX_VXREC_VOICE_PLAYBACK, 1, 0, abe_get_mixer, vxrec_put_mixer),
+ SOC_SINGLE_EXT("Voice Capture", MIX_VXREC_INPUT_VX_UL,
+ MIX_VXREC_VOICE_CAPTURE, 1, 0, abe_get_mixer, vxrec_put_mixer),
+ SOC_SINGLE_EXT("Media Playback", MIX_VXREC_INPUT_MM_DL,
+ MIX_VXREC_MEDIA, 1, 0, abe_get_mixer, vxrec_put_mixer),
+};
+
+/* SDT ("Sidetone Mixer") mixer paths */
+static const struct snd_kcontrol_new sdt_mixer_controls[] = {
+ SOC_SINGLE_EXT("Capture", MIX_SDT_INPUT_UP_MIXER, MIX_SDT_CAPTURE, 1, 0,
+ abe_get_mixer, sdt_put_mixer),
+ SOC_SINGLE_EXT("Playback", MIX_SDT_INPUT_DL1_MIXER, MIX_SDT_PLAYBACK, 1, 0,
+ abe_get_mixer, sdt_put_mixer),
+};
+
+/* Virtual PDM_DL Switch */
+static const struct snd_kcontrol_new pdm_dl1_switch_controls =
+ SOC_SINGLE_EXT("Switch", ABE_VIRTUAL_SWITCH, MIX_SWITCH_PDM_DL, 1, 0,
+ abe_get_mixer, abe_put_switch);
+
+/* Virtual BT_VX_DL Switch */
+static const struct snd_kcontrol_new bt_vx_dl_switch_controls =
+ SOC_SINGLE_EXT("Switch", ABE_VIRTUAL_SWITCH, MIX_SWITCH_BT_VX_DL, 1, 0,
+ abe_get_mixer, abe_put_switch);
+
+/* Virtual MM_EXT_DL Switch */
+static const struct snd_kcontrol_new mm_ext_dl_switch_controls =
+ SOC_SINGLE_EXT("Switch", ABE_VIRTUAL_SWITCH, MIX_SWITCH_MM_EXT_DL, 1, 0,
+ abe_get_mixer, abe_put_switch);
+
+static const struct snd_kcontrol_new abe_controls[] = {
+ /* DL1 mixer gains */
+ SOC_SINGLE_EXT_TLV("DL1 Media Playback Volume",
+ MIX_DL1_INPUT_MM_DL, 0, 149, 0,
+ volume_get_dl1_mixer, volume_put_dl1_mixer, mm_dl1_tlv),
+ SOC_SINGLE_EXT_TLV("DL1 Tones Playback Volume",
+ MIX_DL1_INPUT_TONES, 0, 149, 0,
+ volume_get_dl1_mixer, volume_put_dl1_mixer, tones_dl1_tlv),
+ SOC_SINGLE_EXT_TLV("DL1 Voice Playback Volume",
+ MIX_DL1_INPUT_VX_DL, 0, 149, 0,
+ volume_get_dl1_mixer, volume_put_dl1_mixer, voice_dl1_tlv),
+ SOC_SINGLE_EXT_TLV("DL1 Capture Playback Volume",
+ MIX_DL1_INPUT_MM_UL2, 0, 149, 0,
+ volume_get_dl1_mixer, volume_put_dl1_mixer, capture_dl1_tlv),
+
+ /* DL2 mixer gains */
+ SOC_SINGLE_EXT_TLV("DL2 Media Playback Volume",
+ MIX_DL2_INPUT_MM_DL, 0, 149, 0,
+ volume_get_dl2_mixer, volume_put_dl2_mixer, mm_dl2_tlv),
+ SOC_SINGLE_EXT_TLV("DL2 Tones Playback Volume",
+ MIX_DL2_INPUT_TONES, 0, 149, 0,
+ volume_get_dl2_mixer, volume_put_dl2_mixer, tones_dl2_tlv),
+ SOC_SINGLE_EXT_TLV("DL2 Voice Playback Volume",
+ MIX_DL2_INPUT_VX_DL, 0, 149, 0,
+ volume_get_dl2_mixer, volume_put_dl2_mixer, voice_dl2_tlv),
+ SOC_SINGLE_EXT_TLV("DL2 Capture Playback Volume",
+ MIX_DL2_INPUT_MM_UL2, 0, 149, 0,
+ volume_get_dl2_mixer, volume_put_dl2_mixer, capture_dl2_tlv),
+
+ /* VXREC mixer gains */
+ SOC_SINGLE_EXT_TLV("VXREC Media Volume",
+ MIX_VXREC_INPUT_MM_DL, 0, 149, 0,
+ volume_get_vxrec_mixer, volume_put_vxrec_mixer, vxrec_mm_dl_tlv),
+ SOC_SINGLE_EXT_TLV("VXREC Tones Volume",
+ MIX_VXREC_INPUT_TONES, 0, 149, 0,
+ volume_get_vxrec_mixer, volume_put_vxrec_mixer, vxrec_tones_tlv),
+ SOC_SINGLE_EXT_TLV("VXREC Voice DL Volume",
+ MIX_VXREC_INPUT_VX_UL, 0, 149, 0,
+ volume_get_vxrec_mixer, volume_put_vxrec_mixer, vxrec_vx_dl_tlv),
+ SOC_SINGLE_EXT_TLV("VXREC Voice UL Volume",
+ MIX_VXREC_INPUT_VX_DL, 0, 149, 0,
+ volume_get_vxrec_mixer, volume_put_vxrec_mixer, vxrec_vx_ul_tlv),
+
+ /* AUDUL mixer gains */
+ SOC_SINGLE_EXT_TLV("AUDUL Media Volume",
+ MIX_AUDUL_INPUT_MM_DL, 0, 149, 0,
+ volume_get_audul_mixer, volume_put_audul_mixer, audul_mm_tlv),
+ SOC_SINGLE_EXT_TLV("AUDUL Tones Volume",
+ MIX_AUDUL_INPUT_TONES, 0, 149, 0,
+ volume_get_audul_mixer, volume_put_audul_mixer, audul_tones_tlv),
+ SOC_SINGLE_EXT_TLV("AUDUL Voice UL Volume",
+ MIX_AUDUL_INPUT_UPLINK, 0, 149, 0,
+ volume_get_audul_mixer, volume_put_audul_mixer, audul_vx_ul_tlv),
+ SOC_SINGLE_EXT_TLV("AUDUL Voice DL Volume",
+ MIX_AUDUL_INPUT_VX_DL, 0, 149, 0,
+ volume_get_audul_mixer, volume_put_audul_mixer, audul_vx_dl_tlv),
+
+ /* SDT mixer gains */
+ SOC_SINGLE_EXT_TLV("SDT UL Volume",
+ MIX_SDT_INPUT_UP_MIXER, 0, 149, 0,
+ volume_get_sdt_mixer, volume_put_sdt_mixer, sdt_ul_tlv),
+ SOC_SINGLE_EXT_TLV("SDT DL Volume",
+ MIX_SDT_INPUT_DL1_MIXER, 0, 149, 0,
+ volume_get_sdt_mixer, volume_put_sdt_mixer, sdt_dl_tlv),
+
+ /* DMIC gains */
+ SOC_DOUBLE_EXT_TLV("DMIC1 UL Volume",
+ GAINS_DMIC1, GAIN_LEFT_OFFSET, GAIN_RIGHT_OFFSET, 149, 0,
+ volume_get_gain, volume_put_gain, dmic_tlv),
+
+ SOC_DOUBLE_EXT_TLV("DMIC2 UL Volume",
+ GAINS_DMIC2, GAIN_LEFT_OFFSET, GAIN_RIGHT_OFFSET, 149, 0,
+ volume_get_gain, volume_put_gain, dmic_tlv),
+
+ SOC_DOUBLE_EXT_TLV("DMIC3 UL Volume",
+ GAINS_DMIC3, GAIN_LEFT_OFFSET, GAIN_RIGHT_OFFSET, 149, 0,
+ volume_get_gain, volume_put_gain, dmic_tlv),
+
+ SOC_DOUBLE_EXT_TLV("AMIC UL Volume",
+ GAINS_AMIC, GAIN_LEFT_OFFSET, GAIN_RIGHT_OFFSET, 149, 0,
+ volume_get_gain, volume_put_gain, amic_tlv),
+
+ SOC_DOUBLE_EXT_TLV("BT UL Volume",
+ GAINS_BTUL, GAIN_LEFT_OFFSET, GAIN_RIGHT_OFFSET, 149, 0,
+ volume_get_gain, volume_put_gain, btul_tlv),
+
+ SOC_SINGLE_EXT("DL1 Mono Mixer", MIXDL1, MIX_DL1_MONO, 1, 0,
+ abe_get_mono_mixer, abe_put_mono_mixer),
+ SOC_SINGLE_EXT("DL2 Mono Mixer", MIXDL2, MIX_DL2_MONO, 1, 0,
+ abe_get_mono_mixer, abe_put_mono_mixer),
+ SOC_SINGLE_EXT("AUDUL Mono Mixer", MIXAUDUL, MIX_AUDUL_MONO, 1, 0,
+ abe_get_mono_mixer, abe_put_mono_mixer),
+};
+
+static const struct snd_soc_dapm_widget abe_dapm_widgets[] = {
+
+ /* Frontend AIFs */
+ SND_SOC_DAPM_AIF_IN("TONES_DL", "Tones Playback", 0,
+ W_AIF_TONES_DL, ABE_OPP_25, 0),
+ SND_SOC_DAPM_AIF_IN("VX_DL", "Voice Playback", 0,
+ W_AIF_VX_DL, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_OUT("VX_UL", "Voice Capture", 0,
+ W_AIF_VX_UL, ABE_OPP_50, 0),
+ /* the MM_UL mapping is intentional */
+ SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0,
+ W_AIF_MM_UL1, ABE_OPP_100, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0,
+ W_AIF_MM_UL2, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL", " MultiMedia1 Playback", 0,
+ W_AIF_MM_DL, ABE_OPP_25, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL_LP", " MultiMedia1 LP Playback", 0,
+ W_AIF_MM_DL_LP, ABE_OPP_25, 0),
+ SND_SOC_DAPM_AIF_IN("VIB_DL", "Vibra Playback", 0,
+ W_AIF_VIB_DL, ABE_OPP_100, 0),
+ SND_SOC_DAPM_AIF_IN("MODEM_DL", "MODEM Playback", 0,
+ W_AIF_MODEM_DL, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_OUT("MODEM_UL", "MODEM Capture", 0,
+ W_AIF_MODEM_UL, ABE_OPP_50, 0),
+
+ /* Backend DAIs */
+ SND_SOC_DAPM_AIF_IN("PDM_UL1", "Analog Capture", 0,
+ W_AIF_PDM_UL1, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_OUT("PDM_DL1", "HS Playback", 0,
+ W_AIF_PDM_DL1, ABE_OPP_25, 0),
+ SND_SOC_DAPM_AIF_OUT("PDM_DL2", "HF Playback", 0,
+ W_AIF_PDM_DL2, ABE_OPP_100, 0),
+ SND_SOC_DAPM_AIF_OUT("PDM_VIB", "Vibra Playback", 0,
+ W_AIF_PDM_VIB, ABE_OPP_100, 0),
+ SND_SOC_DAPM_AIF_IN("BT_VX_UL", "BT Capture", 0,
+ W_AIF_BT_VX_UL, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_OUT("BT_VX_DL", "BT Playback", 0,
+ W_AIF_BT_VX_DL, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_IN("MM_EXT_UL", "FM Capture", 0,
+ W_AIF_MM_EXT_UL, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_EXT_DL", "FM Playback", 0,
+ W_AIF_MM_EXT_DL, ABE_OPP_25, 0),
+ SND_SOC_DAPM_AIF_IN("DMIC0", "DMIC0 Capture", 0,
+ W_AIF_DMIC0, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_IN("DMIC1", "DMIC1 Capture", 0,
+ W_AIF_DMIC1, ABE_OPP_50, 0),
+ SND_SOC_DAPM_AIF_IN("DMIC2", "DMIC2 Capture", 0,
+ W_AIF_DMIC2, ABE_OPP_50, 0),
+
+ /* ROUTE_UL Capture Muxes */
+ SND_SOC_DAPM_MUX("MUX_UL00",
+ W_MUX_UL00, ABE_OPP_50, 0, &mm_ul00_control),
+ SND_SOC_DAPM_MUX("MUX_UL01",
+ W_MUX_UL01, ABE_OPP_50, 0, &mm_ul01_control),
+ SND_SOC_DAPM_MUX("MUX_UL02",
+ W_MUX_UL02, ABE_OPP_50, 0, &mm_ul02_control),
+ SND_SOC_DAPM_MUX("MUX_UL03",
+ W_MUX_UL03, ABE_OPP_50, 0, &mm_ul03_control),
+ SND_SOC_DAPM_MUX("MUX_UL04",
+ W_MUX_UL04, ABE_OPP_50, 0, &mm_ul04_control),
+ SND_SOC_DAPM_MUX("MUX_UL05",
+ W_MUX_UL05, ABE_OPP_50, 0, &mm_ul05_control),
+ SND_SOC_DAPM_MUX("MUX_UL06",
+ W_MUX_UL06, ABE_OPP_50, 0, &mm_ul06_control),
+ SND_SOC_DAPM_MUX("MUX_UL07",
+ W_MUX_UL07, ABE_OPP_50, 0, &mm_ul07_control),
+ SND_SOC_DAPM_MUX("MUX_UL10",
+ W_MUX_UL10, ABE_OPP_50, 0, &mm_ul10_control),
+ SND_SOC_DAPM_MUX("MUX_UL11",
+ W_MUX_UL11, ABE_OPP_50, 0, &mm_ul11_control),
+ SND_SOC_DAPM_MUX("MUX_VX0",
+ W_MUX_VX00, ABE_OPP_50, 0, &mm_vx0_control),
+ SND_SOC_DAPM_MUX("MUX_VX1",
+ W_MUX_VX01, ABE_OPP_50, 0, &mm_vx1_control),
+
+ /* DL1 & DL2 Playback Mixers */
+ SND_SOC_DAPM_MIXER("DL1 Mixer",
+ W_MIXER_DL1, ABE_OPP_25, 0, dl1_mixer_controls,
+ ARRAY_SIZE(dl1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DL2 Mixer",
+ W_MIXER_DL2, ABE_OPP_100, 0, dl2_mixer_controls,
+ ARRAY_SIZE(dl2_mixer_controls)),
+
+ /* DL1 Mixer Input volumes - TODO: verify */
+ SND_SOC_DAPM_PGA("DL1 Media Volume",
+ W_VOLUME_DL1, 0, 0, NULL, 0),
+
+ /* AUDIO_UL_MIXER */
+ SND_SOC_DAPM_MIXER("Voice Capture Mixer",
+ W_MIXER_AUDIO_UL, ABE_OPP_50, 0, audio_ul_mixer_controls,
+ ARRAY_SIZE(audio_ul_mixer_controls)),
+
+ /* VX_REC_MIXER */
+ SND_SOC_DAPM_MIXER("Capture Mixer",
+ W_MIXER_VX_REC, ABE_OPP_50, 0, vx_rec_mixer_controls,
+ ARRAY_SIZE(vx_rec_mixer_controls)),
+
+ /* SDT_MIXER - TODO: should this not be OPP25? */
+ SND_SOC_DAPM_MIXER("Sidetone Mixer",
+ W_MIXER_SDT, ABE_OPP_25, 0, sdt_mixer_controls,
+ ARRAY_SIZE(sdt_mixer_controls)),
+
+ /*
+ * The following three are virtual switches to select the output port
+ * after DL1 Gain.
+ */
+
+ /* Virtual PDM_DL1 Switch */
+ SND_SOC_DAPM_MIXER("DL1 PDM",
+ W_VSWITCH_DL1_PDM, ABE_OPP_25, 0, &pdm_dl1_switch_controls, 1),
+
+ /* Virtual BT_VX_DL Switch */
+ SND_SOC_DAPM_MIXER("DL1 BT_VX",
+ W_VSWITCH_DL1_BT_VX, ABE_OPP_50, 0, &bt_vx_dl_switch_controls, 1),
+
+ /* Virtual MM_EXT_DL Switch - TODO: confirm OPP level here */
+ SND_SOC_DAPM_MIXER("DL1 MM_EXT",
+ W_VSWITCH_DL1_MM_EXT, ABE_OPP_50, 0, &mm_ext_dl_switch_controls, 1),
+
+ /* Virtuals to join our capture sources */
+ SND_SOC_DAPM_MIXER("Sidetone Capture VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Voice Capture VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("DL1 Capture VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("DL2 Capture VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Join our MM_DL and MM_DL_LP playback */
+ SND_SOC_DAPM_MIXER("MM_DL VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Virtual MODEM and VX_UL mixer */
+ SND_SOC_DAPM_MIXER("VX UL VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("VX DL VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Virtual pins to force backends ON for now */
+ SND_SOC_DAPM_OUTPUT("BE_OUT"),
+ SND_SOC_DAPM_INPUT("BE_IN"),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+
+ /* MUX_UL00 - ROUTE_UL - Chan 0 */
+ {"MUX_UL00", "DMic0L", "DMIC0"},
+ {"MUX_UL00", "DMic0R", "DMIC0"},
+ {"MUX_UL00", "DMic1L", "DMIC1"},
+ {"MUX_UL00", "DMic1R", "DMIC1"},
+ {"MUX_UL00", "DMic2L", "DMIC2"},
+ {"MUX_UL00", "DMic2R", "DMIC2"},
+ {"MUX_UL00", "BT Left", "BT_VX_UL"},
+ {"MUX_UL00", "BT Right", "BT_VX_UL"},
+ {"MUX_UL00", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL00", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL00", "AMic0", "PDM_UL1"},
+ {"MUX_UL00", "AMic1", "PDM_UL1"},
+ {"MUX_UL00", "VX Left", "Capture Mixer"},
+ {"MUX_UL00", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL00"},
+
+ /* MUX_UL01 - ROUTE_UL - Chan 1 */
+ {"MUX_UL01", "DMic0L", "DMIC0"},
+ {"MUX_UL01", "DMic0R", "DMIC0"},
+ {"MUX_UL01", "DMic1L", "DMIC1"},
+ {"MUX_UL01", "DMic1R", "DMIC1"},
+ {"MUX_UL01", "DMic2L", "DMIC2"},
+ {"MUX_UL01", "DMic2R", "DMIC2"},
+ {"MUX_UL01", "BT Left", "BT_VX_UL"},
+ {"MUX_UL01", "BT Right", "BT_VX_UL"},
+ {"MUX_UL01", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL01", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL01", "AMic0", "PDM_UL1"},
+ {"MUX_UL01", "AMic1", "PDM_UL1"},
+ {"MUX_UL01", "VX Left", "Capture Mixer"},
+ {"MUX_UL01", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL01"},
+
+ /* MUX_UL02 - ROUTE_UL - Chan 2 */
+ {"MUX_UL02", "DMic0L", "DMIC0"},
+ {"MUX_UL02", "DMic0R", "DMIC0"},
+ {"MUX_UL02", "DMic1L", "DMIC1"},
+ {"MUX_UL02", "DMic1R", "DMIC1"},
+ {"MUX_UL02", "DMic2L", "DMIC2"},
+ {"MUX_UL02", "DMic2R", "DMIC2"},
+ {"MUX_UL02", "BT Left", "BT_VX_UL"},
+ {"MUX_UL02", "BT Right", "BT_VX_UL"},
+ {"MUX_UL02", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL02", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL02", "AMic0", "PDM_UL1"},
+ {"MUX_UL02", "AMic1", "PDM_UL1"},
+ {"MUX_UL02", "VX Left", "Capture Mixer"},
+ {"MUX_UL02", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL02"},
+
+ /* MUX_UL03 - ROUTE_UL - Chan 3 */
+ {"MUX_UL03", "DMic0L", "DMIC0"},
+ {"MUX_UL03", "DMic0R", "DMIC0"},
+ {"MUX_UL03", "DMic1L", "DMIC1"},
+ {"MUX_UL03", "DMic1R", "DMIC1"},
+ {"MUX_UL03", "DMic2L", "DMIC2"},
+ {"MUX_UL03", "DMic2R", "DMIC2"},
+ {"MUX_UL03", "BT Left", "BT_VX_UL"},
+ {"MUX_UL03", "BT Right", "BT_VX_UL"},
+ {"MUX_UL03", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL03", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL03", "AMic0", "PDM_UL1"},
+ {"MUX_UL03", "AMic1", "PDM_UL1"},
+ {"MUX_UL03", "VX Left", "Capture Mixer"},
+ {"MUX_UL03", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL03"},
+
+ /* MUX_UL04 - ROUTE_UL - Chan 4 */
+ {"MUX_UL04", "DMic0L", "DMIC0"},
+ {"MUX_UL04", "DMic0R", "DMIC0"},
+ {"MUX_UL04", "DMic1L", "DMIC1"},
+ {"MUX_UL04", "DMic1R", "DMIC1"},
+ {"MUX_UL04", "DMic2L", "DMIC2"},
+ {"MUX_UL04", "DMic2R", "DMIC2"},
+ {"MUX_UL04", "BT Left", "BT_VX_UL"},
+ {"MUX_UL04", "BT Right", "BT_VX_UL"},
+ {"MUX_UL04", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL04", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL04", "AMic0", "PDM_UL1"},
+ {"MUX_UL04", "AMic1", "PDM_UL1"},
+ {"MUX_UL04", "VX Left", "Capture Mixer"},
+ {"MUX_UL04", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL04"},
+
+ /* MUX_UL05 - ROUTE_UL - Chan 5 */
+ {"MUX_UL05", "DMic0L", "DMIC0"},
+ {"MUX_UL05", "DMic0R", "DMIC0"},
+ {"MUX_UL05", "DMic1L", "DMIC1"},
+ {"MUX_UL05", "DMic1R", "DMIC1"},
+ {"MUX_UL05", "DMic2L", "DMIC2"},
+ {"MUX_UL05", "DMic2R", "DMIC2"},
+ {"MUX_UL05", "BT Left", "BT_VX_UL"},
+ {"MUX_UL05", "BT Right", "BT_VX_UL"},
+ {"MUX_UL05", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL05", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL05", "AMic0", "PDM_UL1"},
+ {"MUX_UL05", "AMic1", "PDM_UL1"},
+ {"MUX_UL05", "VX Left", "Capture Mixer"},
+ {"MUX_UL05", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL05"},
+
+ /* MUX_UL06 - ROUTE_UL - Chan 6 */
+ {"MUX_UL06", "DMic0L", "DMIC0"},
+ {"MUX_UL06", "DMic0R", "DMIC0"},
+ {"MUX_UL06", "DMic1L", "DMIC1"},
+ {"MUX_UL06", "DMic1R", "DMIC1"},
+ {"MUX_UL06", "DMic2L", "DMIC2"},
+ {"MUX_UL06", "DMic2R", "DMIC2"},
+ {"MUX_UL06", "BT Left", "BT_VX_UL"},
+ {"MUX_UL06", "BT Right", "BT_VX_UL"},
+ {"MUX_UL06", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL06", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL06", "AMic0", "PDM_UL1"},
+ {"MUX_UL06", "AMic1", "PDM_UL1"},
+ {"MUX_UL06", "VX Left", "Capture Mixer"},
+ {"MUX_UL06", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL06"},
+
+ /* MUX_UL07 - ROUTE_UL - Chan 7 */
+ {"MUX_UL07", "DMic0L", "DMIC0"},
+ {"MUX_UL07", "DMic0R", "DMIC0"},
+ {"MUX_UL07", "DMic1L", "DMIC1"},
+ {"MUX_UL07", "DMic1R", "DMIC1"},
+ {"MUX_UL07", "DMic2L", "DMIC2"},
+ {"MUX_UL07", "DMic2R", "DMIC2"},
+ {"MUX_UL07", "BT Left", "BT_VX_UL"},
+ {"MUX_UL07", "BT Right", "BT_VX_UL"},
+ {"MUX_UL07", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL07", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL07", "AMic0", "PDM_UL1"},
+ {"MUX_UL07", "AMic1", "PDM_UL1"},
+ {"MUX_UL07", "VX Left", "Capture Mixer"},
+ {"MUX_UL07", "VX Right", "Capture Mixer"},
+ {"MM_UL1", NULL, "MUX_UL07"},
+
+ /* MUX_UL10 - ROUTE_UL - Chan 10 */
+ {"MUX_UL10", "DMic0L", "DMIC0"},
+ {"MUX_UL10", "DMic0R", "DMIC0"},
+ {"MUX_UL10", "DMic1L", "DMIC1"},
+ {"MUX_UL10", "DMic1R", "DMIC1"},
+ {"MUX_UL10", "DMic2L", "DMIC2"},
+ {"MUX_UL10", "DMic2R", "DMIC2"},
+ {"MUX_UL10", "BT Left", "BT_VX_UL"},
+ {"MUX_UL10", "BT Right", "BT_VX_UL"},
+ {"MUX_UL10", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL10", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL10", "AMic0", "PDM_UL1"},
+ {"MUX_UL10", "AMic1", "PDM_UL1"},
+ {"MUX_UL10", "VX Left", "Capture Mixer"},
+ {"MUX_UL10", "VX Right", "Capture Mixer"},
+ {"MM_UL2", NULL, "MUX_UL10"},
+
+ /* MUX_UL11 - ROUTE_UL - Chan 11 */
+ {"MUX_UL11", "DMic0L", "DMIC0"},
+ {"MUX_UL11", "DMic0R", "DMIC0"},
+ {"MUX_UL11", "DMic1L", "DMIC1"},
+ {"MUX_UL11", "DMic1R", "DMIC1"},
+ {"MUX_UL11", "DMic2L", "DMIC2"},
+ {"MUX_UL11", "DMic2R", "DMIC2"},
+ {"MUX_UL11", "BT Left", "BT_VX_UL"},
+ {"MUX_UL11", "BT Right", "BT_VX_UL"},
+ {"MUX_UL11", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_UL11", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_UL11", "AMic0", "PDM_UL1"},
+ {"MUX_UL11", "AMic1", "PDM_UL1"},
+ {"MUX_UL11", "VX Left", "Capture Mixer"},
+ {"MUX_UL11", "VX Right", "Capture Mixer"},
+ {"MM_UL2", NULL, "MUX_UL11"},
+
+ /* MUX_VX0 - ROUTE_UL - Chan 20 */
+ {"MUX_VX0", "DMic0L", "DMIC0"},
+ {"MUX_VX0", "DMic0R", "DMIC0"},
+ {"MUX_VX0", "DMic1L", "DMIC1"},
+ {"MUX_VX0", "DMic1R", "DMIC1"},
+ {"MUX_VX0", "DMic2L", "DMIC2"},
+ {"MUX_VX0", "DMic2R", "DMIC2"},
+ {"MUX_VX0", "BT Left", "BT_VX_UL"},
+ {"MUX_VX0", "BT Right", "BT_VX_UL"},
+ {"MUX_VX0", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_VX0", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_VX0", "AMic0", "PDM_UL1"},
+ {"MUX_VX0", "AMic1", "PDM_UL1"},
+ {"MUX_VX0", "VX Left", "Capture Mixer"},
+ {"MUX_VX0", "VX Right", "Capture Mixer"},
+
+ /* MUX_VX1 - ROUTE_UL - Chan 21 */
+ {"MUX_VX1", "DMic0L", "DMIC0"},
+ {"MUX_VX1", "DMic0R", "DMIC0"},
+ {"MUX_VX1", "DMic1L", "DMIC1"},
+ {"MUX_VX1", "DMic1R", "DMIC1"},
+ {"MUX_VX1", "DMic2L", "DMIC2"},
+ {"MUX_VX1", "DMic2R", "DMIC2"},
+ {"MUX_VX1", "BT Left", "BT_VX_UL"},
+ {"MUX_VX1", "BT Right", "BT_VX_UL"},
+ {"MUX_VX1", "MMExt Left", "MM_EXT_UL"},
+ {"MUX_VX1", "MMExt Right", "MM_EXT_UL"},
+ {"MUX_VX1", "AMic0", "PDM_UL1"},
+ {"MUX_VX1", "AMic1", "PDM_UL1"},
+ {"MUX_VX1", "VX Left", "Capture Mixer"},
+ {"MUX_VX1", "VX Right", "Capture Mixer"},
+
+ /* Headset (DL1) playback path */
+ {"DL1 Mixer", "Tones", "TONES_DL"},
+ {"DL1 Mixer", "Voice", "VX DL VMixer"},
+ {"DL1 Mixer", "Capture", "DL1 Capture VMixer"},
+ {"DL1 Capture VMixer", NULL, "MUX_UL10"},
+ {"DL1 Capture VMixer", NULL, "MUX_UL11"},
+ {"DL1 Mixer", "Multimedia", "MM_DL VMixer"},
+ {"MM_DL VMixer", NULL, "MM_DL"},
+ {"MM_DL VMixer", NULL, "MM_DL_LP"},
+
+ /* Sidetone Mixer */
+ {"Sidetone Mixer", "Playback", "DL1 Mixer"},
+ {"Sidetone Mixer", "Capture", "Sidetone Capture VMixer"},
+ {"Sidetone Capture VMixer", NULL, "MUX_VX0"},
+ {"Sidetone Capture VMixer", NULL, "MUX_VX1"},
+
+ /* Playback Output selection after DL1 Gain */
+ {"DL1 BT_VX", "Switch", "Sidetone Mixer"},
+ {"DL1 MM_EXT", "Switch", "Sidetone Mixer"},
+ {"DL1 PDM", "Switch", "Sidetone Mixer"},
+ {"PDM_DL1", NULL, "DL1 PDM"},
+ {"BT_VX_DL", NULL, "DL1 BT_VX"},
+ {"MM_EXT_DL", NULL, "DL1 MM_EXT"},
+
+ /* Handsfree (DL2) playback path */
+ {"DL2 Mixer", "Tones", "TONES_DL"},
+ {"DL2 Mixer", "Voice", "VX DL VMixer"},
+ {"DL2 Mixer", "Capture", "DL2 Capture VMixer"},
+ {"DL2 Capture VMixer", NULL, "MUX_UL10"},
+ {"DL2 Capture VMixer", NULL, "MUX_UL11"},
+ {"DL2 Mixer", "Multimedia", "MM_DL VMixer"},
+ {"MM_DL VMixer", NULL, "MM_DL"},
+ {"MM_DL VMixer", NULL, "MM_DL_LP"},
+ {"PDM_DL2", NULL, "DL2 Mixer"},
+
+ /* VxREC Mixer */
+ {"Capture Mixer", "Tones", "TONES_DL"},
+ {"Capture Mixer", "Voice Playback", "VX DL VMixer"},
+ {"Capture Mixer", "Voice Capture", "VX UL VMixer"},
+ {"Capture Mixer", "Media Playback", "MM_DL VMixer"},
+ {"MM_DL VMixer", NULL, "MM_DL"},
+ {"MM_DL VMixer", NULL, "MM_DL_LP"},
+
+ /* Audio UL mixer */
+ {"Voice Capture Mixer", "Tones Playback", "TONES_DL"},
+ {"Voice Capture Mixer", "Media Playback", "MM_DL VMixer"},
+ {"MM_DL VMixer", NULL, "MM_DL"},
+ {"MM_DL VMixer", NULL, "MM_DL_LP"},
+ {"Voice Capture Mixer", "Capture", "Voice Capture VMixer"},
+ {"Voice Capture VMixer", NULL, "MUX_VX0"},
+ {"Voice Capture VMixer", NULL, "MUX_VX1"},
+
+ /* BT */
+ {"VX UL VMixer", NULL, "Voice Capture Mixer"},
+
+ /* Vibra */
+ {"PDM_VIB", NULL, "VIB_DL"},
+
+ /* VX and MODEM */
+ {"VX_UL", NULL, "VX UL VMixer"},
+ {"MODEM_UL", NULL, "VX UL VMixer"},
+ {"VX DL VMixer", NULL, "VX_DL"},
+ {"VX DL VMixer", NULL, "MODEM_DL"},
+
+ /* Backend Enablement - TODO: maybe re-work */
+ {"BE_OUT", NULL, "PDM_DL1"},
+ {"BE_OUT", NULL, "PDM_DL2"},
+ {"BE_OUT", NULL, "PDM_VIB"},
+ {"BE_OUT", NULL, "MM_EXT_DL"},
+ {"BE_OUT", NULL, "BT_VX_DL"},
+ {"PDM_UL1", NULL, "BE_IN"},
+ {"BT_VX_UL", NULL, "BE_IN"},
+ {"MM_EXT_UL", NULL, "BE_IN"},
+ {"DMIC0", NULL, "BE_IN"},
+ {"DMIC1", NULL, "BE_IN"},
+ {"DMIC2", NULL, "BE_IN"},
+};
+
+#ifdef CONFIG_DEBUG_FS
+
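+ /* Current write position of the debug DMA, as a byte offset into the trace buffer. */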
+static int abe_dbg_get_dma_pos(struct abe_data *abe)
+{
+ return omap_get_dma_dst_pos(abe->dma_ch) - abe->dbg_buffer_addr;
+}
+
+static void abe_dbg_dma_irq(int ch, u16 stat, void *data)
+{
+}
+
+static int abe_dbg_start_dma(struct abe_data *abe, int circular)
+{
+ struct omap_dma_channel_params dma_params;
+ int err;
+
+ /* TODO: start the DMA in either:
+ *
+ * 1) circular buffer mode, where the DMA restarts when it gets to
+ * the end of the buffer.
+ * 2) default mode, where the DMA stops at the end of the buffer.
+ */
+
+ abe->dma_req = OMAP44XX_DMA_ABE_REQ_7;
+ err = omap_request_dma(abe->dma_req, "ABE debug",
+ abe_dbg_dma_irq, abe, &abe->dma_ch);
+ if (abe->dbg_circular) {
+ /*
+ * Link channel with itself so DMA doesn't need any
+ * reprogramming while looping the buffer
+ */
+ omap_dma_link_lch(abe->dma_ch, abe->dma_ch);
+ }
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.trigger = abe->dma_req;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+ dma_params.src_start = D_DEBUG_FIFO_ADDR + ABE_DMEM_BASE_ADDRESS_L3;
+ dma_params.dst_start = abe->dbg_buffer_addr;
+ dma_params.src_port = OMAP_DMA_PORT_MPUI;
+ dma_params.src_ei = 1;
+ dma_params.src_fi = 1 - abe->dbg_elem_bytes;
+
+ dma_params.elem_count = abe->dbg_elem_bytes >> 2; /* element size in 32-bit words */
+ dma_params.frame_count = abe->dbg_buffer_bytes / abe->dbg_elem_bytes;
+ omap_set_dma_params(abe->dma_ch, &dma_params);
+
+ omap_enable_dma_irq(abe->dma_ch, OMAP_DMA_FRAME_IRQ);
+ omap_set_dma_src_burst_mode(abe->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(abe->dma_ch, OMAP_DMA_DATA_BURST_16);
+
+ abe->dbg_reader_offset = 0;
+
+ pm_runtime_get_sync(abe->dev);
+ omap_start_dma(abe->dma_ch);
+ return 0;
+}
+
+static void abe_dbg_stop_dma(struct abe_data *abe)
+{
+ while (omap_get_dma_active_status(abe->dma_ch))
+ omap_stop_dma(abe->dma_ch);
+
+ if (abe->dbg_circular)
+ omap_dma_unlink_lch(abe->dma_ch, abe->dma_ch);
+ omap_free_dma(abe->dma_ch);
+ pm_runtime_put_sync(abe->dev);
+}
+
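+ /* Opening the debugfs data file sizes and allocates the trace buffer, then starts the capture DMA. */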
+static int abe_open_data(struct inode *inode, struct file *file)
+{
+ struct abe_data *abe = inode->i_private;
+
+ abe->dbg_elem_bytes = 128; /* size of debug data per tick */
+
+ if (abe->dbg_format1)
+ abe->dbg_elem_bytes += ABE_DBG_FLAG1_SIZE;
+ if (abe->dbg_format2)
+ abe->dbg_elem_bytes += ABE_DBG_FLAG2_SIZE;
+ if (abe->dbg_format3)
+ abe->dbg_elem_bytes += ABE_DBG_FLAG3_SIZE;
+
+ abe->dbg_buffer_bytes = abe->dbg_elem_bytes * 4 *
+ abe->dbg_buffer_msecs;
+
+ abe->dbg_buffer = dma_alloc_writecombine(abe->dev,
+ abe->dbg_buffer_bytes, &abe->dbg_buffer_addr, GFP_KERNEL);
+ if (abe->dbg_buffer == NULL)
+ return -ENOMEM;
+
+ file->private_data = inode->i_private;
+ abe->dbg_complete = 0;
+ abe_dbg_start_dma(abe, abe->dbg_circular);
+
+ return 0;
+}
+
+static int abe_release_data(struct inode *inode, struct file *file)
+{
+ struct abe_data *abe = inode->i_private;
+
+ abe_dbg_stop_dma(abe);
+
+ dma_free_writecombine(abe->dev, abe->dbg_buffer_bytes,
+ abe->dbg_buffer, abe->dbg_buffer_addr);
+ return 0;
+}
+
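+ /* Copy captured trace data to user space, handling reader wrap-around in the buffer. */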
+static ssize_t abe_copy_to_user(struct abe_data *abe, char __user *user_buf,
+ size_t count)
+{
+ /* check for reader buffer wrap */
+ if (abe->dbg_reader_offset + count > abe->dbg_buffer_bytes) {
+ int size = abe->dbg_buffer_bytes - abe->dbg_reader_offset;
+
+ /* wrap */
+ if (copy_to_user(user_buf,
+ abe->dbg_buffer + abe->dbg_reader_offset, size))
+ return -EFAULT;
+
+ /* if not circular, mark the read complete and return */
+ if (!abe->dbg_circular) {
+ abe->dbg_complete = 1;
+ return count;
+ }
+
+ if (copy_to_user(user_buf,
+ abe->dbg_buffer, count - size))
+ return -EFAULT;
+ abe->dbg_reader_offset = count - size;
+ return count;
+ } else {
+ /* no wrap */
+ if (copy_to_user(user_buf,
+ abe->dbg_buffer + abe->dbg_reader_offset, count))
+ return -EFAULT;
+ abe->dbg_reader_offset += count;
+
+ if (!abe->dbg_circular &&
+ abe->dbg_reader_offset == abe->dbg_buffer_bytes)
+ abe->dbg_complete = 1;
+
+ return count;
+ }
+}
+
+static ssize_t abe_read_data(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret = 0;
+ struct abe_data *abe = file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ int dma_offset, bytes;
+
+ add_wait_queue(&abe->wait, &wait);
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* TODO: check if this is really needed, or adjust the sleep delay.
+ * Without the delay the trace does not work. */
+ msleep_interruptible(1);
+ dma_offset = abe_dbg_get_dma_pos(abe);
+
+ /* is DMA finished ? */
+ if (abe->dbg_complete)
+ break;
+
+ /* get maximum amount of debug bytes we can read */
+ if (dma_offset >= abe->dbg_reader_offset) {
+ /* dma ptr is ahead of reader */
+ bytes = dma_offset - abe->dbg_reader_offset;
+ } else {
+ /* dma ptr is behind reader */
+ bytes = dma_offset + abe->dbg_buffer_bytes -
+ abe->dbg_reader_offset;
+ }
+
+ if (count > bytes)
+ count = bytes;
+
+ if (count > 0) {
+ ret = abe_copy_to_user(abe, user_buf, count);
+ break;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ schedule();
+
+ } while (1);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&abe->wait, &wait);
+
+ return ret;
+}
+
+static const struct file_operations abe_data_fops = {
+ .open = abe_open_data,
+ .read = abe_read_data,
+ .release = abe_release_data,
+};
+
+static void abe_init_debugfs(struct abe_data *abe)
+{
+ abe->debugfs_root = debugfs_create_dir("omap4-abe", NULL);
+ if (!abe->debugfs_root) {
+ printk(KERN_WARNING "ABE: Failed to create debugfs directory\n");
+ return;
+ }
+
+ abe->debugfs_fmt1 = debugfs_create_bool("format1", 0644,
+ abe->debugfs_root,
+ &abe->dbg_format1);
+ if (!abe->debugfs_fmt1)
+ printk(KERN_WARNING "ABE: Failed to create format1 debugfs file\n");
+
+ abe->debugfs_fmt2 = debugfs_create_bool("format2", 0644,
+ abe->debugfs_root,
+ &abe->dbg_format2);
+ if (!abe->debugfs_fmt2)
+ printk(KERN_WARNING "ABE: Failed to create format2 debugfs file\n");
+
+ abe->debugfs_fmt3 = debugfs_create_bool("format3", 0644,
+ abe->debugfs_root,
+ &abe->dbg_format3);
+ if (!abe->debugfs_fmt3)
+ printk(KERN_WARNING "ABE: Failed to create format3 debugfs file\n");
+
+ abe->debugfs_elem_bytes = debugfs_create_u32("element_bytes", 0604,
+ abe->debugfs_root,
+ &abe->dbg_elem_bytes);
+ if (!abe->debugfs_elem_bytes)
+ printk(KERN_WARNING "ABE: Failed to create element size debugfs file\n");
+
+ abe->debugfs_size = debugfs_create_u32("msecs", 0644,
+ abe->debugfs_root,
+ &abe->dbg_buffer_msecs);
+ if (!abe->debugfs_size)
+ printk(KERN_WARNING "ABE: Failed to create buffer size debugfs file\n");
+
+ abe->debugfs_circ = debugfs_create_bool("circular", 0644,
+ abe->debugfs_root,
+ &abe->dbg_circular);
+ if (!abe->debugfs_circ)
+ printk(KERN_WARNING "ABE: Failed to create circular mode debugfs file\n");
+
+ abe->debugfs_data = debugfs_create_file("debug", 0644,
+ abe->debugfs_root,
+ abe, &abe_data_fops);
+ if (!abe->debugfs_data)
+ printk(KERN_WARNING "ABE: Failed to create data debugfs file\n");
+
+ abe->debugfs_opp_level = debugfs_create_u32("opp_level", 0604,
+ abe->debugfs_root,
+ &abe->opp);
+ if (!abe->debugfs_opp_level)
+ printk(KERN_WARNING "ABE: Failed to create OPP level debugfs file\n");
+
+ abe->dbg_buffer_msecs = 500;
+ init_waitqueue_head(&abe->wait);
+}
+
+static void abe_cleanup_debugfs(struct abe_data *abe)
+{
+ debugfs_remove_recursive(abe->debugfs_root);
+}
+
+#else
+
+static inline void abe_init_debugfs(struct abe_data *abe)
+{
+}
+
+static inline void abe_cleanup_debugfs(struct abe_data *abe)
+{
+}
+#endif
+
+static const struct snd_pcm_hardware omap_abe_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = 4 * 1024,
+ .period_bytes_max = 24 * 1024,
+ .periods_min = 4,
+ .periods_max = 4,
+ .buffer_bytes_max = 24 * 1024 * 2,
+};
+
+static struct abe_opp_req *abe_opp_req_lookup(struct abe_data *abe,
+ struct device *dev)
+{
+ struct abe_opp_req *req, *tmp_req;
+
+ req = NULL;
+ list_for_each_entry(tmp_req, &abe->opp_req, node) {
+ if (tmp_req->dev == dev) {
+ req = tmp_req;
+ break;
+ }
+ }
+
+ return req;
+}
+
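+ /*
+ * Combine all external OPP requests: OR the per-device bitmasks and turn
+ * the highest set bit into an OPP percentage (25, 50 or 100).
+ */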
+static int abe_get_opp_req(struct abe_data *abe)
+{
+ struct abe_opp_req *req;
+ int opp = 0;
+
+ list_for_each_entry(req, &abe->opp_req, node)
+ opp |= req->opp;
+
+ opp = (1 << (fls(opp) - 1)) * 25;
+
+ return opp;
+}
+
+int abe_add_opp_req(struct device *dev, int opp)
+{
+ struct abe_opp_req *req;
+ int ret = 0;
+
+ mutex_lock(&the_abe->opp_req_mutex);
+
+ req = abe_opp_req_lookup(the_abe, dev);
+ if (!req) {
+ req = kzalloc(sizeof(struct abe_opp_req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ req->dev = dev;
+ /* use the same convention as ABE DSP DAPM */
+ req->opp = 1 << opp;
+ list_add(&req->node, &the_abe->opp_req);
+ the_abe->opp_req_count++;
+ } else {
+ req->opp = 1 << opp;
+ }
+
+ aess_set_runtime_opp_level(the_abe);
+
+out:
+ mutex_unlock(&the_abe->opp_req_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(abe_add_opp_req);
+
+int abe_remove_opp_req(struct device *dev)
+{
+ struct abe_opp_req *req;
+ int ret = 0;
+
+ mutex_lock(&the_abe->opp_req_mutex);
+
+ req = abe_opp_req_lookup(the_abe, dev);
+ if (!req) {
+ dev_err(dev, "trying to remove an invalid opp req\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_del(&req->node);
+ the_abe->opp_req_count--;
+ kfree(req);
+
+ aess_set_runtime_opp_level(the_abe);
+
+out:
+ mutex_unlock(&the_abe->opp_req_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(abe_remove_opp_req);
+
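+ /*
+ * Switch the ABE to a new OPP. When lowering the OPP the ABE processing
+ * load is reduced before the clock is scaled down; when raising it the
+ * clock is scaled up before the processing load is increased.
+ */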
+static int abe_set_opp_mode(struct abe_data *abe, int opp)
+{
+ struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata;
+ int ret = 0;
+
+ if (abe->opp > opp) {
+ /* Decrease OPP mode - no need of OPP100% */
+ switch (opp) {
+ case 25:
+ abe_set_opp_processing(ABE_OPP25);
+ udelay(250);
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP25]);
+ if (ret)
+ goto err_scale;
+ }
+ break;
+ case 50:
+ default:
+ abe_set_opp_processing(ABE_OPP50);
+ udelay(250);
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP50]);
+ if (ret)
+ goto err_scale;
+ }
+ break;
+ }
+ } else if (abe->opp < opp) {
+ /* Increase OPP mode */
+ switch (opp) {
+ case 25:
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP25]);
+ if (ret)
+ goto err_scale;
+ }
+ abe_set_opp_processing(ABE_OPP25);
+ break;
+ case 50:
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP50]);
+ if (ret)
+ goto err_scale;
+ }
+ abe_set_opp_processing(ABE_OPP50);
+ break;
+ case 100:
+ default:
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP100]);
+ if (ret)
+ goto err_scale;
+ }
+ abe_set_opp_processing(ABE_OPP100);
+ break;
+ }
+ }
+ abe->opp = opp;
+ dev_dbg(abe->dev, "new OPP level is %d\n", opp);
+
+ return 0;
+
+err_scale:
+ dev_err(abe->dev, "failed to scale to OPP%d\n", opp);
+ return ret;
+}
+
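+ /*
+ * Recompute the required OPP from the active DAPM widgets and any external
+ * requests, and apply the higher of the two.
+ */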
+static int aess_set_runtime_opp_level(struct abe_data *abe)
+{
+ int i, req_opp, opp = 0;
+
+ mutex_lock(&abe->opp_mutex);
+
+ /* now calculate OPP level based upon DAPM widget status */
+ for (i = 0; i < ABE_NUM_WIDGETS; i++) {
+ if (abe->widget_opp[ABE_WIDGET(i)]) {
+ dev_dbg(abe->dev, "OPP: id %d = %d%%\n", i,
+ abe->widget_opp[ABE_WIDGET(i)] * 25);
+ opp |= abe->widget_opp[ABE_WIDGET(i)];
+ }
+ }
+ opp = (1 << (fls(opp) - 1)) * 25;
+
+ /* opps requested outside ABE DSP driver (e.g. McPDM) */
+ req_opp = abe_get_opp_req(abe);
+
+ pm_runtime_get_sync(abe->dev);
+ abe_set_opp_mode(abe, max(opp, req_opp));
+ pm_runtime_put_sync(abe->dev);
+
+ mutex_unlock(&abe->opp_mutex);
+
+ return 0;
+}
+
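+ /* Initial gain setup: program default DL1/DL2 levels and mute all gains until their paths are enabled. */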
+static void abe_dsp_init_gains(struct abe_data *abe)
+{
+ /* Uplink gains */
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_MM_DL);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_TONES);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_VX_DL);
+
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_TONES);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_MM_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_UL);
+
+ abe_mute_gain(GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ abe_mute_gain(GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ abe_mute_gain(GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+
+ abe_mute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET);
+
+ abe_mute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET);
+
+ /* Downlink gains */
+ abe_write_gain(GAINS_DL1, GAIN_0dB, RAMP_2MS, GAIN_LEFT_OFFSET);
+ abe_write_gain(GAINS_DL1, GAIN_0dB, RAMP_2MS, GAIN_RIGHT_OFFSET);
+ abe_mute_gain(GAINS_DL1, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DL1, GAIN_RIGHT_OFFSET);
+
+ abe_write_gain(GAINS_DL2, GAIN_M7dB, RAMP_2MS, GAIN_LEFT_OFFSET);
+ abe_write_gain(GAINS_DL2, GAIN_M7dB, RAMP_2MS, GAIN_RIGHT_OFFSET);
+ abe_mute_gain(GAINS_DL2, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DL2, GAIN_RIGHT_OFFSET);
+
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_MM_DL);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_MM_UL2);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_VX_DL);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_TONES);
+
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_TONES);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_VX_DL);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_DL);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_UL2);
+
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL1);
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL2);
+
+ /* Sidetone gains */
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+}
+
+static int aess_save_context(struct abe_data *abe)
+{
+ /* mute gains not associated with FEs/BEs */
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_MM_DL);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_TONES);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_VX_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_TONES);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_MM_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_UL);
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL1);
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL2);
+
+ return 0;
+}
+
+static int aess_restore_context(struct abe_data *abe)
+{
+ struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata;
+ int i, ret;
+
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(the_abe->dev, the_abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP50]);
+ if (ret) {
+ dev_err(abe->dev, "failed to scale to OPP50\n");
+ return ret;
+ }
+ }
+
+ if (pdata && pdata->was_context_lost && pdata->was_context_lost(abe->dev))
+ abe_reload_fw(abe->firmware);
+
+ /* unmute gains not associated with FEs/BEs */
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_MM_DL);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_TONES);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_VX_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_TONES);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_MM_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_UL);
+ abe_unmute_gain(MIXECHO, MIX_ECHO_DL1);
+ abe_unmute_gain(MIXECHO, MIX_ECHO_DL2);
+
+ abe_set_router_configuration(UPROUTE, 0, (u32 *)abe->router);
+
+ /* DC offset cancellation setting */
+ if (abe->power_mode)
+ abe_write_pdmdl_offset(1, abe->dc_hsl * 2, abe->dc_hsr * 2);
+ else
+ abe_write_pdmdl_offset(1, abe->dc_hsl, abe->dc_hsr);
+
+ abe_write_pdmdl_offset(2, abe->dc_hfl, abe->dc_hfr);
+
+ for (i = 0; i < abe->hdr.num_equ; i++)
+ abe_dsp_set_equalizer(i, abe->equ_profile[i]);
+
+ for (i = 0; i < ABE_NUM_MONO_MIXERS; i++)
+ abe_dsp_set_mono_mixer(MIX_DL1_MONO + i, abe->mono_mix[i]);
+
+ return 0;
+}
+
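+ /* PCM open: the first active stream restores the ABE context, forces OPP100 and wakes the ABE. */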
+static int aess_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ int ret = 0;
+
+ mutex_lock(&abe->mutex);
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ pm_runtime_get_sync(abe->dev);
+
+ if (!abe->active++) {
+ abe->opp = 0;
+ aess_restore_context(abe);
+ abe_set_opp_mode(abe, 100);
+ abe_wakeup();
+ }
+
+ switch (dai->id) {
+ case ABE_FRONTEND_DAI_MODEM:
+ break;
+ case ABE_FRONTEND_DAI_LP_MEDIA:
+ snd_soc_set_runtime_hwparams(substream, &omap_abe_hardware);
+ ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 1024);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&abe->mutex);
+ return ret;
+}
+
+static int aess_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ abe_data_format_t format;
+ size_t period_size;
+ u32 dst;
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ if (dai->id != ABE_FRONTEND_DAI_LP_MEDIA)
+ return 0;
+
+ /* store the substream pointer for use by the ping-pong IRQ handler */
+ abe->ping_pong_substream = substream;
+
+ format.f = params_rate(params);
+ if (params_format(params) == SNDRV_PCM_FORMAT_S32_LE)
+ format.samp_format = STEREO_MSB;
+ else
+ format.samp_format = STEREO_16_16;
+
+ if (format.f == 44100)
+ abe_write_event_generator(EVENT_44100);
+
+ period_size = params_period_bytes(params);
+
+ /* plug the ping-pong buffer IRQ subroutine */
+ abe_plug_subroutine(&abe_irq_pingpong_player_id,
+ (abe_subroutine2) abe_irq_pingpong_subroutine,
+ SUB_1_PARAM, (u32 *)abe);
+
+ /* Connect a Ping-Pong cache-flush protocol to MM_DL port */
+ abe_connect_irq_ping_pong_port(MM_DL_PORT, &format,
+ abe_irq_pingpong_player_id,
+ period_size, &dst,
+ PING_PONG_WITH_MCU_IRQ);
+
+ /* Memory mapping for hw params */
+ runtime->dma_area = abe->io_base[0] + dst;
+ runtime->dma_addr = 0;
+ runtime->dma_bytes = period_size * 4;
+
+ /* Need to set the first buffer in order to get interrupt */
+ abe_set_ping_pong_buffer(MM_DL_PORT, period_size);
+ abe->first_irq = 1;
+
+ return 0;
+}
+
+static int aess_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+
+ mutex_lock(&abe->mutex);
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+ aess_set_runtime_opp_level(abe);
+ mutex_unlock(&abe->mutex);
+ return 0;
+}
+
+static int aess_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+
+ mutex_lock(&abe->mutex);
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ if (!--abe->active) {
+ abe_disable_irq();
+ aess_save_context(abe);
+ abe_dsp_shutdown();
+ } else {
+ /* Only scale the OPP level if the ABE is still active */
+ aess_set_runtime_opp_level(abe);
+ }
+
+ pm_runtime_put_sync(abe->dev);
+
+ mutex_unlock(&abe->mutex);
+ return 0;
+}
+
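+ /* Map the DMEM ping-pong buffer uncached into user space; only valid for the LP media front end. */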
+static int aess_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ int offset, size, err;
+
+ if (dai->id != ABE_FRONTEND_DAI_LP_MEDIA)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ size = vma->vm_end - vma->vm_start;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ err = io_remap_pfn_range(vma, vma->vm_start,
+ (ABE_DMEM_BASE_ADDRESS_MPU +
+ ABE_DMEM_BASE_OFFSET_PING_PONG + offset) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+
+ if (err)
+ return -EAGAIN;
+
+ return 0;
+}
+
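+ /* The playback pointer is read back from the ABE ping-pong buffer; it reports 0 while first_irq is still set. */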
+static snd_pcm_uframes_t aess_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ snd_pcm_uframes_t offset = 0;
+ u32 pingpong;
+
+ if (!abe->first_irq) {
+ abe_read_offset_from_ping_buffer(MM_DL_PORT, &pingpong);
+ offset = (snd_pcm_uframes_t)pingpong;
+ }
+
+ return offset;
+}
+
+static struct snd_pcm_ops omap_aess_pcm_ops = {
+ .open = aess_open,
+ .hw_params = aess_hw_params,
+ .prepare = aess_prepare,
+ .close = aess_close,
+ .pointer = aess_pointer,
+ .mmap = aess_mmap,
+};
+
+static int aess_stream_event(struct snd_soc_dapm_context *dapm)
+{
+ struct snd_soc_platform *platform = dapm->platform;
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+
+ if (abe->active)
+ aess_set_runtime_opp_level(abe);
+
+ return 0;
+}
+
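+ /* Register controls, DAPM widgets and routes; the equalizer controls are built at run time from the firmware header. */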
+static int abe_add_widgets(struct snd_soc_platform *platform)
+{
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct fw_header *hdr = &abe->hdr;
+ int i, j;
+
+ /* create equalizer controls */
+ for (i = 0; i < hdr->num_equ; i++) {
+ struct soc_enum *equalizer_enum = &abe->equalizer_enum[i];
+ struct snd_kcontrol_new *equalizer_control =
+ &abe->equalizer_control[i];
+
+ equalizer_enum->reg = i;
+ equalizer_enum->max = abe->equ_texts[i].count;
+ for (j = 0; j < abe->equ_texts[i].count; j++)
+ equalizer_enum->dtexts[j] = abe->equ_texts[i].texts[j];
+
+ equalizer_control->name = abe->equ_texts[i].name;
+ equalizer_control->private_value = (unsigned long)equalizer_enum;
+ equalizer_control->get = abe_get_equalizer;
+ equalizer_control->put = abe_put_equalizer;
+ equalizer_control->info = snd_soc_info_enum_ext1;
+ equalizer_control->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+
+ dev_dbg(platform->dev, "added EQU mixer: %s profiles %d\n",
+ abe->equ_texts[i].name, abe->equ_texts[i].count);
+
+ for (j = 0; j < abe->equ_texts[i].count; j++)
+ dev_dbg(platform->dev, " %s\n", equalizer_enum->dtexts[j]);
+ }
+
+ snd_soc_add_platform_controls(platform, abe->equalizer_control,
+ hdr->num_equ);
+
+ snd_soc_add_platform_controls(platform, abe_controls,
+ ARRAY_SIZE(abe_controls));
+
+ snd_soc_dapm_new_controls(&platform->dapm, abe_dapm_widgets,
+ ARRAY_SIZE(abe_dapm_widgets));
+
+ snd_soc_dapm_add_routes(&platform->dapm, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(&platform->dapm);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int abe_suspend(struct snd_soc_dai *dai)
+{
+ struct abe_data *abe = the_abe;
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s active %d\n",
+ __func__, dai->name, dai->active);
+
+ if (!dai->active)
+ return 0;
+
+ pm_runtime_get_sync(abe->dev);
+
+ switch (dai->id) {
+ case OMAP_ABE_DAI_PDM_UL:
+ abe_mute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_PDM_DL2:
+ case OMAP_ABE_DAI_PDM_VIB:
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ abe_mute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ abe_mute_gain(GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ abe_mute_gain(GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ abe_mute_gain(GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid DAI id %d\n",
+ __func__, dai->id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ pm_runtime_put_sync(abe->dev);
+ return ret;
+}
+
+static int abe_resume(struct snd_soc_dai *dai)
+{
+ struct abe_data *abe = the_abe;
+ struct omap4_abe_dsp_pdata *pdata = abe->abe_pdata;
+ int i, ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s active %d\n",
+ __func__, dai->name, dai->active);
+
+ if (!dai->active)
+ return 0;
+
+ /* context retained, no need to restore */
+	if (pdata && pdata->was_context_lost &&
+	    !pdata->was_context_lost(abe->dev))
+ return 0;
+
+ pm_runtime_get_sync(abe->dev);
+
+ if (pdata && pdata->device_scale) {
+ ret = pdata->device_scale(abe->dev, abe->dev,
+ abe->opp_freqs[OMAP_ABE_OPP50]);
+ if (ret) {
+ dev_err(abe->dev, "failed to scale to OPP50\n");
+ goto out;
+ }
+ }
+
+ abe_reload_fw(abe->firmware);
+
+ switch (dai->id) {
+ case OMAP_ABE_DAI_PDM_UL:
+ abe_unmute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_PDM_DL2:
+ case OMAP_ABE_DAI_PDM_VIB:
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ abe_unmute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ abe_unmute_gain(GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ abe_unmute_gain(GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ abe_unmute_gain(GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+ break;
+ default:
+ dev_err(dai->dev, "%s: invalid DAI id %d\n",
+ __func__, dai->id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ abe_set_router_configuration(UPROUTE, 0, (u32 *)abe->router);
+
+ if (abe->power_mode)
+ abe_write_pdmdl_offset(1, abe->dc_hsl * 2, abe->dc_hsr * 2);
+ else
+ abe_write_pdmdl_offset(1, abe->dc_hsl, abe->dc_hsr);
+
+ abe_write_pdmdl_offset(2, abe->dc_hfl, abe->dc_hfr);
+
+ for (i = 0; i < abe->hdr.num_equ; i++)
+ abe_dsp_set_equalizer(i, abe->equ_profile[i]);
+
+ for (i = 0; i < ABE_NUM_MONO_MIXERS; i++)
+ abe_dsp_set_mono_mixer(MIX_DL1_MONO + i, abe->mono_mix[i]);
+out:
+ pm_runtime_put_sync(abe->dev);
+ return ret;
+}
+#else
+#define abe_suspend NULL
+#define abe_resume NULL
+#endif
+
+static int abe_probe(struct snd_soc_platform *platform)
+{
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ struct opp *opp;
+ const u8 *fw_data;
+ unsigned long freq = ULONG_MAX;
+ int ret = 0, i, opp_count, offset = 0;
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ const struct firmware *fw;
+#endif
+
+ abe->platform = platform;
+
+ pm_runtime_enable(abe->dev);
+ pm_runtime_irq_safe(abe->dev);
+
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ /* request firmware & coefficients */
+ ret = request_firmware(&fw, "omap4_abe", platform->dev);
+ if (ret != 0) {
+ dev_err(abe->dev, "Failed to load firmware: %d\n", ret);
+ return ret;
+ }
+ fw_data = fw->data;
+#else
+ fw_data = (u8 *)abe_get_default_fw();
+#endif
+
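+	/*
+	 * The firmware blob starts with a fw_header; the equalizer text
+	 * descriptions, the coefficient data and the ABE firmware image
+	 * are located at offsets derived from that header below.
+	 */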
+ /* get firmware and coefficients header info */
+ memcpy(&abe->hdr, fw_data, sizeof(struct fw_header));
+ if (abe->hdr.firmware_size > ABE_MAX_FW_SIZE) {
+		dev_err(abe->dev, "firmware too large: %d bytes (max %d)\n",
+			abe->hdr.firmware_size, ABE_MAX_FW_SIZE);
+ ret = -EINVAL;
+ goto err_fw;
+ }
+ dev_dbg(abe->dev, "ABE firmware size %d bytes\n", abe->hdr.firmware_size);
+
+ if (abe->hdr.coeff_size > ABE_MAX_COEFF_SIZE) {
+		dev_err(abe->dev, "coefficients too large: %d bytes (max %d)\n",
+			abe->hdr.coeff_size, ABE_MAX_COEFF_SIZE);
+ ret = -EINVAL;
+ goto err_fw;
+ }
+ dev_dbg(abe->dev, "ABE coefficients size %d bytes\n", abe->hdr.coeff_size);
+
+ /* get coefficient EQU mixer strings */
+ if (abe->hdr.num_equ >= ABE_MAX_EQU) {
+		dev_err(abe->dev, "too many equalizers: %d (max %d)\n",
+			abe->hdr.num_equ, ABE_MAX_EQU);
+ ret = -EINVAL;
+ goto err_fw;
+ }
+ abe->equ_texts = kzalloc(abe->hdr.num_equ * sizeof(struct coeff_config),
+ GFP_KERNEL);
+ if (abe->equ_texts == NULL) {
+ ret = -ENOMEM;
+ goto err_fw;
+ }
+ offset = sizeof(struct fw_header);
+ memcpy(abe->equ_texts, fw_data + offset,
+ abe->hdr.num_equ * sizeof(struct coeff_config));
+
+ /* get coefficients from firmware */
+ abe->equ[0] = kmalloc(abe->hdr.coeff_size, GFP_KERNEL);
+ if (abe->equ[0] == NULL) {
+ ret = -ENOMEM;
+ goto err_equ;
+ }
+ offset += abe->hdr.num_equ * sizeof(struct coeff_config);
+ memcpy(abe->equ[0], fw_data + offset, abe->hdr.coeff_size);
+
+ /* allocate coefficient mixer texts */
+ dev_dbg(abe->dev, "loaded %d equalizers\n", abe->hdr.num_equ);
+ for (i = 0; i < abe->hdr.num_equ; i++) {
+ dev_dbg(abe->dev, "equ %d: %s profiles %d\n", i,
+ abe->equ_texts[i].name, abe->equ_texts[i].count);
+ if (abe->equ_texts[i].count >= ABE_MAX_PROFILES) {
+			dev_err(abe->dev, "too many profiles (%d) for equalizer %d\n",
+				abe->equ_texts[i].count, i);
+ ret = -EINVAL;
+ goto err_texts;
+ }
+ abe->equalizer_enum[i].dtexts =
+ kzalloc(abe->equ_texts[i].count * sizeof(char *), GFP_KERNEL);
+ if (abe->equalizer_enum[i].dtexts == NULL) {
+ ret = -ENOMEM;
+ goto err_texts;
+ }
+ }
+
+ /* initialise coefficient equalizers */
+ for (i = 1; i < abe->hdr.num_equ; i++) {
+ abe->equ[i] = abe->equ[i - 1] +
+ abe->equ_texts[i - 1].count * abe->equ_texts[i - 1].coeff;
+ }
+
+ /* store ABE firmware for later context restore */
+ abe->firmware = kzalloc(abe->hdr.firmware_size, GFP_KERNEL);
+ if (abe->firmware == NULL) {
+ ret = -ENOMEM;
+ goto err_texts;
+ }
+
+ memcpy(abe->firmware,
+ fw_data + sizeof(struct fw_header) + abe->hdr.coeff_size,
+ abe->hdr.firmware_size);
+
+ ret = request_threaded_irq(abe->irq, NULL, abe_irq_handler,
+ IRQF_ONESHOT, "ABE", (void *)abe);
+ if (ret) {
+ dev_err(platform->dev, "request for ABE IRQ %d failed %d\n",
+ abe->irq, ret);
+ goto err_irq;
+ }
+
+ /* query supported opps */
+ rcu_read_lock();
+ opp_count = opp_get_opp_count(abe->dev);
+ if (opp_count <= 0) {
+ dev_err(abe->dev, "invalid OPP data\n");
+		ret = opp_count ? opp_count : -EINVAL;
+ goto err_opp;
+ } else if (opp_count > OMAP_ABE_OPP_COUNT) {
+ dev_err(abe->dev, "unsupported OPP count %d (max:%d)\n",
+ opp_count, OMAP_ABE_OPP_COUNT);
+ ret = -EINVAL;
+ goto err_opp;
+ }
+
+	/*
+	 * Fill opp_freqs from the highest frequency down: opp_find_freq_floor()
+	 * returns the highest OPP at or below *freq, so decrementing freq after
+	 * each match moves on to the next lower OPP.
+	 */
+ for (i = OMAP_ABE_OPP_COUNT - 1; i >= 0; i--) {
+ opp = opp_find_freq_floor(abe->dev, &freq);
+ if (IS_ERR_OR_NULL(opp))
+ break;
+ abe->opp_freqs[i] = freq;
+ /* prepare to obtain next available opp */
+ freq--;
+ }
+ /* use lowest available opp for non-populated items */
+ for (freq++; i >= 0; i--)
+ abe->opp_freqs[i] = freq;
+ rcu_read_unlock();
+
+	/*
+	 * The AESS clock has to be enabled to access the HAL registers;
+	 * it is released again below once initialization is done.
+	 */
+ pm_runtime_get_sync(abe->dev);
+
+ abe_init_mem(abe->io_base);
+
+ abe_reset_hal();
+
+ abe_load_fw(abe->firmware);
+
+ /* "tick" of the audio engine */
+ abe_write_event_generator(EVENT_TIMER);
+
+ abe_dsp_init_gains(abe);
+
+ /* Stop the engine */
+ abe_stop_event_generator();
+ abe_disable_irq();
+
+ pm_runtime_put_sync(abe->dev);
+ abe_add_widgets(platform);
+
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ release_firmware(fw);
+#endif
+ return ret;
+
+err_opp:
+ rcu_read_unlock();
+ free_irq(abe->irq, (void *)abe);
+err_irq:
+ kfree(abe->firmware);
+err_texts:
+ for (i = 0; i < abe->hdr.num_equ; i++)
+		kfree(abe->equalizer_enum[i].dtexts);
+ kfree(abe->equ[0]);
+err_equ:
+ kfree(abe->equ_texts);
+err_fw:
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ release_firmware(fw);
+#endif
+ return ret;
+}
+
+static int abe_remove(struct snd_soc_platform *platform)
+{
+ struct abe_data *abe = snd_soc_platform_get_drvdata(platform);
+ int i;
+
+ free_irq(abe->irq, (void *)abe);
+
+ for (i = 0; i < abe->hdr.num_equ; i++)
+		kfree(abe->equalizer_enum[i].dtexts);
+
+ kfree(abe->equ[0]);
+ kfree(abe->equ_texts);
+ kfree(abe->firmware);
+
+ pm_runtime_disable(abe->dev);
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver omap_aess_platform = {
+ .ops = &omap_aess_pcm_ops,
+ .probe = abe_probe,
+ .remove = abe_remove,
+ .suspend = abe_suspend,
+ .resume = abe_resume,
+ .read = abe_dsp_read,
+ .write = abe_dsp_write,
+ .stream_event = aess_stream_event,
+};
+
+static int __devinit abe_engine_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct omap4_abe_dsp_pdata *pdata = pdev->dev.platform_data;
+ struct abe_data *abe;
+ int ret = -EINVAL, i;
+
+ abe = kzalloc(sizeof(struct abe_data), GFP_KERNEL);
+ if (abe == NULL)
+ return -ENOMEM;
+ dev_set_drvdata(&pdev->dev, abe);
+ the_abe = abe;
+
+ /* ZERO_labelID should really be 0 */
+ for (i = 0; i < ABE_ROUTES_UL + 2; i++)
+ abe->router[i] = ZERO_labelID;
+
+ for (i = 0; i < 5; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ abe_memory_bank[i]);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no resource %s\n",
+ abe_memory_bank[i]);
+ goto err;
+ }
+ abe->io_base[i] = ioremap(res->start, resource_size(res));
+ if (!abe->io_base[i]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ abe->irq = platform_get_irq(pdev, 0);
+ if (abe->irq < 0) {
+ ret = abe->irq;
+ goto err;
+ }
+
+ abe->abe_pdata = pdata;
+ abe->dev = &pdev->dev;
+ mutex_init(&abe->mutex);
+ mutex_init(&abe->opp_mutex);
+ mutex_init(&abe->opp_req_mutex);
+ INIT_LIST_HEAD(&abe->opp_req);
+ abe->opp_req_count = 0;
+
+ ret = snd_soc_register_platform(abe->dev,
+ &omap_aess_platform);
+	if (ret < 0)
+		goto err;
+
+ abe_init_debugfs(abe);
+ return ret;
+
+err:
+ for (--i; i >= 0; i--)
+ iounmap(abe->io_base[i]);
+ kfree(abe);
+ return ret;
+}
+
+static int __devexit abe_engine_remove(struct platform_device *pdev)
+{
+ struct abe_data *abe = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ abe_cleanup_debugfs(abe);
+ snd_soc_unregister_platform(&pdev->dev);
+ for (i = 0; i < 5; i++)
+ iounmap(abe->io_base[i]);
+ kfree(abe);
+ return 0;
+}
+
+static struct platform_driver omap_aess_driver = {
+ .driver = {
+ .name = "aess",
+ .owner = THIS_MODULE,
+ },
+ .probe = abe_engine_probe,
+ .remove = __devexit_p(abe_engine_remove),
+};
+
+static int __init abe_engine_init(void)
+{
+ return platform_driver_register(&omap_aess_driver);
+}
+module_init(abe_engine_init);
+
+static void __exit abe_engine_exit(void)
+{
+ platform_driver_unregister(&omap_aess_driver);
+}
+module_exit(abe_engine_exit);
+
+MODULE_DESCRIPTION("ASoC OMAP4 ABE");
+MODULE_AUTHOR("Liam Girdwood <lrg@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-abe-dsp.h b/sound/soc/omap/omap-abe-dsp.h
new file mode 100644
index 0000000..8462290
--- /dev/null
+++ b/sound/soc/omap/omap-abe-dsp.h
@@ -0,0 +1,177 @@
+/*
+ * omap-abe-dsp.h
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Contact: Liam Girdwood <lrg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_ABE_DSP_H__
+#define __OMAP_ABE_DSP_H__
+
+#define ABE_MIXER(x) (x)
+
+#define MIX_DL1_TONES ABE_MIXER(0)
+#define MIX_DL1_VOICE ABE_MIXER(1)
+#define MIX_DL1_CAPTURE ABE_MIXER(2)
+#define MIX_DL1_MEDIA ABE_MIXER(3)
+#define MIX_DL2_TONES ABE_MIXER(4)
+#define MIX_DL2_VOICE ABE_MIXER(5)
+#define MIX_DL2_CAPTURE ABE_MIXER(6)
+#define MIX_DL2_MEDIA ABE_MIXER(7)
+#define MIX_AUDUL_TONES ABE_MIXER(8)
+#define MIX_AUDUL_MEDIA ABE_MIXER(9)
+#define MIX_AUDUL_CAPTURE ABE_MIXER(10)
+#define MIX_VXREC_TONES ABE_MIXER(11)
+#define MIX_VXREC_VOICE_PLAYBACK ABE_MIXER(12)
+#define MIX_VXREC_VOICE_CAPTURE ABE_MIXER(13)
+#define MIX_VXREC_MEDIA ABE_MIXER(14)
+#define MIX_SDT_CAPTURE ABE_MIXER(15)
+#define MIX_SDT_PLAYBACK ABE_MIXER(16)
+#define MIX_SWITCH_PDM_DL ABE_MIXER(17)
+#define MIX_SWITCH_BT_VX_DL ABE_MIXER(18)
+#define MIX_SWITCH_MM_EXT_DL ABE_MIXER(19)
+#define MIX_DL1_MONO ABE_MIXER(20)
+#define MIX_DL2_MONO ABE_MIXER(21)
+#define MIX_AUDUL_MONO ABE_MIXER(22)
+
+#define ABE_NUM_MONO_MIXERS (MIX_AUDUL_MONO - MIX_DL1_MONO + 1)
+#define ABE_NUM_MIXERS (MIX_AUDUL_MONO + 1)
+
+#define ABE_MUX(x) (x + ABE_NUM_MIXERS)
+
+#define MUX_MM_UL10 ABE_MUX(0)
+#define MUX_MM_UL11 ABE_MUX(1)
+#define MUX_MM_UL12 ABE_MUX(2)
+#define MUX_MM_UL13 ABE_MUX(3)
+#define MUX_MM_UL14 ABE_MUX(4)
+#define MUX_MM_UL15 ABE_MUX(5)
+#define MUX_MM_UL16 ABE_MUX(6)
+#define MUX_MM_UL17 ABE_MUX(7)
+#define MUX_MM_UL20 ABE_MUX(8)
+#define MUX_MM_UL21 ABE_MUX(9)
+#define MUX_VX_UL0 ABE_MUX(10)
+#define MUX_VX_UL1 ABE_MUX(11)
+
+#define ABE_NUM_MUXES (MUX_VX_UL1 - MUX_MM_UL10)
+
+#define ABE_WIDGET(x) (x + ABE_NUM_MIXERS + ABE_NUM_MUXES)
+
+/* ABE AIF Frontend Widgets */
+#define W_AIF_TONES_DL ABE_WIDGET(0)
+#define W_AIF_VX_DL ABE_WIDGET(1)
+#define W_AIF_VX_UL ABE_WIDGET(2)
+#define W_AIF_MM_UL1 ABE_WIDGET(3)
+#define W_AIF_MM_UL2 ABE_WIDGET(4)
+#define W_AIF_MM_DL ABE_WIDGET(5)
+#define W_AIF_MM_DL_LP W_AIF_MM_DL
+#define W_AIF_VIB_DL ABE_WIDGET(6)
+#define W_AIF_MODEM_DL ABE_WIDGET(7)
+#define W_AIF_MODEM_UL ABE_WIDGET(8)
+
+/* ABE AIF Backend Widgets */
+#define W_AIF_PDM_UL1 ABE_WIDGET(9)
+#define W_AIF_PDM_DL1 ABE_WIDGET(10)
+#define W_AIF_PDM_DL2 ABE_WIDGET(11)
+#define W_AIF_PDM_VIB ABE_WIDGET(12)
+#define W_AIF_BT_VX_UL ABE_WIDGET(13)
+#define W_AIF_BT_VX_DL ABE_WIDGET(14)
+#define W_AIF_MM_EXT_UL ABE_WIDGET(15)
+#define W_AIF_MM_EXT_DL ABE_WIDGET(16)
+#define W_AIF_DMIC0 ABE_WIDGET(17)
+#define W_AIF_DMIC1 ABE_WIDGET(18)
+#define W_AIF_DMIC2 ABE_WIDGET(19)
+
+/* ABE ROUTE_UL MUX Widgets */
+#define W_MUX_UL00 ABE_WIDGET(20)
+#define W_MUX_UL01 ABE_WIDGET(21)
+#define W_MUX_UL02 ABE_WIDGET(22)
+#define W_MUX_UL03 ABE_WIDGET(23)
+#define W_MUX_UL04 ABE_WIDGET(24)
+#define W_MUX_UL05 ABE_WIDGET(25)
+#define W_MUX_UL06 ABE_WIDGET(26)
+#define W_MUX_UL07 ABE_WIDGET(27)
+#define W_MUX_UL10 ABE_WIDGET(28)
+#define W_MUX_UL11 ABE_WIDGET(29)
+#define W_MUX_VX00 ABE_WIDGET(30)
+#define W_MUX_VX01 ABE_WIDGET(31)
+
+/* ABE Volume and Mixer Widgets */
+#define W_MIXER_DL1 ABE_WIDGET(32)
+#define W_MIXER_DL2 ABE_WIDGET(33)
+#define W_VOLUME_DL1 ABE_WIDGET(34)
+#define W_MIXER_AUDIO_UL ABE_WIDGET(35)
+#define W_MIXER_VX_REC ABE_WIDGET(36)
+#define W_MIXER_SDT ABE_WIDGET(37)
+#define W_VSWITCH_DL1_PDM ABE_WIDGET(38)
+#define W_VSWITCH_DL1_BT_VX ABE_WIDGET(39)
+#define W_VSWITCH_DL1_MM_EXT ABE_WIDGET(40)
+
+#define ABE_NUM_WIDGETS (W_VSWITCH_DL1_MM_EXT - W_AIF_TONES_DL)
+#define ABE_WIDGET_LAST W_VSWITCH_DL1_MM_EXT
+
+#define ABE_NUM_DAPM_REG \
+ (ABE_NUM_MIXERS + ABE_NUM_MUXES + ABE_NUM_WIDGETS)
+
+#define ABE_VIRTUAL_SWITCH 0
+#define ABE_ROUTES_UL 14
+
+/* TODO: OPP bitmask - use HAL version after update */
+#define ABE_OPP_25 0
+#define ABE_OPP_50 1
+#define ABE_OPP_100 2
+
+/* TODO: size in bytes of debug options */
+#define ABE_DBG_FLAG1_SIZE 0
+#define ABE_DBG_FLAG2_SIZE 0
+#define ABE_DBG_FLAG3_SIZE 0
+
+/* TODO: Pong start offset of DMEM */
+/* Ping pong buffer DMEM offset */
+#define ABE_DMEM_BASE_OFFSET_PING_PONG 0x4000
+
+/* Gain value conversion */
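+/*
+ * Convert between an ABE gain in the range -ABE_MAX_GAIN..ABE_MAX_GAIN and a
+ * zero-based control value expressed in steps of ABE_GAIN_SCALE.
+ */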
+#define ABE_MAX_GAIN 12000
+#define ABE_GAIN_SCALE 100
+#define abe_gain_to_val(gain)	(((gain) + ABE_MAX_GAIN) / ABE_GAIN_SCALE)
+#define abe_val_to_gain(val)	(-ABE_MAX_GAIN + ((val) * ABE_GAIN_SCALE))
+
+/* Firmware coefficients and equalizers */
+#define ABE_MAX_FW_SIZE (1024 * 128)
+#define ABE_MAX_COEFF_SIZE (1024 * 4)
+#define ABE_COEFF_NAME_SIZE 20
+#define ABE_COEFF_TEXT_SIZE 20
+#define ABE_COEFF_NUM_TEXTS 10
+#define ABE_MAX_EQU 10
+#define ABE_MAX_PROFILES 30
+
+#define OMAP_ABE_OPP25 0
+#define OMAP_ABE_OPP50 1
+#define OMAP_ABE_OPP100 2
+#define OMAP_ABE_OPP_COUNT 3
+
+void abe_dsp_shutdown(void);
+void abe_dsp_pm_get(void);
+void abe_dsp_pm_put(void);
+int abe_add_opp_req(struct device *dev, int opp);
+int abe_remove_opp_req(struct device *dev);
+void abe_dsp_set_power_mode(int mode);
+void abe_dsp_set_hs_offset(int left, int right, int mult);
+void abe_dsp_set_hf_offset(int left, int right);
+
+#endif /* End of __OMAP_ABE_DSP_H__ */
diff --git a/sound/soc/omap/omap-abe.c b/sound/soc/omap/omap-abe.c
new file mode 100644
index 0000000..da5ba44
--- /dev/null
+++ b/sound/soc/omap/omap-abe.c
@@ -0,0 +1,1563 @@
+/*
+ * omap-abe.c -- OMAP ALSA SoC DAI driver using Audio Backend
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Contact: Liam Girdwood <lrg@ti.com>
+ * Misael Lopez Cruz <misael.lopez@ti.com>
+ * Sebastien Guiriec <s-guiriec@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/soc-dsp.h>
+
+#include <plat/dma-44xx.h>
+#include <plat/dma.h>
+#include "omap-pcm.h"
+#include "omap-abe.h"
+#include "omap-abe-dsp.h"
+#include "abe/abe_main.h"
+#include "abe/port_mgr.h"
+
+#define OMAP_ABE_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+struct omap_abe_data {
+ /* MODEM FE*/
+ struct snd_pcm_substream *modem_substream[2];
+ struct snd_soc_dai *modem_dai;
+
+ struct abe *abe;
+
+ /* BE & FE Ports */
+ struct omap_abe_port *port[OMAP_ABE_MAX_PORT_ID + 1];
+
+ int active_dais;
+ int suspended_dais;
+};
+
+/*
+ * Stream DMA parameters
+ */
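+/* indexed by [frontend DAI id][substream direction] */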
+static struct omap_pcm_dma_data omap_abe_dai_dma_params[7][2] = {
+{
+ {
+ .name = "Media Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_0,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+ {
+ .name = "Media Capture1",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_3,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+},
+{
+ {},
+ {
+ .name = "Media Capture2",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_4,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+},
+{
+ {
+ .name = "Voice Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_1,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+ {
+ .name = "Voice Capture",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_2,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+},
+{
+ {
+ .name = "Tones Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_5,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },{},
+},
+{
+ {
+ .name = "Vibra Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_6,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },{},
+},
+{
+ {
+ .name = "MODEM Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_1,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+ {
+ .name = "MODEM Capture",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_2,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },
+},
+{
+ {
+ .name = "Low Power Playback",
+ .dma_req = OMAP44XX_DMA_ABE_REQ_0,
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+ },{},
+},};
+
+static int modem_get_dai(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ struct snd_soc_pcm_runtime *modem_rtd;
+
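+	/*
+	 * MODEM data I/O is routed through the OMAP_ABE_BE_MM_EXT1 backend:
+	 * borrow its substream for the opposite direction and reuse this
+	 * stream's runtime.
+	 */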
+ abe_priv->modem_substream[substream->stream] =
+ snd_soc_get_dai_substream(rtd->card,
+ OMAP_ABE_BE_MM_EXT1, !substream->stream);
+
+ if (abe_priv->modem_substream[substream->stream] == NULL)
+ return -ENODEV;
+
+ modem_rtd = abe_priv->modem_substream[substream->stream]->private_data;
+ abe_priv->modem_substream[substream->stream]->runtime = substream->runtime;
+ abe_priv->modem_dai = modem_rtd->cpu_dai;
+
+ return 0;
+}
+
+int omap_abe_set_dl1_output(int output)
+{
+ int gain;
+
+ /*
+ * the output itself is not important, but the DL1 gain
+ * to use when each output is active
+ */
+ switch (output) {
+ case OMAP_ABE_DL1_HEADSET_LP:
+ gain = GAIN_M8dB;
+ break;
+ case OMAP_ABE_DL1_HEADSET_HP:
+ case OMAP_ABE_DL1_EARPIECE:
+ gain = GAIN_M1dB;
+ break;
+ case OMAP_ABE_DL1_NO_PDM:
+ gain = GAIN_0dB;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ abe_write_gain(GAINS_DL1, gain, RAMP_2MS, GAIN_LEFT_OFFSET);
+ abe_write_gain(GAINS_DL1, gain, RAMP_2MS, GAIN_RIGHT_OFFSET);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_abe_set_dl1_output);
+
+static int omap_abe_dl1_enabled(struct omap_abe_data *abe_priv)
+{
+ /* DL1 path is common for PDM_DL1, BT_VX_DL and MM_EXT_DL */
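+	/* the sum is the number of DL1-related backends currently enabled */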
+ return omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_PDM_DL1]) +
+ omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_DL]) +
+ omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_DL]);
+}
+
+static int omap_abe_dl2_enabled(struct omap_abe_data *abe_priv)
+{
+ return omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_PDM_DL2]);
+}
+
+static void mute_be(struct snd_soc_pcm_runtime *be,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(&be->dev, "%s: %s %d\n", __func__, be->cpu_dai->name, stream);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_BT_VX:
+ case OMAP_ABE_DAI_MM_FM:
+ /*
+ * DL1 Mixer->SDT Mixer and DL1 gain are common for
+ * PDM_DL1, BT_VX_DL and MM_EXT_DL, mute those gains
+ * only if the last active BE
+ */
+ if (omap_abe_dl1_enabled(abe_priv) == 1) {
+ abe_mute_gain(GAINS_DL1, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DL1, GAIN_RIGHT_OFFSET);
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+ }
+ break;
+ case OMAP_ABE_DAI_PDM_DL2:
+ abe_mute_gain(GAINS_DL2, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DL2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_PDM_VIB:
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ }
+ } else {
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_UL:
+ abe_mute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ abe_mute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ abe_mute_gain(GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ abe_mute_gain(GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ abe_mute_gain(GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ abe_mute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+ break;
+ }
+ }
+}
+
+static void unmute_be(struct snd_soc_pcm_runtime *be,
+ struct snd_soc_dai *dai, int stream)
+{
+ dev_dbg(&be->dev, "%s: %s %d\n", __func__, be->cpu_dai->name, stream);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_BT_VX:
+ case OMAP_ABE_DAI_MM_FM:
+ /*
+ * DL1 Mixer->SDT Mixer and DL1 gain are common for
+ * PDM_DL1, BT_VX_DL and MM_EXT_DL, unmute when any
+ * of them becomes active
+ */
+ abe_unmute_gain(GAINS_DL1, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DL1, GAIN_RIGHT_OFFSET);
+ abe_unmute_gain(MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+ break;
+ case OMAP_ABE_DAI_PDM_DL2:
+ abe_unmute_gain(GAINS_DL2, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DL2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_PDM_VIB:
+ break;
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ }
+ } else {
+
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_UL:
+ abe_unmute_gain(GAINS_AMIC, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_AMIC, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ abe_unmute_gain(GAINS_BTUL, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_BTUL, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ case OMAP_ABE_DAI_MODEM:
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ abe_unmute_gain(GAINS_DMIC1, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC1, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ abe_unmute_gain(GAINS_DMIC2, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC2, GAIN_RIGHT_OFFSET);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ abe_unmute_gain(GAINS_DMIC3, GAIN_LEFT_OFFSET);
+ abe_unmute_gain(GAINS_DMIC3, GAIN_RIGHT_OFFSET);
+ break;
+ }
+ }
+}
+
+static void enable_be_port(struct snd_soc_pcm_runtime *be,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ abe_data_format_t format;
+
+ dev_dbg(&be->dev, "%s: %s %d\n", __func__, be->cpu_dai->name, stream);
+
+ switch (be->dai_link->be_id) {
+ /* McPDM is a special case, handled by McPDM driver */
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_PDM_DL2:
+ case OMAP_ABE_DAI_PDM_VIB:
+ case OMAP_ABE_DAI_PDM_UL:
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+
+ /* port can only be configured if it's not running */
+ if (omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_DL]))
+ return;
+
+ /* BT_DL connection to McBSP 1 ports */
+ format.f = 8000;
+ format.samp_format = MONO_RSHIFTED_16;
+ abe_connect_serial_port(BT_VX_DL_PORT, &format, MCBSP1_TX);
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_DL]);
+ } else {
+
+ /* port can only be configured if it's not running */
+ if (omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_UL]))
+ return;
+
+ /* BT_UL connection to McBSP 1 ports */
+ format.f = 8000;
+ format.samp_format = MONO_RSHIFTED_16;
+ abe_connect_serial_port(BT_VX_UL_PORT, &format, MCBSP1_RX);
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_UL]);
+ }
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+
+ /* port can only be configured if it's not running */
+ if (omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_DL]))
+ return;
+
+ /* MM_EXT connection to McBSP 2 ports */
+ format.f = 48000;
+ format.samp_format = STEREO_RSHIFTED_16;
+ abe_connect_serial_port(MM_EXT_OUT_PORT, &format, MCBSP2_TX);
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_DL]);
+ } else {
+
+ /* port can only be configured if it's not running */
+ if (omap_abe_port_is_enabled(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_UL]))
+ return;
+
+ /* MM_EXT connection to McBSP 2 ports */
+ format.f = 48000;
+ format.samp_format = STEREO_RSHIFTED_16;
+ abe_connect_serial_port(MM_EXT_IN_PORT, &format, MCBSP2_RX);
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_UL]);
+ }
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC0]);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC1]);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC2]);
+ break;
+ }
+}
+
+static void enable_fe_port(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, dai->name, stream);
+
+	switch (dai->id) {
+ case ABE_FRONTEND_DAI_MEDIA:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_DL1]);
+ else
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_UL1]);
+ break;
+ case ABE_FRONTEND_DAI_LP_MEDIA:
+ abe_enable_data_transfer(MM_DL_PORT);
+ break;
+ case ABE_FRONTEND_DAI_MEDIA_CAPTURE:
+ if (stream == SNDRV_PCM_STREAM_CAPTURE)
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_UL2]);
+ break;
+ case ABE_FRONTEND_DAI_MODEM:
+ case ABE_FRONTEND_DAI_VOICE:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VX_DL]);
+ else
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VX_UL]);
+ break;
+ case ABE_FRONTEND_DAI_TONES:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_TONES]);
+ break;
+ case ABE_FRONTEND_DAI_VIBRA:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_enable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VIB]);
+ break;
+ }
+}
+
+static void disable_be_port(struct snd_soc_pcm_runtime *be,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(&be->dev, "%s: %s %d\n", __func__, be->cpu_dai->name, stream);
+
+ switch (be->dai_link->be_id) {
+ /* McPDM is a special case, handled by McPDM driver */
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_PDM_DL2:
+ case OMAP_ABE_DAI_PDM_VIB:
+ case OMAP_ABE_DAI_PDM_UL:
+ break;
+ case OMAP_ABE_DAI_BT_VX:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_DL]);
+ else
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_BT_VX_UL]);
+ break;
+ case OMAP_ABE_DAI_MM_FM:
+ case OMAP_ABE_DAI_MODEM:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_DL]);
+ else
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_MM_EXT_UL]);
+ break;
+ case OMAP_ABE_DAI_DMIC0:
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC0]);
+ break;
+ case OMAP_ABE_DAI_DMIC1:
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC1]);
+ break;
+ case OMAP_ABE_DAI_DMIC2:
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_BE_PORT_DMIC2]);
+ break;
+ }
+}
+
+static void disable_fe_port(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, dai->name, stream);
+
+	switch (dai->id) {
+ case ABE_FRONTEND_DAI_MEDIA:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_DL1]);
+ else
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_UL1]);
+ break;
+ case ABE_FRONTEND_DAI_LP_MEDIA:
+ abe_disable_data_transfer(MM_DL_PORT);
+ break;
+ case ABE_FRONTEND_DAI_MEDIA_CAPTURE:
+ if (stream == SNDRV_PCM_STREAM_CAPTURE)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_MM_UL2]);
+ break;
+ case ABE_FRONTEND_DAI_MODEM:
+ case ABE_FRONTEND_DAI_VOICE:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VX_DL]);
+ else
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VX_UL]);
+ break;
+ case ABE_FRONTEND_DAI_TONES:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_TONES]);
+ break;
+ case ABE_FRONTEND_DAI_VIBRA:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_abe_port_disable(abe_priv->abe,
+ abe_priv->port[OMAP_ABE_FE_PORT_VIB]);
+ break;
+ }
+}
+
+static void mute_fe_port_capture(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int mute)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(fe->cpu_dai);
+
+ dev_dbg(&fe->dev, "%s: %s FE %s BE %s\n",
+ __func__, mute ? "mute" : "unmute",
+ fe->dai_link->name, be->dai_link->name);
+
+ switch (fe->cpu_dai->id) {
+ case ABE_FRONTEND_DAI_MEDIA_CAPTURE:
+ if (omap_abe_dl1_enabled(abe_priv)) {
+ if (mute)
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_MM_UL2);
+ else
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_MM_UL2);
+ }
+ if (omap_abe_dl2_enabled(abe_priv)) {
+ if (mute)
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_UL2);
+ else
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_MM_UL2);
+ }
+ break;
+ case ABE_FRONTEND_DAI_MODEM:
+ case ABE_FRONTEND_DAI_VOICE:
+ if (mute) {
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ } else {
+ abe_unmute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ }
+ break;
+ case ABE_FRONTEND_DAI_MEDIA:
+ default:
+ break;
+ }
+}
+
+static void mute_fe_port_playback(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int mute)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(fe->cpu_dai);
+
+ dev_dbg(&fe->dev, "%s: %s FE %s BE %s\n",
+ __func__, mute ? "mute" : "unmute",
+ fe->dai_link->name, be->dai_link->name);
+
+ switch (fe->cpu_dai->id) {
+ case ABE_FRONTEND_DAI_MEDIA:
+ case ABE_FRONTEND_DAI_LP_MEDIA:
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_BT_VX:
+ case OMAP_ABE_DAI_MM_FM:
+ if (mute) {
+ /* mute if last running DL1-related BE */
+ if (omap_abe_dl1_enabled(abe_priv) == 1)
+ abe_mute_gain(MIXDL1,
+ MIX_DL1_INPUT_MM_DL);
+ } else {
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_MM_DL);
+ }
+ break;
+ case OMAP_ABE_DAI_PDM_DL2:
+ if (mute)
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_DL);
+ else
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_MM_DL);
+ break;
+ case OMAP_ABE_DAI_MODEM:
+ case OMAP_ABE_DAI_PDM_VIB:
+ default:
+ break;
+ }
+ break;
+ case ABE_FRONTEND_DAI_VOICE:
+ case ABE_FRONTEND_DAI_MODEM:
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_BT_VX:
+ case OMAP_ABE_DAI_MM_FM:
+ if (mute) {
+ /* mute if last running DL1-related BE */
+ if (omap_abe_dl1_enabled(abe_priv) == 1)
+ abe_mute_gain(MIXDL1,
+ MIX_DL1_INPUT_VX_DL);
+ } else {
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_VX_DL);
+ }
+ break;
+ case OMAP_ABE_DAI_PDM_DL2:
+ if (mute)
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_VX_DL);
+ else
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_VX_DL);
+ break;
+ case OMAP_ABE_DAI_MODEM:
+ case OMAP_ABE_DAI_PDM_VIB:
+ default:
+ break;
+ }
+ break;
+ case ABE_FRONTEND_DAI_TONES:
+ switch (be->dai_link->be_id) {
+ case OMAP_ABE_DAI_PDM_DL1:
+ case OMAP_ABE_DAI_BT_VX:
+ case OMAP_ABE_DAI_MM_FM:
+ if (mute) {
+ /* mute if last running DL1-related BE */
+ if (omap_abe_dl1_enabled(abe_priv) == 1)
+ abe_mute_gain(MIXDL1,
+ MIX_DL1_INPUT_TONES);
+			} else {
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_TONES);
+ }
+ break;
+ case OMAP_ABE_DAI_PDM_DL2:
+ if (mute)
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_TONES);
+ else
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_TONES);
+ break;
+ case OMAP_ABE_DAI_MODEM:
+ case OMAP_ABE_DAI_PDM_VIB:
+ default:
+ break;
+ }
+ break;
+ case ABE_FRONTEND_DAI_VIBRA:
+ default:
+ break;
+ }
+}
+
+static void mute_fe_port(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, dai->name, stream);
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mute_fe_port_playback(fe, be, 1);
+ else
+ mute_fe_port_capture(fe, be, 1);
+ }
+}
+
+static void unmute_fe_port(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, dai->name, stream);
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mute_fe_port_playback(fe, be, 0);
+ else
+ mute_fe_port_capture(fe, be, 0);
+ }
+}
+
+static void capture_trigger(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int cmd)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+ struct snd_pcm_substream *be_substream;
+ int stream = substream->stream;
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, fe->cpu_dai->name, stream);
+
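+	/*
+	 * BE ports are kept muted while they are enabled and their DAIs
+	 * triggered; they are unmuted only once the frontend transfer is
+	 * running, and muted again before being disabled on stop.
+	 */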
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+
+ /* mute and enable BE ports */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ /* does this trigger() apply to this BE and stream ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_PREPARE) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_STOP))
+ continue;
+
+ be_substream = snd_soc_dsp_get_substream(be, stream);
+
+ /* mute the BE port */
+ mute_be(be, dai, stream);
+
+ /* enable the BE port */
+ enable_be_port(be, dai, stream);
+
+ /* DAI work must be started/stopped at least 250us after ABE */
+ udelay(250);
+
+ /* trigger the BE port */
+ snd_soc_dai_trigger(be_substream, cmd, be->cpu_dai);
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_START;
+ }
+
+ /* does this trigger() apply to the FE ? */
+ if (snd_soc_dsp_is_trigger_for_fe(fe, stream)) {
+ /* Enable Frontend sDMA */
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ enable_fe_port(substream, dai, stream);
+
+ /* unmute FE port */
+ unmute_fe_port(substream, dai, stream);
+ }
+
+ /* Restore ABE GAINS AMIC */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ /* does this trigger() apply to this BE and stream ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ /* unmute this BE port */
+ unmute_be(be, dai, stream);
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* Enable sDMA */
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ enable_fe_port(substream, dai, stream);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /* Disable sDMA */
+ disable_fe_port(substream, dai, stream);
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* does this trigger() apply to the FE ? */
+ if (snd_soc_dsp_is_trigger_for_fe(fe, stream)) {
+ /* mute FE port */
+ mute_fe_port(substream, dai, stream);
+
+ /* Disable sDMA */
+ disable_fe_port(substream, dai, stream);
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ }
+
+ /* disable BE ports */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ /* does this trigger() apply to this BE and stream ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_START)
+ continue;
+
+ /* only stop if last running user */
+ if (soc_dsp_fe_state_count(be, stream,
+ SND_SOC_DSP_STATE_START) > 1)
+ continue;
+
+ be_substream = snd_soc_dsp_get_substream(be, stream);
+
+ /* mute the BE port */
+ mute_be(be, dai, stream);
+
+ /* disable the BE port */
+ disable_be_port(be, dai, stream);
+
+ /* DAI work must be started/stopped at least 250us after ABE */
+ udelay(250);
+
+ /* trigger BE port */
+ snd_soc_dai_trigger(be_substream, cmd, be->cpu_dai);
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_STOP;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void playback_trigger(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai, int cmd)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+ struct snd_pcm_substream *be_substream;
+ int stream = substream->stream;
+
+ dev_dbg(&fe->dev, "%s: %s %d\n", __func__, fe->cpu_dai->name, stream);
+
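+	/*
+	 * Each BE port is kept muted while it is enabled and its DAI is
+	 * triggered, then unmuted; the FE port is unmuted last, once its
+	 * transfer is running.
+	 */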
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+
+ /* mute and enable ports */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ /* does this trigger() apply to the FE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_PREPARE) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_STOP))
+ continue;
+
+ be_substream = snd_soc_dsp_get_substream(be, stream);
+
+ /* mute BE port */
+ mute_be(be, dai, stream);
+
+			/* enable the BE port */
+ enable_be_port(be, dai, stream);
+
+ /* DAI work must be started/stopped at least 250us after ABE */
+ udelay(250);
+
+ /* trigger BE port */
+ snd_soc_dai_trigger(be_substream, cmd, be->cpu_dai);
+
+ /* unmute the BE port */
+ unmute_be(be, dai, stream);
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_START;
+ }
+
+ /* does this trigger() apply to the FE ? */
+ if (snd_soc_dsp_is_trigger_for_fe(fe, stream)) {
+ /* Enable Frontend sDMA */
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ enable_fe_port(substream, dai, stream);
+ }
+
+		/* unmute FE port (sensitive to runtime updates) */
+ unmute_fe_port(substream, dai, stream);
+
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* Enable Frontend sDMA */
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ enable_fe_port(substream, dai, stream);
+
+ /* unmute FE port */
+ unmute_fe_port(substream, dai, stream);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /* mute FE port */
+ mute_fe_port(substream, dai, stream);
+
+ /* disable Frontend sDMA */
+ disable_fe_port(substream, dai, stream);
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+
+		/* mute FE port (sensitive to runtime updates) */
+ mute_fe_port(substream, dai, stream);
+
+ /* does this trigger() apply to the FE ? */
+ if (snd_soc_dsp_is_trigger_for_fe(fe, stream)) {
+ /* disable the transfer */
+ disable_fe_port(substream, dai, stream);
+ snd_soc_dsp_platform_trigger(substream, cmd, fe->platform);
+ }
+
+ /* disable BE ports */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ /* does this trigger() apply to this BE and stream ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_START)
+ continue;
+
+ /* only stop if last running user */
+ if (soc_dsp_fe_state_count(be, stream,
+ SND_SOC_DSP_STATE_START) > 1)
+ continue;
+
+ be_substream = snd_soc_dsp_get_substream(be, stream);
+
+ /* mute the BE port */
+ mute_be(be, dai, stream);
+
+ /* disable the BE */
+ disable_be_port(be, dai, stream);
+
+ /* DAI work must be started/stopped at least 250us after ABE */
+ udelay(250);
+
+ /* trigger the BE port */
+ snd_soc_dai_trigger(be_substream, cmd, be->cpu_dai);
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_STOP;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int omap_abe_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ abe_priv->active_dais++;
+
+ abe_dsp_pm_get();
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+
+ ret = modem_get_dai(substream, dai);
+ if (ret < 0) {
+ dev_err(dai->dev, "failed to get MODEM DAI\n");
+ return ret;
+ }
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d\n",
+ __func__, substream->stream);
+
+ ret = snd_soc_dai_startup(abe_priv->modem_substream[substream->stream],
+ abe_priv->modem_dai);
+ if (ret < 0) {
+ dev_err(abe_priv->modem_dai->dev, "failed to open DAI %d\n", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int omap_abe_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ struct omap_pcm_dma_data *dma_data;
+ abe_data_format_t format;
+ abe_dma_t dma_sink;
+ abe_dma_t dma_params;
+ int data_type = OMAP_DMA_DATA_TYPE_S32;
+ int ret;
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ dma_data = &omap_abe_dai_dma_params[dai->id][substream->stream];
+
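+	/* map the ALSA channel count and sample format onto ABE HAL formats */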
+ switch (params_channels(params)) {
+ case 1:
+ if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) {
+ format.samp_format = MONO_RSHIFTED_16;
+ data_type = OMAP_DMA_DATA_TYPE_S16;
+ } else {
+ format.samp_format = MONO_MSB;
+ }
+ break;
+ case 2:
+ if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE)
+ format.samp_format = STEREO_16_16;
+ else
+ format.samp_format = STEREO_MSB;
+ break;
+ case 3:
+ format.samp_format = THREE_MSB;
+ break;
+ case 4:
+ format.samp_format = FOUR_MSB;
+ break;
+ case 5:
+ format.samp_format = FIVE_MSB;
+ break;
+	case 6:
+		format.samp_format = SIX_MSB;
+		break;
+	case 7:
+		format.samp_format = SEVEN_MSB;
+		break;
+ case 8:
+ format.samp_format = EIGHT_MSB;
+ break;
+ default:
+ dev_err(dai->dev, "%d channels not supported",
+ params_channels(params));
+ return -EINVAL;
+ }
+
+ format.f = params_rate(params);
+
+ switch (dai->id) {
+ case ABE_FRONTEND_DAI_MEDIA:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ abe_connect_cbpr_dmareq_port(MM_DL_PORT, &format, ABE_CBPR0_IDX,
+ &dma_sink);
+ abe_read_port_address(MM_DL_PORT, &dma_params);
+ } else {
+ abe_connect_cbpr_dmareq_port(MM_UL_PORT, &format, ABE_CBPR3_IDX,
+ &dma_sink);
+ abe_read_port_address(MM_UL_PORT, &dma_params);
+ }
+ break;
+ case ABE_FRONTEND_DAI_LP_MEDIA:
+		return 0;
+ case ABE_FRONTEND_DAI_MEDIA_CAPTURE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+ else {
+ abe_connect_cbpr_dmareq_port(MM_UL2_PORT, &format, ABE_CBPR4_IDX,
+ &dma_sink);
+ abe_read_port_address(MM_UL2_PORT, &dma_params);
+ }
+ break;
+ case ABE_FRONTEND_DAI_VOICE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ abe_connect_cbpr_dmareq_port(VX_DL_PORT, &format, ABE_CBPR1_IDX,
+ &dma_sink);
+ abe_read_port_address(VX_DL_PORT, &dma_params);
+ } else {
+ abe_connect_cbpr_dmareq_port(VX_UL_PORT, &format, ABE_CBPR2_IDX,
+ &dma_sink);
+ abe_read_port_address(VX_UL_PORT, &dma_params);
+ }
+ break;
+ case ABE_FRONTEND_DAI_TONES:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ abe_connect_cbpr_dmareq_port(TONES_DL_PORT, &format, ABE_CBPR5_IDX,
+ &dma_sink);
+ abe_read_port_address(TONES_DL_PORT, &dma_params);
+ } else
+ return -EINVAL;
+ break;
+ case ABE_FRONTEND_DAI_VIBRA:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ abe_connect_cbpr_dmareq_port(VIB_DL_PORT, &format, ABE_CBPR6_IDX,
+ &dma_sink);
+ abe_read_port_address(VIB_DL_PORT, &dma_params);
+ } else
+ return -EINVAL;
+ break;
+ case ABE_FRONTEND_DAI_MODEM:
+ /* MODEM is special case where data IO is performed by McBSP2
+ * directly onto VX_DL and VX_UL (instead of SDMA).
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Vx_DL connection to McBSP 2 ports */
+ format.samp_format = STEREO_RSHIFTED_16;
+ abe_connect_serial_port(VX_DL_PORT, &format, MCBSP2_RX);
+ abe_read_port_address(VX_DL_PORT, &dma_params);
+ } else {
+ /* Vx_UL connection to McBSP 2 ports */
+ format.samp_format = STEREO_RSHIFTED_16;
+ abe_connect_serial_port(VX_UL_PORT, &format, MCBSP2_TX);
+ abe_read_port_address(VX_UL_PORT, &dma_params);
+ }
+ break;
+ }
+
+ /* configure frontend SDMA data */
+ dma_data->port_addr = (unsigned long)dma_params.data;
+ dma_data->packet_size = dma_params.iter;
+ dma_data->data_type = data_type;
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+ /* call hw_params on McBSP with correct DMA data */
+ snd_soc_dai_set_dma_data(abe_priv->modem_dai, substream,
+ dma_data);
+
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d\n",
+ __func__, substream->stream);
+
+ ret = snd_soc_dai_hw_params(abe_priv->modem_substream[substream->stream],
+ params, abe_priv->modem_dai);
+ if (ret < 0)
+ dev_err(abe_priv->modem_dai->dev, "MODEM hw_params failed\n");
+ return ret;
+ }
+
+ snd_soc_dai_set_dma_data(dai, substream, dma_data);
+
+ return 0;
+}
+
+static int omap_abe_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+ ret = snd_soc_dai_prepare(abe_priv->modem_substream[substream->stream],
+ abe_priv->modem_dai);
+
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d\n",
+ __func__, substream->stream);
+
+ if (ret < 0) {
+ dev_err(abe_priv->modem_dai->dev, "MODEM prepare failed\n");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int omap_abe_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s cmd %d\n", __func__, dai->name, cmd);
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d cmd %d\n",
+ __func__, substream->stream, cmd);
+
+ ret = snd_soc_dai_trigger(abe_priv->modem_substream[substream->stream],
+ cmd, abe_priv->modem_dai);
+ if (ret < 0) {
+ dev_err(abe_priv->modem_dai->dev, "MODEM trigger failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int omap_abe_dai_bespoke_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s cmd %d\n", __func__, dai->name, cmd);
+
+ if ((dai->id == ABE_FRONTEND_DAI_MODEM) &&
+ snd_soc_dsp_is_trigger_for_fe(fe, substream->stream)) {
+
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d cmd %d\n",
+ __func__, substream->stream, cmd);
+
+ ret = snd_soc_dai_trigger(abe_priv->modem_substream[substream->stream],
+ cmd, abe_priv->modem_dai);
+ if (ret < 0) {
+ dev_err(abe_priv->modem_dai->dev, "MODEM trigger failed\n");
+ return ret;
+ }
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ playback_trigger(substream, dai, cmd);
+ else
+ capture_trigger(substream, dai, cmd);
+
+ return ret;
+}
+
+static int omap_abe_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d\n",
+ __func__, substream->stream);
+
+ ret = snd_soc_dai_hw_free(abe_priv->modem_substream[substream->stream],
+ abe_priv->modem_dai);
+ if (ret < 0) {
+ dev_err(abe_priv->modem_dai->dev, "MODEM hw_free failed\n");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void omap_abe_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+ if (dai->id == ABE_FRONTEND_DAI_MODEM) {
+ dev_dbg(abe_priv->modem_dai->dev, "%s: MODEM stream %d\n",
+ __func__, substream->stream);
+
+ snd_soc_dai_shutdown(abe_priv->modem_substream[substream->stream],
+ abe_priv->modem_dai);
+ }
+
+ abe_dsp_shutdown();
+ abe_dsp_pm_put();
+
+ abe_priv->active_dais--;
+}
+
+#ifdef CONFIG_PM
+static int omap_abe_dai_suspend(struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "%s: %s active %d\n",
+ __func__, dai->name, dai->active);
+
+ if (!dai->active)
+ return 0;
+
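+	/* mute the ABE gains only once the last active DAI has been suspended */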
+ if (++abe_priv->suspended_dais < abe_priv->active_dais)
+ return 0;
+
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ abe_mute_gain(MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_MM_DL);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_TONES);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ abe_mute_gain(MIXAUDUL, MIX_AUDUL_INPUT_VX_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_TONES);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_MM_DL);
+ abe_mute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_UL);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_MM_DL);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_MM_UL2);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_VX_DL);
+ abe_mute_gain(MIXDL1, MIX_DL1_INPUT_TONES);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_TONES);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_VX_DL);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_DL);
+ abe_mute_gain(MIXDL2, MIX_DL2_INPUT_MM_UL2);
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL1);
+ abe_mute_gain(MIXECHO, MIX_ECHO_DL2);
+
+ return 0;
+}
+
+static int omap_abe_dai_resume(struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "%s: %s active %d\n",
+ __func__, dai->name, dai->active);
+
+ if (!dai->active)
+ return 0;
+
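+	/* only the first DAI to resume restores the ABE gains */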
+ if (abe_priv->suspended_dais-- < abe_priv->active_dais)
+ return 0;
+
+ abe_unmute_gain(MIXSDT, MIX_SDT_INPUT_UP_MIXER);
+ abe_unmute_gain(MIXSDT, MIX_SDT_INPUT_DL1_MIXER);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_MM_DL);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_TONES);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_UPLINK);
+ abe_unmute_gain(MIXAUDUL, MIX_AUDUL_INPUT_VX_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_TONES);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_MM_DL);
+ abe_unmute_gain(MIXVXREC, MIX_VXREC_INPUT_VX_UL);
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_MM_DL);
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_MM_UL2);
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_VX_DL);
+ abe_unmute_gain(MIXDL1, MIX_DL1_INPUT_TONES);
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_TONES);
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_VX_DL);
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_MM_DL);
+ abe_unmute_gain(MIXDL2, MIX_DL2_INPUT_MM_UL2);
+ abe_unmute_gain(MIXECHO, MIX_ECHO_DL1);
+ abe_unmute_gain(MIXECHO, MIX_ECHO_DL2);
+
+ return 0;
+}
+#else
+#define omap_abe_dai_suspend NULL
+#define omap_abe_dai_resume NULL
+#endif
+
+static int omap_abe_dai_probe(struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv;
+ int i;
+
+ abe_priv = kzalloc(sizeof(struct omap_abe_data), GFP_KERNEL);
+ if (abe_priv == NULL)
+ return -ENOMEM;
+
+ abe_priv->abe = omap_abe_port_mgr_get();
+ if (!abe_priv->abe)
+ goto err;
+
+ for (i = 0; i <= OMAP_ABE_MAX_PORT_ID; i++) {
+
+ abe_priv->port[i] = omap_abe_port_open(abe_priv->abe, i);
+ if (abe_priv->port[i] == NULL) {
+ for (--i; i >= 0; i--)
+ omap_abe_port_close(abe_priv->abe, abe_priv->port[i]);
+
+ goto err_port;
+ }
+ }
+
+ snd_soc_dai_set_drvdata(dai, abe_priv);
+ return 0;
+
+err_port:
+ omap_abe_port_mgr_put(abe_priv->abe);
+err:
+ kfree(abe_priv);
+ return -ENOMEM;
+}
+
+static int omap_abe_dai_remove(struct snd_soc_dai *dai)
+{
+ struct omap_abe_data *abe_priv = snd_soc_dai_get_drvdata(dai);
+
+ omap_abe_port_mgr_put(abe_priv->abe);
+ kfree(abe_priv);
+ return 0;
+}
+
+static struct snd_soc_dai_ops omap_abe_dai_ops = {
+ .startup = omap_abe_dai_startup,
+ .shutdown = omap_abe_dai_shutdown,
+ .hw_params = omap_abe_dai_hw_params,
+ .hw_free = omap_abe_dai_hw_free,
+ .prepare = omap_abe_dai_prepare,
+ .trigger = omap_abe_dai_trigger,
+ .bespoke_trigger = omap_abe_dai_bespoke_trigger,
+};
+
+static struct snd_soc_dai_driver omap_abe_dai[] = {
+ { /* Multimedia Playback and Capture */
+ .name = "MultiMedia1",
+ .probe = omap_abe_dai_probe,
+ .remove = omap_abe_dai_remove,
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "MultiMedia1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .capture = {
+ .stream_name = "MultiMedia1 Capture",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* Multimedia Capture */
+ .name = "MultiMedia2",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .capture = {
+ .stream_name = "MultiMedia2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* Voice Playback and Capture */
+ .name = "Voice",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "Voice Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Voice Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* Tones Playback */
+ .name = "Tones",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "Tones Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* Vibra */
+ .name = "Vibra",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "Vibra Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* MODEM Voice Playback and Capture */
+ .name = "MODEM",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "MODEM Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .capture = {
+ .stream_name = "MODEM Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+ { /* Low Power HiFi Playback */
+ .name = "MultiMedia1 LP",
+ .suspend = omap_abe_dai_suspend,
+ .resume = omap_abe_dai_resume,
+ .playback = {
+ .stream_name = "MultiMedia1 LP Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .formats = OMAP_ABE_FORMATS,
+ },
+ .ops = &omap_abe_dai_ops,
+ },
+};
+
+static int __devinit omap_abe_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_dais(&pdev->dev, omap_abe_dai,
+ ARRAY_SIZE(omap_abe_dai));
+}
+
+static int __devexit omap_abe_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(omap_abe_dai));
+ return 0;
+}
+
+static struct platform_driver omap_abe_driver = {
+ .driver = {
+ .name = "omap-abe-dai",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_abe_probe,
+ .remove = __devexit_p(omap_abe_remove),
+};
+
+static int __init omap_abe_init(void)
+{
+ return platform_driver_register(&omap_abe_driver);
+}
+module_init(omap_abe_init);
+
+static void __exit omap_abe_exit(void)
+{
+ platform_driver_unregister(&omap_abe_driver);
+}
+module_exit(omap_abe_exit);
+
+MODULE_AUTHOR("Liam Girdwood <lrg@ti.com>");
+MODULE_DESCRIPTION("OMAP ABE SoC Interface");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-abe.h b/sound/soc/omap/omap-abe.h
new file mode 100644
index 0000000..91c5f1d
--- /dev/null
+++ b/sound/soc/omap/omap-abe.h
@@ -0,0 +1,68 @@
+/*
+ * omap-abe.h
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Contact: Liam Girdwood <lrg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_ABE_H__
+#define __OMAP_ABE_H__
+
+#define ABE_FRONTEND_DAI_MEDIA 0
+#define ABE_FRONTEND_DAI_MEDIA_CAPTURE 1
+#define ABE_FRONTEND_DAI_VOICE 2
+#define ABE_FRONTEND_DAI_TONES 3
+#define ABE_FRONTEND_DAI_VIBRA 4
+#define ABE_FRONTEND_DAI_MODEM 5
+#define ABE_FRONTEND_DAI_LP_MEDIA 6
+#define ABE_FRONTEND_DAI_NUM 7
+
+/* This must currently match the BE order in DSP */
+#define OMAP_ABE_DAI_PDM_UL 0
+#define OMAP_ABE_DAI_PDM_DL1 1
+#define OMAP_ABE_DAI_PDM_DL2 2
+#define OMAP_ABE_DAI_PDM_VIB 3
+#define OMAP_ABE_DAI_BT_VX 4
+#define OMAP_ABE_DAI_MM_FM 5
+#define OMAP_ABE_DAI_MODEM 6
+#define OMAP_ABE_DAI_DMIC0 7
+#define OMAP_ABE_DAI_DMIC1 8
+#define OMAP_ABE_DAI_DMIC2 9
+#define OMAP_ABE_DAI_NUM 10
+
+#define OMAP_ABE_BE_PDM_DL1 "PDM-DL1"
+#define OMAP_ABE_BE_PDM_UL1 "PDM-UL1"
+#define OMAP_ABE_BE_PDM_DL2 "PDM-DL2"
+#define OMAP_ABE_BE_PDM_VIB "PDM-VIB"
+#define OMAP_ABE_BE_BT_VX_UL "BT-VX-UL"
+#define OMAP_ABE_BE_BT_VX_DL "BT-VX-DL"
+#define OMAP_ABE_BE_MM_EXT0 "FM-EXT"
+#define OMAP_ABE_BE_MM_EXT1 "MODEM-EXT"
+#define OMAP_ABE_BE_DMIC0 "DMIC0"
+#define OMAP_ABE_BE_DMIC1 "DMIC1"
+#define OMAP_ABE_BE_DMIC2 "DMIC2"
+
+#define OMAP_ABE_DL1_NO_PDM 0
+#define OMAP_ABE_DL1_HEADSET_LP 1
+#define OMAP_ABE_DL1_HEADSET_HP 2
+#define OMAP_ABE_DL1_EARPIECE 3
+
+int omap_abe_set_dl1_output(int output);
+
+#endif /* End of __OMAP_ABE_H__ */
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
new file mode 100644
index 0000000..69dd059
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi.c
@@ -0,0 +1,158 @@
+/*
+ * omap-hdmi.c
+ *
+ * OMAP ALSA SoC DAI driver for HDMI audio on OMAP4 processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Jorge Candelaria <jorge.candelaria@ti.com>
+ * Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <plat/dma.h>
+#include "omap-pcm.h"
+#include "omap-hdmi.h"
+
+#define DRV_NAME "hdmi-audio-dai"
+
+static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = {
+ .name = "HDMI playback",
+ .sync_mode = OMAP_DMA_SYNC_PACKET,
+};
+
+static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int err;
+ /*
+ * Make sure that the period bytes are multiple of the DMA packet size.
+ * Largest packet size we use is 32 32-bit words = 128 bytes
+ */
+ err = snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int err = 0;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ omap_hdmi_dai_dma_params.packet_size = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ omap_hdmi_dai_dma_params.packet_size = 32;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+
+ snd_soc_dai_set_dma_data(dai, substream,
+ &omap_hdmi_dai_dma_params);
+
+ return err;
+}
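+
+/*
+ * Worked numbers for the packet sizes above (a sanity check, assuming the
+ * sDMA element size stays OMAP_DMA_DATA_TYPE_S32, i.e. 4 bytes per element):
+ * S16_LE uses 16 elements = 64 bytes per packet and S24_LE uses 32 elements
+ * = 128 bytes per packet. Both divide the 128-byte period step enforced in
+ * omap_hdmi_dai_startup(), so a period always holds a whole number of DMA
+ * packets.
+ */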
+
+static struct snd_soc_dai_ops omap_hdmi_dai_ops = {
+ .startup = omap_hdmi_dai_startup,
+ .hw_params = omap_hdmi_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver omap_hdmi_dai = {
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = OMAP_HDMI_RATES,
+ .formats = OMAP_HDMI_FORMATS,
+ },
+ .ops = &omap_hdmi_dai_ops,
+};
+
+static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *hdmi_rsrc;
+
+ hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!hdmi_rsrc) {
+ dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
+ return -EINVAL;
+ }
+
+ omap_hdmi_dai_dma_params.port_addr = hdmi_rsrc->start
+ + OMAP_HDMI_AUDIO_DMA_PORT;
+
+ hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!hdmi_rsrc) {
+ dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
+ return -EINVAL;
+ }
+
+ omap_hdmi_dai_dma_params.dma_req = hdmi_rsrc->start;
+
+ ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
+ return ret;
+}
+
+static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dai(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver hdmi_dai_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_hdmi_probe,
+ .remove = __devexit_p(omap_hdmi_remove),
+};
+
+static int __init hdmi_dai_init(void)
+{
+ return platform_driver_register(&hdmi_dai_driver);
+}
+module_init(hdmi_dai_init);
+
+static void __exit hdmi_dai_exit(void)
+{
+ platform_driver_unregister(&hdmi_dai_driver);
+}
+module_exit(hdmi_dai_exit);
+
+MODULE_AUTHOR("Jorge Candelaria <jorge.candelaria@ti.com>");
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
new file mode 100644
index 0000000..34c298d
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi.h
@@ -0,0 +1,36 @@
+/*
+ * omap-hdmi.h
+ *
+ * Definitions for OMAP ALSA SoC DAI driver for HDMI audio on OMAP4 processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Jorge Candelaria <jorge.candelaria@ti.com>
+ * Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_HDMI_H__
+#define __OMAP_HDMI_H__
+
+#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
+
+#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+#endif
diff --git a/sound/soc/omap/omap-mcasp.c b/sound/soc/omap/omap-mcasp.c
new file mode 100644
index 0000000..e0a6e24
--- /dev/null
+++ b/sound/soc/omap/omap-mcasp.c
@@ -0,0 +1,722 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI OMAP processor
+ *
+ * Multi-channel Audio Serial Port Driver
+ *
+ * Author: Jon Hunter <jon-hunter@ti.com>,
+ * Dan Milea <dan.milea@ti.com>,
+ *
+ * Based upon McASP driver written for TI DaVinci
+ *
+ * Copyright: (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/clock.h>
+#include <plat/dma.h>
+#include <plat/dma-44xx.h>
+
+#include "omap-pcm.h"
+#include "omap-mcasp.h"
+
+/*
+ * McASP register definitions
+ */
+#define OMAP_MCASP_PID_REG 0x00
+#define OMAP_MCASP_SYSCONFIG_REG 0x04
+
+#define OMAP_MCASP_PFUNC_REG 0x10
+#define OMAP_MCASP_PDIR_REG 0x14
+#define OMAP_MCASP_PDOUT_REG 0x18
+#define OMAP_MCASP_PDIN_REG 0x1c
+#define OMAP_MCASP_PDSET_REG 0x1c
+#define OMAP_MCASP_PDCLR_REG 0x20
+
+#define OMAP_MCASP_GBLCTL_REG 0x44
+#define OMAP_MCASP_AMUTE_REG 0x48
+
+#define OMAP_MCASP_TXDITCTL_REG 0x50
+
+#define OMAP_MCASP_TXMASK_REG 0xa4
+#define OMAP_MCASP_TXFMT_REG 0xa8
+#define OMAP_MCASP_TXFMCTL_REG 0xac
+
+#define OMAP_MCASP_ACLKXCTL_REG 0xb0
+#define OMAP_MCASP_AHCLKXCTL_REG 0xb4
+#define OMAP_MCASP_TXTDM_REG 0xb8
+#define OMAP_MCASP_EVTCTLX_REG 0xbc
+
+#define OMAP_MCASP_TXSTAT_REG 0xc0
+#define OMAP_MCASP_TXSTAT_MASK 0x1ff
+
+#define OMAP_MCASP_TXTDMSLOT_REG 0xc4
+#define OMAP_MCASP_TXCLKCHK_REG 0xc8
+#define OMAP_MCASP_TXEVTCTL_REG 0xcc
+
+/* Left(even TDM Slot) Channel Status Register File */
+#define OMAP_MCASP_DITCSRA_REG 0x100
+/* Right(odd TDM slot) Channel Status Register File */
+#define OMAP_MCASP_DITCSRB_REG 0x118
+/* Left(even TDM slot) User Data Register File */
+#define OMAP_MCASP_DITUDRA_REG 0x130
+/* Right(odd TDM Slot) User Data Register File */
+#define OMAP_MCASP_DITUDRB_REG 0x148
+
+/* Serializer n Control Register */
+#define OMAP_MCASP_XRSRCTL0_REG 0x180
+
+/* Transmit Buffer for Serializer */
+#define OMAP_MCASP_TXBUF0_REG 0x200
+
+/*
+ * OMAP_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
+ */
+#define AXR0 BIT(0)
+#define PFUNC_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+
+/*
+ * OMAP_MCASP_PDIR_REG - Pin Direction Register Bits
+ */
+#define AXR0 BIT(0)
+#define PDIR_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+
+/*
+ * OMAP_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
+ */
+#define DITEN BIT(0) /* Transmit DIT mode enable/disable */
+#define VA BIT(2)
+#define VB BIT(3)
+
+/*
+ * OMAP_MCASP_TXFMT_REG - Transmit Bitstream Format Register Bits
+ */
+#define TXROT(val) (val)
+#define TXROT_MASK TXROT(0x7)
+#define TXSEL BIT(3)
+#define TXSSZ(val) (val<<4)
+#define TXSSZ_MASK TXSSZ(0xf<<4)
+#define TXPAD(val) (val<<13)
+#define TXORD BIT(15)
+#define FSXDLY(val) (val<<16)
+
+#define ROTATE_24 0x6
+#define SLOTSIZE_32 0xf
+
+/*
+ * OMAP_MCASP_TXFMCTL_REG - Transmit Frame Control Register Bits
+ */
+#define FSXPOL BIT(0)
+#define AFSXE BIT(1)
+#define FSXDUR BIT(4)
+#define FSXMOD(val) (val<<7)
+
+/*
+ * OMAP_MCASP_ACLKXCTL_REG - Transmit Clock Control Register Bits
+ */
+#define ACLKXDIV(val) (val)
+#define ACLKXE BIT(5)
+#define TX_ASYNC BIT(6)
+
+/*
+ * OMAP_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
+ * Register Bits
+ */
+#define AHCLKXDIV(val) (val)
+#define AHCLKXE BIT(15)
+
+/*
+ * OMAP_MCASP_EVTCTLX_REG - Transmitter Interrupt Control Register bits
+ */
+#define EVTCTLX_XUNDRN BIT(0)
+
+/*
+ * OMAP_MCASP_TXSTAT_REG - Transmit Status Register Bits
+ */
+#define TXSTAT_XUNDRN (0x1 << 0)
+#define TXSTAT_XSYNCERR (0x1 << 1)
+#define TXSTAT_XCKFAIL (0x1 << 2)
+#define TXSTAT_XDMSLOT (0x1 << 3)
+#define TXSTAT_XLAST (0x1 << 4)
+#define TXSTAT_XDATA (0x1 << 5)
+#define TXSTAT_XSTAFRM (0x1 << 6)
+#define TXSTAT_XDMAERR (0x1 << 7)
+#define TXSTAT_XERR (0x1 << 8)
+
+/*
+ * OMAP_MCASP_XRSRCTL_BASE_REG - Serializer Control Register Bits
+ */
+#define MODE(val) (val)
+#define TXSTATE BIT(4)
+
+/*
+ * OMAP_MCASP_TXTDMSLOT_REG - Transmit TDM Slot Register configuration
+ */
+#define TXTDMS(n) (1<<n)
+
+/*
+ * OMAP_MCASP_GBLCTL_REG - Global Control Register Bits
+ */
+#define TXCLKRST BIT(8) /* Transmitter Clock Divider Reset */
+#define TXHCLKRST BIT(9) /* Transmitter High Frequency Clock Divider*/
+#define TXSERCLR BIT(10) /* Transmit Serializer Clear */
+#define TXSMRST BIT(11) /* Transmitter State Machine Reset */
+#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
+
+/*
+ * OMAP_MCASP_AMUTE_REG - Mute Control Register Bits
+ */
+#define MUTENA(val) (val)
+#define MUTEINPOL BIT(2)
+#define MUTEINENA BIT(3)
+#define MUTEIN BIT(4)
+#define MUTEX BIT(6)
+#define MUTEFSX BIT(8)
+#define MUTEBADCLKX BIT(10)
+#define MUTETXDMAERR BIT(12)
+
+/*
+ * OMAP_MCASP_TXEVTCTL_REG - Transmitter DMA Event Control Register bits
+ */
+#define TXDATADMADIS BIT(0)
+
+#define MCASP_ALLOWED_PPM 100
+
+/*
+ * OMAP_MCASP_DITCSRA_REG/OMAP_MCASP_DITCSRB_REG
+ */
+#define OMAP_MCASP_DITCSR_44100HZ (0x0 << 24)
+#define OMAP_MCASP_DITCSR_48000HZ (0x2 << 24)
+#define OMAP_MCASP_DITCSR_32000HZ (0x3 << 24)
+#define OMAP_MCASP_DITCSR_22050HZ (0x4 << 24)
+#define OMAP_MCASP_DITCSR_24000HZ (0x6 << 24)
+#define OMAP_MCASP_DITCSR_88200HZ (0x8 << 24)
+#define OMAP_MCASP_DITCSR_96000HZ (0xA << 24)
+#define OMAP_MCASP_DITCSR_176400HZ (0xC << 24)
+#define OMAP_MCASP_DITCSR_192000HZ (0xE << 24)
+
+/*
+ * Stream DMA parameters
+ */
+static struct omap_pcm_dma_data omap_mcasp_dai_dma_params[] = {
+ {
+ .name = "Audio playback",
+ .dma_req = OMAP44XX_DMA_MCASP1_AXEVT,
+ .data_type = OMAP_DMA_DATA_TYPE_S16,
+ .sync_mode = OMAP_DMA_SYNC_ELEMENT,
+ .port_addr = OMAP44XX_MCASP_DAT_BASE + OMAP_MCASP_TXBUF0_REG,
+ },
+};
+
+static inline void mcasp_set_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel(__raw_readl(reg) | val, reg);
+}
+
+static inline void mcasp_clr_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel((__raw_readl(reg) & ~(val)), reg);
+}
+
+static inline void mcasp_mod_bits(void __iomem *reg, u32 val, u32 mask)
+{
+ __raw_writel((__raw_readl(reg) & ~mask) | val, reg);
+}
+
+static inline void mcasp_set_reg(void __iomem *reg, u32 val)
+{
+ __raw_writel(val, reg);
+}
+
+static inline u32 mcasp_get_reg(void __iomem *reg)
+{
+ return (unsigned int)__raw_readl(reg);
+}
+
+static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
+{
+ int i = 0;
+
+ mcasp_set_bits(regs, val);
+
+ /* programming GBLCTL needs to read back from GBLCTL and verify */
+ /* loop count is to avoid the lock-up */
+ for (i = 0; i < 1000; i++) {
+ if ((mcasp_get_reg(regs) & val) == val)
+ break;
+ }
+
+ if (i == 1000 && ((mcasp_get_reg(regs) & val) != val))
+ printk(KERN_ERR "GBLCTL write error\n");
+}
+
+static int mcasp_compute_clock_dividers(long fclk_rate, int tgt_sample_rate,
+ int *out_div_lo, int *out_div_hi)
+{
+ /* Given a particular functional clock rate and a target audio sample
+ * rate, determine the proper values for the ACLKXCTL and AHCLKXCTL, the
+ * dividers which produce the high frequency transmit master clock and
+ * the transmit clock.
+ */
+ long divisor;
+ unsigned long ppm;
+ int sample_rate, i;
+ BUG_ON(!out_div_lo);
+ BUG_ON(!out_div_hi);
+
+ /* A single S/PDIF frame requires 128 clocks */
+ divisor = DIV_ROUND_CLOSEST(fclk_rate, tgt_sample_rate << 7);
+ if (!divisor)
+ return -EINVAL;
+
+ sample_rate = (fclk_rate >> 7) / divisor;
+
+ /* ppm calculation in two steps to avoid overflow */
+ ppm = abs(tgt_sample_rate - sample_rate);
+ ppm = (1000000 * ppm) / tgt_sample_rate;
+
+ if (ppm > MCASP_ALLOWED_PPM)
+ return -EINVAL;
+
+ /* At this point, divisor holds the product of the two divider values we
+ * need to use for ACLKXCTL and AHCLKXCTL. ACLKXCTL holds a 5 bit
+ * divider [1, 32], while AHCLKXCTL holds a 12 bit divider [1, 4096].
+ * We need to make sure that we can factor divisor into two integers
+ * which will fit into these divider registers. Find the largest 5-bit
+ * + 1 value which divides divisor and use that as our smaller divider.
+ * After removing this factor from divisor, if the result is <= 4096,
+ * then we have succeeded and will be able to produce the target sample
+ * rate.
+ */
+ for (i = 32; (i > 1) && (divisor % i); --i)
+ ; /* no body */
+
+ /* Make sure to subtract one, registers hold the value of the divider
+ * minus one (IOW, to divide by 5, the register gets programmed with the
+ * value 4). */
+ *out_div_lo = i - 1;
+ *out_div_hi = (divisor / i) - 1;
+
+ return (*out_div_hi <= 4095) ? 0 : -EINVAL;
+}
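+
+/*
+ * Worked example (illustrative only; the actual fclk rate depends on the
+ * board's clock tree -- 98.304 MHz is assumed here): for a 48 kHz target,
+ * the S/PDIF bit clock must be 128 * 48000 = 6.144 MHz, so
+ * divisor = DIV_ROUND_CLOSEST(98304000, 48000 << 7) = 16. The largest
+ * factor of 16 that fits the 5-bit divider is 16 itself, giving
+ * *out_div_lo = 15 and *out_div_hi = 0 after the minus-one encoding,
+ * i.e. ACLKX divides by 16 and AHCLKX divides by 1.
+ */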
+
+static int omap_mcasp_start(struct omap_mcasp *mcasp)
+{
+ int i;
+ mcasp_set_ctl_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, TXHCLKRST);
+ mcasp_set_ctl_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, TXCLKRST);
+ mcasp_set_ctl_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, TXSERCLR);
+
+ /* Wait until the DMA has loaded the first sample into TXBUF before we
+ * let the TX state machine and frame sync generator out of reset. */
+ i = 0;
+ while (1) {
+ u32 reg = mcasp_get_reg(mcasp->base + OMAP_MCASP_TXSTAT_REG);
+ if (!(reg & TXSTAT_XDATA))
+ break;
+
+ if (++i > 1000) {
+ printk(KERN_ERR "Timeout waiting for DMA to load first"
+ " sample of audio.\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ }
+
+ mcasp_set_ctl_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, TXSMRST);
+ mcasp_set_ctl_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, TXFSRST);
+ mcasp_clr_bits(mcasp->base + OMAP_MCASP_TXEVTCTL_REG, TXDATADMADIS);
+
+ /* enable IRQ sources */
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_EVTCTLX_REG, EVTCTLX_XUNDRN);
+
+ return 0;
+}
+
+static void omap_mcasp_stop(struct omap_mcasp *mcasp)
+{
+ /* disable IRQ sources */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_EVTCTLX_REG, 0);
+
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_GBLCTL_REG, 0);
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXSTAT_REG,
+ OMAP_MCASP_TXSTAT_MASK);
+}
+
+/* S/PDIF */
+static int omap_mcasp_setup(struct omap_mcasp *mcasp, unsigned int rate)
+{
+ int aclkxdiv, ahclkxdiv;
+ u32 ditcsr;
+ int res;
+
+ /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXFMCTL_REG,
+ AFSXE | FSXMOD(0x180));
+
+ /* Set the TX clock controls : div = 1 and internal */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_ACLKXCTL_REG,
+ ACLKXE | TX_ASYNC);
+
+ /* Set the HS TX clock controls : div = 1 and internal */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_AHCLKXCTL_REG, AHCLKXE);
+
+ /* The SPDIF bit clock is derived from the McASP functional clock.
+ * The McASP has two programmable clock dividers (aclkxdiv and
+ * ahclkxdiv) that are configured via the registers MCASP_ACLKXCTL
+ * and MCASP_AHCLKXCTL. For SPDIF the bit clock frequency should be
+ * 128 * sample rate freq. The dividers are defined as part of
+ * platform data as they are dependent upon the functional clock
+ * setting. Lookup the appropriate dividers for the sampling
+ * frequency that we are playing.
+ */
+ res = mcasp_compute_clock_dividers(clk_get_rate(mcasp->fclk),
+ rate,
+ &aclkxdiv,
+ &ahclkxdiv);
+ if (res) {
+ dev_err(mcasp->dev,
+ "%s: No valid McASP config for sampling rate (%d)!\n",
+ __func__, rate);
+ return res;
+ }
+
+ switch (rate) {
+ case 22050:
+ ditcsr = OMAP_MCASP_DITCSR_22050HZ;
+ break;
+ case 24000:
+ ditcsr = OMAP_MCASP_DITCSR_24000HZ;
+ break;
+ case 32000:
+ ditcsr = OMAP_MCASP_DITCSR_32000HZ;
+ break;
+ case 44100:
+ ditcsr = OMAP_MCASP_DITCSR_44100HZ;
+ break;
+ case 48000:
+ ditcsr = OMAP_MCASP_DITCSR_48000HZ;
+ break;
+ case 88200:
+ ditcsr = OMAP_MCASP_DITCSR_88200HZ;
+ break;
+ case 96000:
+ ditcsr = OMAP_MCASP_DITCSR_96000HZ;
+ break;
+ case 176400:
+ ditcsr = OMAP_MCASP_DITCSR_176400HZ;
+ break;
+ case 192000:
+ ditcsr = OMAP_MCASP_DITCSR_192000HZ;
+ break;
+ default:
+ dev_err(mcasp->dev, "%s: Invalid sampling rate: %d\n",
+ __func__, rate);
+ return -EINVAL;
+ }
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_DITCSRA_REG, ditcsr);
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_DITCSRB_REG, ditcsr);
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_AHCLKXCTL_REG,
+ AHCLKXDIV(ahclkxdiv));
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_ACLKXCTL_REG,
+ ACLKXDIV(aclkxdiv));
+
+ /* Configure McASP formatter */
+ mcasp_mod_bits(mcasp->base + OMAP_MCASP_TXFMT_REG,
+ TXSSZ(SLOTSIZE_32), TXSSZ_MASK);
+ mcasp_mod_bits(mcasp->base + OMAP_MCASP_TXFMT_REG, TXROT(ROTATE_24),
+ TXROT_MASK);
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXMASK_REG, 0xFFFF);
+
+ /* Set the TX tdm : for all the slots */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXTDM_REG, 0xFFFFFFFF);
+
+ /* configure the serializer for transmit mode operation */
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_XRSRCTL0_REG, MODE(1));
+
+ /* All PINS as McASP */
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_PFUNC_REG, 0);
+
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_PDIR_REG, AXR0);
+
+ /* Enable the DIT */
+ mcasp_set_bits(mcasp->base + OMAP_MCASP_TXDITCTL_REG, DITEN);
+
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXSTAT_REG, 0xFF);
+
+ return 0;
+}
+
+static irqreturn_t omap_mcasp_irq_handler(int irq, void *data)
+{
+ struct omap_mcasp *mcasp = data;
+ u32 txstat;
+
+ txstat = mcasp_get_reg(mcasp->base + OMAP_MCASP_TXSTAT_REG);
+ if (txstat & TXSTAT_XUNDRN) {
+ dev_err(mcasp->dev, "%s: Underrun (0x%08x)\n", __func__,
+ txstat);
+
+ /* Try to recover from this state */
+ spin_lock(&mcasp->lock);
+ if (likely(mcasp->stream_rate)) {
+ dev_err(mcasp->dev, "%s: Trying to recover\n",
+ __func__);
+ omap_mcasp_stop(mcasp);
+ omap_mcasp_setup(mcasp, mcasp->stream_rate);
+ omap_mcasp_start(mcasp);
+ }
+ spin_unlock(&mcasp->lock);
+ }
+
+ mcasp_set_reg(mcasp->base + OMAP_MCASP_TXSTAT_REG, txstat);
+
+ return IRQ_HANDLED;
+}
+
+static int omap_mcasp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+
+ /* HACK: Only allow C2 state */
+ pm_qos_add_request(mcasp->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1150);
+
+ pm_runtime_get_sync(mcasp->dev);
+
+ return 0;
+}
+
+static void omap_mcasp_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct omap_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+
+ pm_runtime_put_sync(mcasp->dev);
+
+ /* HACK: remove qos */
+ pm_qos_remove_request(mcasp->pm_qos);
+}
+
+static int omap_mcasp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct omap_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+ int stream = substream->stream;
+
+ omap_mcasp_stop(mcasp);
+
+ if (omap_mcasp_setup(mcasp, params_rate(params)) < 0)
+ return -EPERM;
+
+ snd_soc_dai_set_dma_data(dai, substream,
+ &omap_mcasp_dai_dma_params[stream]);
+
+ return 0;
+}
+
+static int omap_mcasp_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *cpu_dai)
+{
+ struct omap_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mcasp->lock, flags);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ mcasp->stream_rate = substream->runtime->rate;
+ ret = omap_mcasp_start(mcasp);
+ break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ mcasp->stream_rate = 0;
+ omap_mcasp_stop(mcasp);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&mcasp->lock, flags);
+
+ return ret;
+}
+
+static struct snd_soc_dai_ops omap_mcasp_dai_ops = {
+ .startup = omap_mcasp_startup,
+ .shutdown = omap_mcasp_shutdown,
+ .trigger = omap_mcasp_trigger,
+ .hw_params = omap_mcasp_hw_params,
+
+};
+
+#define MCASP_RATES (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+static struct snd_soc_dai_driver omap_mcasp_dai[] = {
+ {
+ .name = "omap-mcasp-dai",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 384,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = MCASP_RATES,
+ },
+ .ops = &omap_mcasp_dai_ops,
+ },
+};
+
+static __devinit int omap_mcasp_probe(struct platform_device *pdev)
+{
+ struct omap_mcasp *mcasp;
+ struct resource *res;
+ long fclk_rate;
+ int ret = 0;
+
+ mcasp = kzalloc(sizeof(struct omap_mcasp), GFP_KERNEL);
+ if (!mcasp)
+ return -ENOMEM;
+
+ spin_lock_init(&mcasp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no resource\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
+
+ mcasp->base = ioremap(res->start, resource_size(res));
+ if (!mcasp->base) {
+ ret = -ENOMEM;
+ goto err_res;
+ }
+
+ mcasp->irq = platform_get_irq(pdev, 0);
+ if (mcasp->irq < 0) {
+ ret = mcasp->irq;
+ goto err_irq;
+ }
+
+ ret = request_threaded_irq(mcasp->irq, NULL, omap_mcasp_irq_handler,
+ 0, "McASP", mcasp);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_irq;
+ }
+
+ mcasp->fclk = clk_get(&pdev->dev, "mcasp_fck");
+ if (IS_ERR(mcasp->fclk)) {
+ ret = PTR_ERR(mcasp->fclk);
+ goto err_clk;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ fclk_rate = clk_get_rate(mcasp->fclk);
+
+ platform_set_drvdata(pdev, mcasp);
+ mcasp->dev = &pdev->dev;
+
+ ret = snd_soc_register_dai(&pdev->dev, omap_mcasp_dai);
+ if (ret < 0)
+ goto err_dai;
+
+ /* HACK: qos */
+ mcasp->pm_qos = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
+ if (!mcasp->pm_qos) {
+ ret = -ENOMEM;
+ goto err_dai;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+
+err_dai:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+err_clk:
+ free_irq(mcasp->irq, (void *)mcasp);
+err_irq:
+ iounmap(mcasp->base);
+err_res:
+ kfree(mcasp);
+ return ret;
+}
+
+static __devexit int omap_mcasp_remove(struct platform_device *pdev)
+{
+ struct omap_mcasp *mcasp = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_dai(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ clk_put(mcasp->fclk);
+ free_irq(mcasp->irq, (void *)mcasp);
+ iounmap(mcasp->base);
+ /* HACK: qos */
+ kfree(mcasp->pm_qos);
+ kfree(mcasp);
+
+ return 0;
+}
+
+static struct platform_driver omap_mcasp_driver = {
+ .probe = omap_mcasp_probe,
+ .remove = omap_mcasp_remove,
+ .driver = {
+ .name = "omap-mcasp-dai",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_mcasp_init(void)
+{
+ return platform_driver_register(&omap_mcasp_driver);
+}
+module_init(omap_mcasp_init);
+
+static void __exit omap_mcasp_exit(void)
+{
+ platform_driver_unregister(&omap_mcasp_driver);
+}
+module_exit(omap_mcasp_exit);
+
+MODULE_AUTHOR("Jon Hunter <jon-hunter@ti.com>");
+MODULE_DESCRIPTION("TI OMAP McASP SoC Interface");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-mcasp.h b/sound/soc/omap/omap-mcasp.h
new file mode 100644
index 0000000..30ce3f0
--- /dev/null
+++ b/sound/soc/omap/omap-mcasp.h
@@ -0,0 +1,37 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI OMAP processor
+ *
+ * MCASP related definitions
+ *
+ * Author: Jon Hunter <jon-hunter@ti.com>,
+ * Dan Milea <dan.milea@ti.com>,
+ *
+ * Based upon McASP driver written for TI DaVinci
+ *
+ * Copyright: (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP_MCASP_H
+#define OMAP_MCASP_H
+
+#include <linux/io.h>
+#include <plat/mcasp.h>
+
+#define OMAP44XX_MCASP_CFG_BASE 0x49028000
+#define OMAP44XX_MCASP_DAT_BASE 0x4902A000
+
+struct omap_mcasp {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+ struct clk *fclk;
+ int irq;
+ unsigned int stream_rate;
+ struct pm_qos_request_list *pm_qos;
+};
+
+#endif /* OMAP_MCASP_H */
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 4b82290..8b6a510 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -258,7 +258,7 @@
default:
return -EINVAL;
}
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
dma_data->set_threshold = omap_mcbsp_set_threshold;
/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
if (omap_mcbsp_get_dma_op_mode(bus_id) ==
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index bed09c2..601082e 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -25,7 +25,16 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -33,33 +42,54 @@
#include <sound/soc.h>
#include <plat/dma.h>
-#include <plat/mcbsp.h>
-#include "mcpdm.h"
+#include <plat/omap_hwmod.h>
+#include <plat/mcpdm.h>
+#include "../../../arch/arm/mach-omap2/cm1_44xx.h"
+#include "omap-mcpdm.h"
#include "omap-pcm.h"
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+#include "omap-abe-dsp.h"
+#include "abe/abe_main.h"
+#include "abe/port_mgr.h"
+#endif
-struct omap_mcpdm_data {
- struct omap_mcpdm_link *links;
+#define MCPDM_LEGACY_DAI_DL1 0
+#define MCPDM_LEGACY_DAI_UL1 1
+#define MCPDM_ABE_DAI_DL1 2
+#define MCPDM_ABE_DAI_DL2 3
+#define MCPDM_ABE_DAI_VIB 4
+#define MCPDM_ABE_DAI_UL1 5
+
+#define CLKCTRL_MODULEMODE_MASK 0x0003
+#define CLKCTRL_MODULEMODE_DISABLED 0x0000
+#define CLKCTRL_MODULEMODE_ENABLED 0x0002
+
+struct omap_mcpdm {
+ struct device *dev;
+ unsigned long phys_base;
+ void __iomem *io_base;
+ int irq;
+
+ struct mutex mutex;
+ struct omap_mcpdm_platform_data *pdata;
+ struct completion irq_completion;
+ struct delayed_work esd_work;
+ struct abe *abe;
+ struct omap_abe_port *dl_port;
+ struct omap_abe_port *ul_port;
+
+ u32 *reg_cache;
+
+ /* channel data */
+ u32 dn_channels;
+ u32 up_channels;
int active;
-};
+ int abe_mode;
-static struct omap_mcpdm_link omap_mcpdm_links[] = {
- /* downlink */
- {
- .irq_mask = MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL,
- .threshold = 1,
- .format = PDMOUTFORMAT_LJUST,
- },
- /* uplink */
- {
- .irq_mask = MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL,
- .threshold = 1,
- .format = PDMOUTFORMAT_LJUST,
- },
-};
-
-static struct omap_mcpdm_data mcpdm_data = {
- .links = omap_mcpdm_links,
- .active = 0,
+ /* DC offset */
+ unsigned long dl1_offset;
+ unsigned long dl2_offset;
};
/*
@@ -84,64 +114,362 @@
},
};
+static inline void omap_mcpdm_write(struct omap_mcpdm *mcpdm,
+ u16 reg, u32 val)
+{
+ __raw_writel(val, mcpdm->io_base + reg);
+}
+
+static inline int omap_mcpdm_read(struct omap_mcpdm *mcpdm, u16 reg)
+{
+ return __raw_readl(mcpdm->io_base + reg);
+}
+
+static inline void omap_mcpdm_write_cache(struct omap_mcpdm *mcpdm,
+ u16 reg, u32 val)
+{
+ mcpdm->reg_cache[reg / sizeof(u32)] = val;
+}
+
+static inline int omap_mcpdm_read_cache(struct omap_mcpdm *mcpdm, u16 reg)
+{
+ return mcpdm->reg_cache[reg / sizeof(u32)];
+}
+
+#ifdef DEBUG
+static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm)
+{
+ dev_dbg(mcpdm->dev, "***********************\n");
+ dev_dbg(mcpdm->dev, "IRQSTATUS_RAW: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_IRQSTATUS_RAW));
+ dev_dbg(mcpdm->dev, "IRQSTATUS: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_IRQSTATUS));
+ dev_dbg(mcpdm->dev, "IRQENABLE_SET: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_IRQENABLE_SET));
+ dev_dbg(mcpdm->dev, "IRQENABLE_CLR: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_IRQENABLE_CLR));
+ dev_dbg(mcpdm->dev, "IRQWAKE_EN: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_IRQWAKE_EN));
+ dev_dbg(mcpdm->dev, "DMAENABLE_SET: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_DMAENABLE_SET));
+ dev_dbg(mcpdm->dev, "DMAENABLE_CLR: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_DMAENABLE_CLR));
+ dev_dbg(mcpdm->dev, "DMAWAKEEN: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_DMAWAKEEN));
+ dev_dbg(mcpdm->dev, "CTRL: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_CTRL));
+ dev_dbg(mcpdm->dev, "DN_DATA: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_DN_DATA));
+ dev_dbg(mcpdm->dev, "UP_DATA: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_UP_DATA));
+ dev_dbg(mcpdm->dev, "FIFO_CTRL_DN: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_DN));
+ dev_dbg(mcpdm->dev, "FIFO_CTRL_UP: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_UP));
+ dev_dbg(mcpdm->dev, "DN_OFFSET: 0x%04x\n",
+ omap_mcpdm_read(mcpdm, MCPDM_DN_OFFSET));
+ dev_dbg(mcpdm->dev, "***********************\n");
+}
+#else
+static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm) {}
+#endif
+
+/*
+ * Enables the transfer through the PDM interface to/from the Phoenix
+ * codec by enabling the corresponding UP and DN channels.
+ */
+static void omap_mcpdm_start(struct omap_mcpdm *mcpdm)
+{
+ u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_CTRL);
+
+ ctrl |= (SW_UP_RST | SW_DN_RST);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+ ctrl |= (mcpdm->up_channels | mcpdm->dn_channels);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+ ctrl &= ~(SW_UP_RST | SW_DN_RST);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+}
+
+/*
+ * Disables the transfer through the PDM interface to/from the Phoenix
+ * codec by disabling the corresponding UP and DN channels.
+ */
+static void omap_mcpdm_stop(struct omap_mcpdm *mcpdm)
+{
+ u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_CTRL);
+
+ ctrl |= (SW_UP_RST | SW_DN_RST);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+ ctrl &= ~(mcpdm->up_channels | mcpdm->dn_channels);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+ ctrl &= ~(SW_UP_RST | SW_DN_RST);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl);
+}
+
+/*
+ * Report whether the physical McPDM interface is currently active.
+ */
+static inline int omap_mcpdm_active(struct omap_mcpdm *mcpdm)
+{
+ return omap_mcpdm_read(mcpdm, MCPDM_CTRL) & (PDM_DN_MASK | PDM_UP_MASK);
+}
+
+/*
+ * Configures McPDM uplink/downlink for audio recording/playback
+ * This function should be called before omap_mcpdm_start.
+ */
+static void omap_mcpdm_open(struct omap_mcpdm *mcpdm)
+{
+ /* Enable irq request generation */
+ omap_mcpdm_write(mcpdm, MCPDM_IRQENABLE_SET,
+ MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL |
+ MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL);
+
+ /* Configure uplink and downlink FIFO thresholds */
+ omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_UP, 2);
+ omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_DN, 2);
+
+ /* Configure DMA controller */
+ omap_mcpdm_write(mcpdm, MCPDM_DMAENABLE_SET,
+ DMA_UP_ENABLE | DMA_DN_ENABLE);
+}
+
+/*
+ * Cleans McPDM uplink/downlink configuration.
+ * This function should be called when the stream is closed.
+ */
+static void omap_mcpdm_close(struct omap_mcpdm *mcpdm)
+{
+ /* Disable irq request generation */
+ omap_mcpdm_write(mcpdm, MCPDM_IRQENABLE_CLR,
+ MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL |
+ MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL);
+
+ /* Disable DMA request generation */
+ omap_mcpdm_write(mcpdm, MCPDM_DMAENABLE_CLR,
+ DMA_UP_ENABLE | DMA_DN_ENABLE);
+}
+
+static irqreturn_t omap_mcpdm_irq_handler(int irq, void *dev_id)
+{
+ struct omap_mcpdm *mcpdm = dev_id;
+ int irq_status;
+
+ irq_status = omap_mcpdm_read(mcpdm, MCPDM_IRQSTATUS);
+
+ /* Acknowledge irq event */
+ omap_mcpdm_write(mcpdm, MCPDM_IRQSTATUS, irq_status);
+
+ if (irq_status & MCPDM_DN_IRQ_FULL)
+ dev_err(mcpdm->dev, "DN FIFO error %x\n", irq_status);
+
+ if (irq_status & MCPDM_DN_IRQ_EMPTY)
+ dev_err(mcpdm->dev, "DN FIFO error %x\n", irq_status);
+
+ if (irq_status & MCPDM_DN_IRQ)
+ dev_dbg(mcpdm->dev, "DN write request\n");
+
+ if (irq_status & MCPDM_UP_IRQ_FULL)
+ dev_err(mcpdm->dev, "UP FIFO error %x\n", irq_status);
+
+ if (irq_status & MCPDM_UP_IRQ_EMPTY)
+ dev_err(mcpdm->dev, "UP FIFO error %x\n", irq_status);
+
+ if (irq_status & MCPDM_UP_IRQ)
+ dev_dbg(mcpdm->dev, "UP write request\n");
+
+ return IRQ_HANDLED;
+}
+
+/* Enable/disable DC offset cancellation for the analog
+ * headset path (PDM channels 1 and 2).
+ */
+static void omap_mcpdm_set_offset(struct omap_mcpdm *mcpdm)
+{
+ int offset;
+
+ if (mcpdm->dl1_offset > DN_OFST_MAX) {
+ dev_err(mcpdm->dev, "DC DL1 offset out of range\n");
+ return;
+ }
+
+ if (mcpdm->dl2_offset > DN_OFST_MAX) {
+ dev_err(mcpdm->dev, "DC DL2 offset out of range\n");
+ return;
+ }
+
+ offset = (mcpdm->dl1_offset << DN_OFST_RX1) |
+ (mcpdm->dl2_offset << DN_OFST_RX2);
+
+ /* offset cancellation for channel 1 */
+ if (mcpdm->dl1_offset)
+ offset |= DN_OFST_RX1_EN;
+ else
+ offset &= ~DN_OFST_RX1_EN;
+
+ /* offset cancellation for channel 2 */
+ if (mcpdm->dl2_offset)
+ offset |= DN_OFST_RX2_EN;
+ else
+ offset &= ~DN_OFST_RX2_EN;
+
+ omap_mcpdm_write(mcpdm, MCPDM_DN_OFFSET, offset);
+}
+
+static ssize_t mcpdm_dl1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_mcpdm *mcpdm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%ld\n", mcpdm->dl1_offset);
+}
+
+static ssize_t mcpdm_dl1_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct omap_mcpdm *mcpdm = dev_get_drvdata(dev);
+ int ret;
+ unsigned long value;
+
+ ret = strict_strtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > DN_OFST_MAX)
+ return -EINVAL;
+
+ mcpdm->dl1_offset = value;
+ return count;
+}
+
+static ssize_t mcpdm_dl2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_mcpdm *mcpdm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%ld\n", mcpdm->dl2_offset);
+}
+
+static ssize_t mcpdm_dl2_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct omap_mcpdm *mcpdm = dev_get_drvdata(dev);
+ int ret;
+ unsigned long value;
+
+ ret = strict_strtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > DN_OFST_MAX)
+ return -EINVAL;
+
+ mcpdm->dl2_offset = value;
+ return count;
+}
+
+static DEVICE_ATTR(dl1, 0644, mcpdm_dl1_show, mcpdm_dl1_set);
+static DEVICE_ATTR(dl2, 0644, mcpdm_dl2_show, mcpdm_dl2_set);
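+
+/*
+ * The dl1/dl2 attributes are plain sysfs files under the McPDM platform
+ * device. A usage sketch (the exact device path is board dependent and
+ * shown here only as an illustration):
+ *
+ *   cat /sys/devices/platform/omap-mcpdm/dl1
+ *   echo 16 > /sys/devices/platform/omap-mcpdm/dl1
+ *
+ * Values above DN_OFST_MAX are rejected with -EINVAL; a new offset takes
+ * effect the next time omap_mcpdm_set_offset() runs, i.e. at the next
+ * stream startup.
+ */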
+
static int omap_mcpdm_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ u32 ctrl;
+ u32 val;
int err = 0;
- if (!dai->active)
- err = omap_mcpdm_request();
+ dev_dbg(dai->dev, "%s: active %d\n", __func__, dai->active);
+ mutex_lock(&mcpdm->mutex);
+
+ /* nothing to do if already active */
+ if (mcpdm->active++)
+ goto out;
+
+ if (dai->id >= MCPDM_ABE_DAI_DL1)
+ mcpdm->abe_mode = 1;
+ else
+ mcpdm->abe_mode = 0;
+
+ pm_runtime_get_sync(mcpdm->dev);
+
+ val = __raw_readl(OMAP4430_CM1_ABE_PDM_CLKCTRL);
+ if ((val & CLKCTRL_MODULEMODE_MASK) != CLKCTRL_MODULEMODE_ENABLED) {
+ WARN(1, "Clock not enabled: PDM_CLKCTRL=0x%x\n", val);
+ mcpdm->active--;
+ pm_runtime_put_sync(mcpdm->dev);
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Enable the McPDM watchdog on ES revisions above ES 1.0 to avoid saturation */
+ if (omap_rev() != OMAP4430_REV_ES1_0) {
+ ctrl = omap_mcpdm_read(mcpdm, MCPDM_CTRL);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, ctrl | WD_EN);
+ }
+
+ omap_mcpdm_set_offset(mcpdm);
+ omap_mcpdm_open(mcpdm);
+ schedule_delayed_work(&mcpdm->esd_work, msecs_to_jiffies(250));
+out:
+ mutex_unlock(&mcpdm->mutex);
return err;
}
static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- if (!dai->active)
- omap_mcpdm_free();
-}
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
-static int omap_mcpdm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct omap_mcpdm_data *mcpdm_priv = snd_soc_dai_get_drvdata(dai);
- int stream = substream->stream;
- int err = 0;
+ dev_dbg(dai->dev, "%s: active %d\n", __func__, dai->active);
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!mcpdm_priv->active++)
- omap_mcpdm_start(stream);
- break;
+ mutex_lock(&mcpdm->mutex);
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (!--mcpdm_priv->active)
- omap_mcpdm_stop(stream);
- break;
- default:
- err = -EINVAL;
+ if (--mcpdm->active)
+ goto out;
+
+ if (mcpdm->abe_mode) {
+ if (omap_mcpdm_active(mcpdm)) {
+ omap_abe_port_disable(mcpdm->abe, mcpdm->dl_port);
+ omap_abe_port_disable(mcpdm->abe, mcpdm->ul_port);
+ udelay(250);
+ abe_remove_opp_req(mcpdm->dev);
+ omap_mcpdm_stop(mcpdm);
+ }
+ } else {
+ omap_mcpdm_stop(mcpdm);
}
- return err;
+ cancel_delayed_work_sync(&mcpdm->esd_work);
+ omap_mcpdm_close(mcpdm);
+
+ pm_runtime_put_sync(mcpdm->dev);
+
+out:
+ mutex_unlock(&mcpdm->mutex);
}
static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct omap_mcpdm_data *mcpdm_priv = snd_soc_dai_get_drvdata(dai);
- struct omap_mcpdm_link *mcpdm_links = mcpdm_priv->links;
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int stream = substream->stream;
- int channels, err, link_mask = 0;
+ int channels, link_mask = 0;
snd_soc_dai_set_dma_data(dai, substream,
&omap_mcpdm_dai_dma_params[stream]);
+ /* ABE DAIs have fixed channels */
+ if (mcpdm->abe_mode) {
+ mcpdm->dn_channels = PDM_DN_MASK | PDM_CMD_MASK;
+ mcpdm->up_channels = PDM_UP1_EN | PDM_UP2_EN;
+ return 0;
+ }
+
channels = params_channels(params);
switch (channels) {
case 4:
@@ -164,58 +492,224 @@
return -EINVAL;
}
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- mcpdm_links[stream].channels = link_mask << 3;
- err = omap_mcpdm_playback_open(&mcpdm_links[stream]);
- } else {
- mcpdm_links[stream].channels = link_mask << 0;
- err = omap_mcpdm_capture_open(&mcpdm_links[stream]);
- }
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ /* Downlink channels */
+ mcpdm->dn_channels = (link_mask << 3) & (PDM_DN_MASK | PDM_CMD_MASK);
+ else
+ /* Uplink channels */
+ mcpdm->up_channels = link_mask & (PDM_UP_MASK | PDM_STATUS_MASK);
- return err;
+ return 0;
}
-static int omap_mcpdm_dai_hw_free(struct snd_pcm_substream *substream,
+static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct omap_mcpdm_data *mcpdm_priv = snd_soc_dai_get_drvdata(dai);
- struct omap_mcpdm_link *mcpdm_links = mcpdm_priv->links;
- int stream = substream->stream;
- int err;
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- err = omap_mcpdm_playback_close(&mcpdm_links[stream]);
- else
- err = omap_mcpdm_capture_close(&mcpdm_links[stream]);
+ mutex_lock(&mcpdm->mutex);
- return err;
+ if (omap_mcpdm_active(mcpdm))
+ goto out;
+
+ if (mcpdm->abe_mode) {
+ /* Check if ABE McPDM is already started */
+ if (omap_abe_port_is_enabled(mcpdm->abe, mcpdm->ul_port) ||
+ omap_abe_port_is_enabled(mcpdm->abe, mcpdm->dl_port))
+ goto out;
+
+ /* PDM tasks require ABE OPP 50 */
+ abe_add_opp_req(mcpdm->dev, ABE_OPP_50);
+
+ /* start ATC before McPDM IP */
+ omap_abe_port_enable(mcpdm->abe, mcpdm->dl_port);
+ omap_abe_port_enable(mcpdm->abe, mcpdm->ul_port);
+
+ /* wait 250us for ABE tick */
+ udelay(250);
+ }
+
+ omap_mcpdm_start(mcpdm);
+
+out:
+ mutex_unlock(&mcpdm->mutex);
+ return 0;
+}
+
+static int omap_mcpdm_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "cmd %d\n", cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ break;
+ default:
+ break;
+ }
+ omap_mcpdm_reg_dump(mcpdm);
+ return 0;
+}
+
+static void mcpdm_esd_work(struct work_struct *work)
+{
+ struct omap_mcpdm *mcpdm = container_of(work, struct omap_mcpdm,
+ esd_work.work);
+
+ if (omap_mcpdm_read(mcpdm, MCPDM_STATUS)) {
+ if (mcpdm->abe_mode) {
+ omap_abe_port_disable(mcpdm->abe, mcpdm->dl_port);
+ omap_abe_port_disable(mcpdm->abe, mcpdm->ul_port);
+ udelay(250);
+ }
+ omap_mcpdm_stop(mcpdm);
+
+ if (mcpdm->abe_mode) {
+ omap_abe_port_enable(mcpdm->abe, mcpdm->dl_port);
+ omap_abe_port_enable(mcpdm->abe, mcpdm->ul_port);
+ udelay(250);
+ }
+ omap_mcpdm_start(mcpdm);
+ }
+ schedule_delayed_work(&mcpdm->esd_work, msecs_to_jiffies(250));
}
static struct snd_soc_dai_ops omap_mcpdm_dai_ops = {
.startup = omap_mcpdm_dai_startup,
.shutdown = omap_mcpdm_dai_shutdown,
- .trigger = omap_mcpdm_dai_trigger,
.hw_params = omap_mcpdm_dai_hw_params,
- .hw_free = omap_mcpdm_dai_hw_free,
+ .prepare = omap_mcpdm_prepare,
+ .trigger = omap_mcpdm_dai_trigger,
};
-#define OMAP_MCPDM_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-#define OMAP_MCPDM_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)
-
-static int omap_mcpdm_dai_probe(struct snd_soc_dai *dai)
+#ifdef CONFIG_PM
+static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
{
- snd_soc_dai_set_drvdata(dai, &mcpdm_data);
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ /* save context only if we are streaming */
+ if (!mcpdm->active)
+ return 0;
+
+ omap_mcpdm_write_cache(mcpdm, MCPDM_DN_OFFSET,
+ omap_mcpdm_read(mcpdm, MCPDM_DN_OFFSET));
+ omap_mcpdm_write_cache(mcpdm, MCPDM_IRQENABLE_SET,
+ omap_mcpdm_read(mcpdm, MCPDM_IRQENABLE_SET));
+ omap_mcpdm_write_cache(mcpdm, MCPDM_DMAENABLE_SET,
+ omap_mcpdm_read(mcpdm, MCPDM_DMAENABLE_SET));
+ omap_mcpdm_write_cache(mcpdm, MCPDM_FIFO_CTRL_DN,
+ omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_DN));
+ omap_mcpdm_write_cache(mcpdm, MCPDM_FIFO_CTRL_UP,
+ omap_mcpdm_read(mcpdm, MCPDM_FIFO_CTRL_UP));
+ omap_mcpdm_write_cache(mcpdm, MCPDM_CTRL,
+ omap_mcpdm_read(mcpdm, MCPDM_CTRL));
+
+ pm_runtime_put_sync(mcpdm->dev);
+
return 0;
}
-static struct snd_soc_dai_driver omap_mcpdm_dai = {
- .probe = omap_mcpdm_dai_probe,
+static int omap_mcpdm_resume(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ struct omap_mcpdm_platform_data *pdata = mcpdm->pdata;
+
+ /* restore context only if we were streaming */
+ if (!mcpdm->active)
+ return 0;
+
+ pm_runtime_get_sync(mcpdm->dev);
+
+ if (!pdata->was_context_lost(mcpdm->dev))
+ return 0;
+
+ /* restore from reg cache */
+ omap_mcpdm_write(mcpdm, MCPDM_DN_OFFSET,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_DN_OFFSET));
+ omap_mcpdm_write(mcpdm, MCPDM_IRQENABLE_SET,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_IRQENABLE_SET));
+ omap_mcpdm_write(mcpdm, MCPDM_DMAENABLE_SET,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_DMAENABLE_SET));
+ omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_DN,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_FIFO_CTRL_DN));
+ omap_mcpdm_write(mcpdm, MCPDM_FIFO_CTRL_UP,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_FIFO_CTRL_UP));
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL,
+ omap_mcpdm_read_cache(mcpdm, MCPDM_CTRL));
+
+ return 0;
+}
+#else
+#define omap_mcpdm_suspend NULL
+#define omap_mcpdm_resume NULL
+#endif
+
+static int omap_mcpdm_probe(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ pm_runtime_enable(mcpdm->dev);
+
+ /* Disable lines while request is ongoing */
+ pm_runtime_get_sync(mcpdm->dev);
+ omap_mcpdm_write(mcpdm, MCPDM_CTRL, 0x00);
+
+ ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler,
+ 0, "McPDM", (void *)mcpdm);
+ if (ret)
+ dev_err(mcpdm->dev, "Request for McPDM IRQ failed\n");
+
+ pm_runtime_put_sync(mcpdm->dev);
+ return ret;
+}
+
+static int omap_mcpdm_remove(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ free_irq(mcpdm->irq, (void *)mcpdm);
+ pm_runtime_disable(mcpdm->dev);
+
+ return 0;
+}
+
+#define OMAP_MCPDM_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+#define OMAP_MCPDM_FORMATS SNDRV_PCM_FMTBIT_S32_LE
+
+static struct snd_soc_dai_driver omap_mcpdm_dai[] = {
+{
+ .name = "mcpdm-dl",
+ .id = MCPDM_LEGACY_DAI_DL1,
+ .probe = omap_mcpdm_probe,
+ .remove = omap_mcpdm_remove,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
.playback = {
.channels_min = 1,
.channels_max = 4,
.rates = OMAP_MCPDM_RATES,
.formats = OMAP_MCPDM_FORMATS,
},
+ .ops = &omap_mcpdm_dai_ops,
+},
+{
+ .name = "mcpdm-ul",
+ .id = MCPDM_LEGACY_DAI_UL1,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
.capture = {
.channels_min = 1,
.channels_max = 2,
@@ -223,31 +717,196 @@
.formats = OMAP_MCPDM_FORMATS,
},
.ops = &omap_mcpdm_dai_ops,
-};
+},
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+{
+ .name = "mcpdm-dl1",
+ .id = MCPDM_ABE_DAI_DL1,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = OMAP_MCPDM_RATES,
+ .formats = OMAP_MCPDM_FORMATS,
+ },
+ .ops = &omap_mcpdm_dai_ops,
+},
+{
+ .name = "mcpdm-dl2",
+ .id = MCPDM_ABE_DAI_DL2,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = OMAP_MCPDM_RATES,
+ .formats = OMAP_MCPDM_FORMATS,
+ },
+ .ops = &omap_mcpdm_dai_ops,
+},
+{
+ .name = "mcpdm-vib",
+ .id = MCPDM_ABE_DAI_VIB,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = OMAP_MCPDM_RATES,
+ .formats = OMAP_MCPDM_FORMATS,
+ },
+ .ops = &omap_mcpdm_dai_ops,
+},
+{
+ .name = "mcpdm-ul1",
+ .id = MCPDM_ABE_DAI_UL1,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = OMAP_MCPDM_RATES,
+ .formats = OMAP_MCPDM_FORMATS,
+ },
+ .ops = &omap_mcpdm_dai_ops,
+},
+#endif
+ };
static __devinit int asoc_mcpdm_probe(struct platform_device *pdev)
{
- int ret;
+ struct omap_mcpdm_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_mcpdm *mcpdm;
+ struct resource *res;
+ int ret = 0, err;
- ret = omap_mcpdm_probe(pdev);
+ mcpdm = kzalloc(sizeof(struct omap_mcpdm), GFP_KERNEL);
+ if (!mcpdm)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mcpdm);
+
+ mutex_init(&mcpdm->mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no resource\n");
+ ret = -ENODEV;
+ goto err_res;
+ }
+
+ mcpdm->io_base = ioremap(res->start, resource_size(res));
+ if (!mcpdm->io_base) {
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ mcpdm->reg_cache = kzalloc(resource_size(res), GFP_KERNEL);
+ if (!mcpdm->reg_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ mcpdm->irq = platform_get_irq(pdev, 0);
+ if (mcpdm->irq < 0) {
+ ret = mcpdm->irq;
+ goto err_irq;
+ }
+
+ mcpdm->dev = &pdev->dev;
+ mcpdm->pdata = pdata;
+
+ /* DL1 and DL2 DC offset values will be different for each device */
+ mcpdm->dl1_offset = DN_OFST_MAX >> 1;
+ mcpdm->dl2_offset = DN_OFST_MAX >> 1;
+ err = device_create_file(mcpdm->dev, &dev_attr_dl1);
+ if (err < 0)
+ dev_err(mcpdm->dev,"failed to DL1 DC offset sysfs: %d\n", err);
+ err = device_create_file(mcpdm->dev, &dev_attr_dl2);
+ if (err < 0)
+ dev_err(mcpdm->dev,"failed to DL2 DC offset sysfs: %d\n", err);
+
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+
+ mcpdm->abe = omap_abe_port_mgr_get();
+ if (!mcpdm->abe) {
+ ret = -ENODEV;
+ goto err_irq;
+ }
+
+ mcpdm->ul_port = omap_abe_port_open(mcpdm->abe, OMAP_ABE_BE_PORT_PDM_UL1);
+ if (!mcpdm->ul_port) {
+ ret = -ENODEV;
+ goto err_ul;
+ }
+
+ mcpdm->dl_port = omap_abe_port_open(mcpdm->abe, OMAP_ABE_BE_PORT_PDM_DL1);
+ if (!mcpdm->dl_port) {
+ ret = -ENODEV;
+ goto err_dl;
+ }
+#endif
+
+ INIT_DELAYED_WORK(&mcpdm->esd_work, mcpdm_esd_work);
+
+ ret = snd_soc_register_dais(&pdev->dev, omap_mcpdm_dai,
+ ARRAY_SIZE(omap_mcpdm_dai));
if (ret < 0)
- return ret;
- ret = snd_soc_register_dai(&pdev->dev, &omap_mcpdm_dai);
- if (ret < 0)
- omap_mcpdm_remove(pdev);
+ goto err_dai;
+
+ return 0;
+
+err_dai:
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ omap_abe_port_close(mcpdm->abe, mcpdm->dl_port);
+err_dl:
+ omap_abe_port_close(mcpdm->abe, mcpdm->ul_port);
+err_ul:
+ omap_abe_port_mgr_put(mcpdm->abe);
+#endif
+err_irq:
+ kfree(mcpdm->reg_cache);
+err_cache:
+ iounmap(mcpdm->io_base);
+err_iomap:
+ release_mem_region(res->start, resource_size(res));
+err_res:
+ kfree(mcpdm);
return ret;
}
static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
{
- snd_soc_unregister_dai(&pdev->dev);
- omap_mcpdm_remove(pdev);
+ struct omap_mcpdm *mcpdm = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(omap_mcpdm_dai));
+
+ device_remove_file(&pdev->dev, &dev_attr_dl1);
+ device_remove_file(&pdev->dev, &dev_attr_dl2);
+
+#if defined(CONFIG_SND_OMAP_SOC_ABE_DSP) ||\
+ defined(CONFIG_SND_OMAP_SOC_ABE_DSP_MODULE)
+ omap_abe_port_close(mcpdm->abe, mcpdm->dl_port);
+ omap_abe_port_close(mcpdm->abe, mcpdm->ul_port);
+ omap_abe_port_mgr_put(mcpdm->abe);
+#endif
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(mcpdm->io_base);
+ kfree(mcpdm->reg_cache);
+ kfree(mcpdm);
return 0;
}
static struct platform_driver asoc_mcpdm_driver = {
.driver = {
- .name = "omap-mcpdm-dai",
+ .name = "omap-mcpdm",
.owner = THIS_MODULE,
},
diff --git a/sound/soc/omap/omap-mcpdm.h b/sound/soc/omap/omap-mcpdm.h
new file mode 100644
index 0000000..3aa5011
--- /dev/null
+++ b/sound/soc/omap/omap-mcpdm.h
@@ -0,0 +1,120 @@
+/*
+ * omap-mcpdm.h
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * Contact: Misael Lopez Cruz <x0052729@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_MCPDM_H__
+#define __OMAP_MCPDM_H__
+
+#include <linux/platform_device.h>
+
+#define MCPDM_REVISION 0x00
+#define MCPDM_SYSCONFIG 0x10
+#define MCPDM_IRQSTATUS_RAW 0x24
+#define MCPDM_IRQSTATUS 0x28
+#define MCPDM_IRQENABLE_SET 0x2C
+#define MCPDM_IRQENABLE_CLR 0x30
+#define MCPDM_IRQWAKE_EN 0x34
+#define MCPDM_DMAENABLE_SET 0x38
+#define MCPDM_DMAENABLE_CLR 0x3C
+#define MCPDM_DMAWAKEEN 0x40
+#define MCPDM_CTRL 0x44
+#define MCPDM_DN_DATA 0x48
+#define MCPDM_UP_DATA 0x4C
+#define MCPDM_FIFO_CTRL_DN 0x50
+#define MCPDM_FIFO_CTRL_UP 0x54
+#define MCPDM_DN_OFFSET 0x58
+#define MCPDM_STATUS 0x68
+
+/*
+ * MCPDM_IRQ bit fields
+ * IRQSTATUS_RAW, IRQSTATUS, IRQENABLE_SET, IRQENABLE_CLR
+ */
+
+#define MCPDM_DN_IRQ (1 << 0)
+#define MCPDM_DN_IRQ_EMPTY (1 << 1)
+#define MCPDM_DN_IRQ_ALMST_EMPTY (1 << 2)
+#define MCPDM_DN_IRQ_FULL (1 << 3)
+
+#define MCPDM_UP_IRQ (1 << 8)
+#define MCPDM_UP_IRQ_EMPTY (1 << 9)
+#define MCPDM_UP_IRQ_ALMST_FULL (1 << 10)
+#define MCPDM_UP_IRQ_FULL (1 << 11)
+
+#define MCPDM_DOWNLINK_IRQ_MASK 0x00F
+#define MCPDM_UPLINK_IRQ_MASK 0xF00
+
+/*
+ * MCPDM_DMAENABLE bit fields
+ */
+
+#define DMA_DN_ENABLE 0x1
+#define DMA_UP_ENABLE 0x2
+
+/*
+ * MCPDM_CTRL bit fields
+ */
+
+#define PDM_UP1_EN 0x0001
+#define PDM_UP2_EN 0x0002
+#define PDM_UP3_EN 0x0004
+#define PDM_DN1_EN 0x0008
+#define PDM_DN2_EN 0x0010
+#define PDM_DN3_EN 0x0020
+#define PDM_DN4_EN 0x0040
+#define PDM_DN5_EN 0x0080
+#define PDMOUTFORMAT 0x0100
+#define CMD_INT 0x0200
+#define STATUS_INT 0x0400
+#define SW_UP_RST 0x0800
+#define SW_DN_RST 0x1000
+#define WD_EN 0x4000
+#define PDM_UP_MASK 0x007
+#define PDM_DN_MASK 0x0F8
+#define PDM_CMD_MASK 0x200
+#define PDM_STATUS_MASK 0x400
+
+
+#define PDMOUTFORMAT_LJUST (0 << 8)
+#define PDMOUTFORMAT_RJUST (1 << 8)
+
+/*
+ * MCPDM_FIFO_CTRL bit fields
+ */
+
+#define UP_THRES_MAX 0xF
+#define DN_THRES_MAX 0xF
+
+/*
+ * MCPDM_DN_OFFSET bit fields
+ */
+
+#define DN_OFST_RX1_EN 0x0001
+#define DN_OFST_RX2_EN 0x0100
+
+#define DN_OFST_RX1 1
+#define DN_OFST_RX2 9
+#define DN_OFST_MAX 0x1F
+
+#define MCPDM_UPLINK 1
+#define MCPDM_DOWNLINK 2
+
+#endif /* End of __OMAP_MCPDM_H__ */
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index e6a6b99..a2a464f 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -198,6 +198,14 @@
OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
else if (!substream->runtime->no_period_wakeup)
omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);
+ else {
+ /*
+ * No period wakeup:
+ * we need to disable BLOCK_IRQ, which is enabled by the omap
+ * dma core at request dma time.
+ */
+ omap_disable_dma_irq(prtd->dma_ch, OMAP_DMA_BLOCK_IRQ);
+ }
if (!(cpu_class_is_omap1())) {
omap_set_dma_src_burst_mode(prtd->dma_ch,
@@ -235,6 +243,11 @@
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
prtd->period_index = -1;
omap_stop_dma(prtd->dma_ch);
+ /* Since we are using self-linking, there is a
+ * chance that the DMA has re-enabled the channel
+ * just after disabling it. */
+ while (omap_get_dma_active_status(prtd->dma_ch))
+ omap_stop_dma(prtd->dma_ch);
break;
default:
ret = -EINVAL;
@@ -280,6 +293,15 @@
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
+ if (cpu_is_omap44xx()) {
+ /* ABE needs a step of 24 * 4 data bits, and HDMI 32 * 4.
+ * Ensure the buffer size satisfies both constraints.
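+ * (the 384-byte step used below is lcm(24 * 4, 32 * 4) = lcm(96, 128))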
+ */
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 384);
+ if (ret < 0)
+ goto out;
+ }
prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
if (prtd == NULL) {
@@ -366,9 +388,10 @@
}
}
-static int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
@@ -376,14 +399,14 @@
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
- if (dai->driver->playback.channels_min) {
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = omap_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
- if (dai->driver->capture.channels_min) {
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = omap_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
new file mode 100644
index 0000000..9024735
--- /dev/null
+++ b/sound/soc/omap/omap4-hdmi-card.c
@@ -0,0 +1,133 @@
+/*
+ * omap4-hdmi-card.c
+ *
+ * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap4-hdmi-audio"
+
+static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int i, count = 0;
+ struct omap_overlay_manager *mgr = NULL;
+ struct device *dev = substream->pcm->card->dev;
+
+ /* Find DSS HDMI device */
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ mgr = omap_dss_get_overlay_manager(i);
+ if (mgr && mgr->device
+ && mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
+ break;
+ }
+
+ if (i == omap_dss_get_num_overlay_managers()) {
+ dev_err(dev, "HDMI display device not found!\n");
+ return -ENODEV;
+ }
+
+ /* Make sure HDMI is powered on to avoid L3 interconnect errors */
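+ /* retry a handful of times, 50 ms apart, before giving up with -EIO */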
+ while (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ msleep(50);
+ if (count > 5)
+ return -EIO;
+ dev_err(dev, "HDMI display is not active!\n");
+ count++;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops omap4_hdmi_dai_ops = {
+ .hw_params = omap4_hdmi_dai_hw_params,
+};
+
+static struct snd_soc_dai_link omap4_hdmi_dai = {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .cpu_dai_name = "hdmi-audio-dai",
+ .platform_name = "omap-pcm-audio",
+ .codec_name = "omap-hdmi-codec",
+ .codec_dai_name = "hdmi-audio-codec",
+ .ops = &omap4_hdmi_dai_ops,
+};
+
+static struct snd_soc_card snd_soc_omap4_hdmi = {
+ .name = "OMAP4HDMI",
+ .dai_link = &omap4_hdmi_dai,
+ .num_links = 1,
+};
+
+static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_omap4_hdmi;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ card->dev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+ card->dev = NULL;
+ return 0;
+}
+
+static struct platform_driver omap4_hdmi_driver = {
+ .driver = {
+ .name = "omap4-hdmi-audio",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap4_hdmi_probe,
+ .remove = __devexit_p(omap4_hdmi_remove),
+};
+
+static int __init omap4_hdmi_init(void)
+{
+ return platform_driver_register(&omap4_hdmi_driver);
+}
+module_init(omap4_hdmi_init);
+
+static void __exit omap4_hdmi_exit(void)
+{
+ platform_driver_unregister(&omap4_hdmi_driver);
+}
+module_exit(omap4_hdmi_exit);
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/sdp4430.c b/sound/soc/omap/sdp4430.c
old mode 100644
new mode 100755
index 189e039..9fc42e7
--- a/sound/soc/omap/sdp4430.c
+++ b/sound/soc/omap/sdp4430.c
@@ -21,34 +21,133 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
+#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-dapm.h>
#include <sound/jack.h>
+#include <sound/soc-dsp.h>
#include <asm/mach-types.h>
#include <plat/hardware.h>
#include <plat/mux.h>
+#include <plat/mcbsp.h>
-#include "mcpdm.h"
+#include "omap-mcpdm.h"
+#include "omap-abe.h"
+#include "omap-abe-dsp.h"
#include "omap-pcm.h"
+#include "omap-mcbsp.h"
#include "../codecs/twl6040.h"
static int twl6040_power_mode;
+static int mcbsp_cfg;
+static struct snd_soc_codec *twl6040_codec;
-static int sdp4430_hw_params(struct snd_pcm_substream *substream,
+static int sdp4430_modem_mcbsp_configure(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, int flag)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_substream *modem_substream[2];
+ struct snd_soc_pcm_runtime *modem_rtd;
+ int channels;
+
+ if (flag) {
+ modem_substream[substream->stream] =
+ snd_soc_get_dai_substream(rtd->card,
+ OMAP_ABE_BE_MM_EXT1,
+ substream->stream);
+ if (unlikely(modem_substream[substream->stream] == NULL))
+ return -ENODEV;
+
+ modem_rtd =
+ modem_substream[substream->stream]->private_data;
+
+ if (!mcbsp_cfg) {
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(modem_rtd->cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+
+ if (unlikely(ret < 0)) {
+ printk(KERN_ERR "can't set Modem cpu DAI configuration\n");
+ goto exit;
+ } else {
+ mcbsp_cfg = 1;
+ }
+ }
+
+ if (params != NULL) {
+ /* Configure McBSP internal buffer usage */
+ /* this needs to be done for playback and/or record */
+ channels = params_channels(params);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_mcbsp_set_rx_threshold(
+ modem_rtd->cpu_dai->id, channels);
+ else
+ omap_mcbsp_set_tx_threshold(
+ modem_rtd->cpu_dai->id, channels);
+ }
+ } else {
+ mcbsp_cfg = 0;
+ }
+
+exit:
+ return ret;
+}
+
+static int sdp4430_modem_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- int clk_id, freq;
int ret;
+ ret = sdp4430_modem_mcbsp_configure(substream, params, 1);
+ if (ret)
+ printk(KERN_ERR "can't set modem cpu DAI configuration\n");
+
+ return ret;
+}
+
+static int sdp4430_modem_hw_free(struct snd_pcm_substream *substream)
+{
+ int ret;
+
+ ret = sdp4430_modem_mcbsp_configure(substream, NULL, 0);
+ if (ret)
+ printk(KERN_ERR "can't clear modem cpu DAI configuration\n");
+
+ return ret;
+}
+
+static struct snd_soc_ops sdp4430_modem_ops = {
+ .hw_params = sdp4430_modem_hw_params,
+ .hw_free = sdp4430_modem_hw_free,
+};
+
+static int sdp4430_mcpdm_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct twl6040 *twl6040 = codec->control_data;
+ int clk_id, freq, ret;
+
+ /* TWL6040 supplies McPDM PAD_CLKS */
+ ret = twl6040_enable(twl6040);
+ if (ret) {
+ printk(KERN_ERR "failed to enable TWL6040\n");
+ return ret;
+ }
+
if (twl6040_power_mode) {
- clk_id = TWL6040_SYSCLK_SEL_HPPLL;
+ clk_id = TWL6040_HPPLL_ID;
freq = 38400000;
} else {
- clk_id = TWL6040_SYSCLK_SEL_LPPLL;
+ clk_id = TWL6040_LPPLL_ID;
freq = 32768;
}
@@ -57,15 +156,95 @@
SND_SOC_CLOCK_IN);
if (ret) {
printk(KERN_ERR "can't set codec system clock\n");
- return ret;
+ goto err;
}
+
+ return 0;
+
+err:
+ twl6040_disable(twl6040);
return ret;
}
-static struct snd_soc_ops sdp4430_ops = {
- .hw_params = sdp4430_hw_params,
+static void sdp4430_mcpdm_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct twl6040 *twl6040 = codec->control_data;
+
+ /* TWL6040 supplies McPDM PAD_CLKS */
+ twl6040_disable(twl6040);
+}
+
+static struct snd_soc_ops sdp4430_mcpdm_ops = {
+ .startup = sdp4430_mcpdm_startup,
+ .shutdown = sdp4430_mcpdm_shutdown,
};
+static int sdp4430_mcbsp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ unsigned int be_id;
+
+
+ be_id = rtd->dai_link->be_id;
+
+ if (be_id == OMAP_ABE_DAI_MM_FM) {
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ } else if (be_id == OMAP_ABE_DAI_BT_VX) {
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ }
+
+ if (ret < 0) {
+ printk(KERN_ERR "can't set cpu DAI configuration\n");
+ return ret;
+ }
+
+ /*
+ * TODO: where does this clock come from (an external source?) -
+ * do we need to enable it?
+ */
+ /* Set McBSP clock to external */
+ ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_SYSCLK_CLKS_FCLK,
+ 64 * params_rate(params),
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ printk(KERN_ERR "can't set cpu system clock\n");
+
+ return ret;
+}
+
+static struct snd_soc_ops sdp4430_mcbsp_ops = {
+ .hw_params = sdp4430_mcbsp_hw_params,
+};
+
+static int mcbsp_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ unsigned int be_id = rtd->dai_link->be_id;
+
+ if (be_id == OMAP_ABE_DAI_MM_FM)
+ channels->min = 2;
+ else if (be_id == OMAP_ABE_DAI_BT_VX)
+ channels->min = 1;
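+ /* the McBSP back ends carry 16-bit samples; force S16_LE regardless of the front-end format */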
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S16_LE);
+ return 0;
+}
+
/* Headset jack */
static struct snd_soc_jack hs_jack;
@@ -95,6 +274,7 @@
return 0;
twl6040_power_mode = ucontrol->value.integer.value[0];
+ abe_dsp_set_power_mode(twl6040_power_mode);
return 1;
}
@@ -146,11 +326,34 @@
{"AFMR", NULL, "Aux/FM Stereo In"},
};
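+/* choose the DL1 output (earpiece, headset LP/HP or none) from the powered DAPM pins */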
+static int sdp4430_set_pdm_dl1_gains(struct snd_soc_dapm_context *dapm)
+{
+ int output, val;
+
+ if (snd_soc_dapm_get_pin_power(dapm, "Earphone Spk")) {
+ output = OMAP_ABE_DL1_EARPIECE;
+ } else if (snd_soc_dapm_get_pin_power(dapm, "Headset Stereophone")) {
+ val = snd_soc_read(twl6040_codec, TWL6040_REG_HSLCTL);
+ if (val & TWL6040_HSDACMODEL)
+ /* HSDACL in LP mode */
+ output = OMAP_ABE_DL1_HEADSET_LP;
+ else
+ /* HSDACL in HP mode */
+ output = OMAP_ABE_DL1_HEADSET_HP;
+ } else {
+ output = OMAP_ABE_DL1_NO_PDM;
+ }
+
+ return omap_abe_set_dl1_output(output);
+}
+
static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
+ struct twl6040 *twl6040 = codec->control_data;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
+ int hsotrim, left_offset, right_offset, mode, ret;
+
/* Add SDP4430 specific controls */
ret = snd_soc_add_controls(codec, sdp4430_controls,
@@ -175,6 +378,14 @@
snd_soc_dapm_enable_pin(dapm, "Headset Mic");
snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
+ /* allow audio paths from the audio modem to run during suspend */
+ snd_soc_dapm_ignore_suspend(dapm, "Ext Mic");
+ snd_soc_dapm_ignore_suspend(dapm, "Ext Spk");
+ snd_soc_dapm_ignore_suspend(dapm, "AFML");
+ snd_soc_dapm_ignore_suspend(dapm, "AFMR");
+ snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
+ snd_soc_dapm_ignore_suspend(dapm, "Headset Stereophone");
+
ret = snd_soc_dapm_sync(dapm);
if (ret)
return ret;
@@ -193,37 +404,454 @@
else
snd_soc_jack_report(&hs_jack, SND_JACK_HEADSET, SND_JACK_HEADSET);
+ /* DC offset cancellation computation */
+ hsotrim = snd_soc_read(codec, TWL6040_REG_HSOTRIM);
+ right_offset = (hsotrim & TWL6040_HSRO) >> TWL6040_HSRO_OFFSET;
+ left_offset = hsotrim & TWL6040_HSLO;
+
+ if (twl6040_get_icrev(twl6040) < TWL6040_REV_1_3)
+ /* For ES under ES_1.3 HS step is 2 mV */
+ mode = 2;
+ else
+ /* For ES_1.3 HS step is 1 mV */
+ mode = 1;
+
+ abe_dsp_set_hs_offset(left_offset, right_offset, mode);
+
+ /* don't wait before switching off HS power */
+ rtd->pmdown_time = 0;
+
return ret;
}
+static int sdp4430_twl6040_dl2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ int hfotrim, left_offset, right_offset;
+
+ /* DC offset cancellation computation */
+ hfotrim = snd_soc_read(codec, TWL6040_REG_HFOTRIM);
+ right_offset = (hfotrim & TWL6040_HFRO) >> TWL6040_HFRO_OFFSET;
+ left_offset = hfotrim & TWL6040_HFLO;
+
+ abe_dsp_set_hf_offset(left_offset, right_offset);
+
+ /* don't wait before switching off HF power */
+ rtd->pmdown_time = 0;
+
+ return 0;
+}
+
+static int sdp4430_twl6040_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+
+ /* don't wait before switching off FE power */
+ rtd->pmdown_time = 0;
+
+ return 0;
+}
+
+static int sdp4430_bt_init(struct snd_soc_pcm_runtime *rtd)
+{
+
+ /* don't wait before switching off BT power */
+ rtd->pmdown_time = 0;
+
+ return 0;
+}
+
+static int sdp4430_stream_event(struct snd_soc_dapm_context *dapm)
+{
+ /*
+ * set DL1 gains dynamically according to the active output
+ * (Headset, Earpiece) and HSDAC power mode
+ */
+ return sdp4430_set_pdm_dl1_gains(dapm);
+}
+
+/* TODO: make this a separate BT CODEC driver or DUMMY */
+static struct snd_soc_dai_driver dai[] = {
+{
+ .name = "Bluetooth",
+ .playback = {
+ .stream_name = "BT Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "BT Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+/* TODO: make this a separate FM CODEC driver or DUMMY */
+{
+ .name = "FM Digital",
+ .playback = {
+ .stream_name = "FM Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "FM Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+{
+ .name = "HDMI",
+ .playback = {
+ .stream_name = "HDMI Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+},
+};
+
+struct snd_soc_dsp_link fe_media = {
+ .playback = true,
+ .capture = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
+
+struct snd_soc_dsp_link fe_media_capture = {
+ .capture = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
+
+struct snd_soc_dsp_link fe_tones = {
+ .playback = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
+
+struct snd_soc_dsp_link fe_vib = {
+ .playback = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
+
+struct snd_soc_dsp_link fe_modem = {
+ .playback = true,
+ .capture = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
+
+struct snd_soc_dsp_link fe_lp_media = {
+ .playback = true,
+ .trigger =
+ {SND_SOC_DSP_TRIGGER_BESPOKE, SND_SOC_DSP_TRIGGER_BESPOKE},
+};
/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link sdp4430_dai = {
- .name = "TWL6040",
- .stream_name = "TWL6040",
- .cpu_dai_name ="omap-mcpdm-dai",
- .codec_dai_name = "twl6040-hifi",
- .platform_name = "omap-pcm-audio",
- .codec_name = "twl6040-codec",
- .init = sdp4430_twl6040_init,
- .ops = &sdp4430_ops,
+static struct snd_soc_dai_link sdp4430_dai[] = {
+
+/*
+ * Frontend DAIs - i.e. userspace visible interfaces (ALSA PCMs)
+ */
+
+ {
+ .name = "SDP4430 Media",
+ .stream_name = "Multimedia",
+
+ /* ABE components - MM-UL & MM_DL */
+ .cpu_dai_name = "MultiMedia1",
+ .platform_name = "omap-pcm-audio",
+
+ .dynamic = 1, /* BE is dynamic */
+ .init = sdp4430_twl6040_fe_init,
+ .dsp_link = &fe_media,
+ },
+ {
+ .name = "SDP4430 Media Capture",
+ .stream_name = "Multimedia Capture",
+
+ /* ABE components - MM-UL2 */
+ .cpu_dai_name = "MultiMedia2",
+ .platform_name = "omap-pcm-audio",
+
+ .dynamic = 1, /* BE is dynamic */
+ .dsp_link = &fe_media_capture,
+ },
+ {
+ .name = "SDP4430 Voice",
+ .stream_name = "Voice",
+
+ /* ABE components - VX-UL & VX-DL */
+ .cpu_dai_name = "Voice",
+ .platform_name = "omap-pcm-audio",
+
+ .dynamic = 1, /* BE is dynamic */
+ .dsp_link = &fe_media,
+ .no_host_mode = SND_SOC_DAI_LINK_OPT_HOST,
+ },
+ {
+ .name = "SDP4430 Tones Playback",
+ .stream_name = "Tone Playback",
+
+ /* ABE components - TONES_DL */
+ .cpu_dai_name = "Tones",
+ .platform_name = "omap-pcm-audio",
+
+ .dynamic = 1, /* BE is dynamic */
+ .dsp_link = &fe_tones,
+ },
+ {
+ .name = "SDP4430 Vibra Playback",
+ .stream_name = "VIB-DL",
+
+ /* ABE components - DMIC UL 2 */
+ .cpu_dai_name = "Vibra",
+ .platform_name = "omap-pcm-audio",
+
+ .dynamic = 1, /* BE is dynamic */
+ .dsp_link = &fe_vib,
+ },
+ {
+ .name = "SDP4430 MODEM",
+ .stream_name = "MODEM",
+
+ /* ABE components - MODEM <-> McBSP2 */
+ .cpu_dai_name = "MODEM",
+ .platform_name = "aess",
+
+ .dynamic = 1, /* BE is dynamic */
+ .init = sdp4430_twl6040_fe_init,
+ .dsp_link = &fe_modem,
+ .ops = &sdp4430_modem_ops,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = "SDP4430 Media LP",
+ .stream_name = "Multimedia",
+
+ /* ABE components - MM-DL (mmap) */
+ .cpu_dai_name = "MultiMedia1 LP",
+ .platform_name = "aess",
+
+ .dynamic = 1, /* BE is dynamic */
+ .dsp_link = &fe_lp_media,
+ },
+ {
+ .name = "Legacy McBSP",
+ .stream_name = "Multimedia",
+
+ /* ABE components - MCBSP2 - MM-EXT */
+ .cpu_dai_name = "omap-mcbsp-dai.1",
+ .platform_name = "omap-pcm-audio",
+
+ /* FM */
+ .codec_dai_name = "FM Digital",
+
+ .no_codec = 1, /* TODO: have a dummy CODEC */
+ .ops = &sdp4430_mcbsp_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = "Legacy McPDM",
+ .stream_name = "Headset Playback",
+
+ /* ABE components - DL1 */
+ .cpu_dai_name = "mcpdm-dl",
+ .platform_name = "omap-pcm-audio",
+
+ /* Phoenix - DL1 DAC */
+ .codec_dai_name = "twl6040-dl1",
+ .codec_name = "twl6040-codec",
+
+ .ops = &sdp4430_mcpdm_ops,
+ .ignore_suspend = 1,
+ },
+
+/*
+ * Backend DAIs - i.e. dynamically matched interfaces, invisible to userspace.
+ * Matched to above interfaces at runtime, based upon use case.
+ */
+
+ {
+ .name = OMAP_ABE_BE_PDM_DL1,
+ .stream_name = "HS Playback",
+
+ /* ABE components - DL1 */
+ .cpu_dai_name = "mcpdm-dl1",
+ .platform_name = "aess",
+
+ /* Phoenix - DL1 DAC */
+ .codec_dai_name = "twl6040-dl1",
+ .codec_name = "twl6040-codec",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .init = sdp4430_twl6040_init,
+ .ops = &sdp4430_mcpdm_ops,
+ .be_id = OMAP_ABE_DAI_PDM_DL1,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = OMAP_ABE_BE_PDM_UL1,
+ .stream_name = "Analog Capture",
+
+ /* ABE components - UL1 */
+ .cpu_dai_name = "mcpdm-ul1",
+ .platform_name = "aess",
+
+ /* Phoenix - UL ADC */
+ .codec_dai_name = "twl6040-ul",
+ .codec_name = "twl6040-codec",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .ops = &sdp4430_mcpdm_ops,
+ .be_id = OMAP_ABE_DAI_PDM_UL,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = OMAP_ABE_BE_PDM_DL2,
+ .stream_name = "HF Playback",
+
+ /* ABE components - DL2 */
+ .cpu_dai_name = "mcpdm-dl2",
+ .platform_name = "aess",
+
+ /* Phoenix - DL2 DAC */
+ .codec_dai_name = "twl6040-dl2",
+ .codec_name = "twl6040-codec",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .init = sdp4430_twl6040_dl2_init,
+ .ops = &sdp4430_mcpdm_ops,
+ .be_id = OMAP_ABE_DAI_PDM_DL2,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = OMAP_ABE_BE_PDM_VIB,
+ .stream_name = "Vibra",
+
+ /* ABE components - VIB1 DL */
+ .cpu_dai_name = "mcpdm-vib",
+ .platform_name = "aess",
+
+ /* Phoenix - PDM to PWM */
+ .codec_dai_name = "twl6040-vib",
+ .codec_name = "twl6040-codec",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .ops = &sdp4430_mcpdm_ops,
+ .be_id = OMAP_ABE_DAI_PDM_VIB,
+ },
+ {
+ .name = OMAP_ABE_BE_BT_VX_UL,
+ .stream_name = "BT Capture",
+
+ /* ABE components - MCBSP1 - BT-VX */
+ .cpu_dai_name = "omap-mcbsp-dai.0",
+ .platform_name = "aess",
+
+ /* Bluetooth */
+ .codec_dai_name = "Bluetooth",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .no_codec = 1, /* TODO: have a dummy CODEC */
+ .be_hw_params_fixup = mcbsp_be_hw_params_fixup,
+ .ops = &sdp4430_mcbsp_ops,
+ .be_id = OMAP_ABE_DAI_BT_VX,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = OMAP_ABE_BE_BT_VX_DL,
+ .stream_name = "BT Playback",
+
+ /* ABE components - MCBSP1 - BT-VX */
+ .cpu_dai_name = "omap-mcbsp-dai.0",
+ .platform_name = "aess",
+
+ /* Bluetooth */
+ .codec_dai_name = "Bluetooth",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .no_codec = 1, /* TODO: have a dummy CODEC */
+ .init = sdp4430_bt_init,
+ .be_hw_params_fixup = mcbsp_be_hw_params_fixup,
+ .ops = &sdp4430_mcbsp_ops,
+ .be_id = OMAP_ABE_DAI_BT_VX,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = OMAP_ABE_BE_MM_EXT0,
+ .stream_name = "FM",
+
+ /* ABE components - MCBSP2 - MM-EXT */
+ .cpu_dai_name = "omap-mcbsp-dai.1",
+ .platform_name = "aess",
+
+ /* FM */
+ .codec_dai_name = "FM Digital",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .no_codec = 1, /* TODO: have a dummy CODEC */
+ .be_hw_params_fixup = mcbsp_be_hw_params_fixup,
+ .ops = &sdp4430_mcbsp_ops,
+ .be_id = OMAP_ABE_DAI_MM_FM,
+ },
+ {
+ .name = OMAP_ABE_BE_MM_EXT1,
+ .stream_name = "MODEM",
+
+ /* ABE components - MCBSP2 - MM-EXT */
+ .cpu_dai_name = "omap-mcbsp-dai.1",
+ .platform_name = "aess",
+
+ /* MODEM */
+ .codec_dai_name = "MODEM",
+
+ .no_pcm = 1, /* don't create ALSA pcm for this */
+ .no_codec = 1, /* TODO: have a dummy CODEC */
+ .be_hw_params_fixup = mcbsp_be_hw_params_fixup,
+ .ops = &sdp4430_mcbsp_ops,
+ .be_id = OMAP_ABE_DAI_MODEM,
+ .ignore_suspend = 1,
+ },
};
/* Audio machine driver */
static struct snd_soc_card snd_soc_sdp4430 = {
- .name = "SDP4430",
- .dai_link = &sdp4430_dai,
- .num_links = 1,
+ .driver_name = "OMAP4",
+ .long_name = "TI OMAP4 Board",
+ .dai_link = sdp4430_dai,
+ .num_links = ARRAY_SIZE(sdp4430_dai),
+ .stream_event = sdp4430_stream_event,
};
static struct platform_device *sdp4430_snd_device;
+struct i2c_adapter *adapter;
static int __init sdp4430_soc_init(void)
{
int ret;
- if (!machine_is_omap_4430sdp())
+ if (!machine_is_omap_4430sdp() && !machine_is_omap4_panda()) {
+ pr_debug("Not SDP4430 or PandaBoard!\n");
return -ENODEV;
+ }
printk(KERN_INFO "SDP4430 SoC init\n");
+ if (machine_is_omap_4430sdp())
+ snd_soc_sdp4430.name = "SDP4430";
+ else if (machine_is_omap4_panda())
+ snd_soc_sdp4430.name = "Panda";
sdp4430_snd_device = platform_device_alloc("soc-audio", -1);
if (!sdp4430_snd_device) {
@@ -231,14 +859,17 @@
return -ENOMEM;
}
+ ret = snd_soc_register_dais(&sdp4430_snd_device->dev, dai, ARRAY_SIZE(dai));
+ if (ret < 0)
+ goto err;
platform_set_drvdata(sdp4430_snd_device, &snd_soc_sdp4430);
ret = platform_device_add(sdp4430_snd_device);
if (ret)
goto err;
- /* Codec starts in HP mode */
- twl6040_power_mode = 1;
+ twl6040_codec = snd_soc_card_get_codec(&snd_soc_sdp4430,
+ "twl6040-codec");
return 0;
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index fab20a5..da28394 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -85,9 +85,11 @@
static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index ab3ccae..80c85fd6 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -443,10 +443,11 @@
static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-static int s6000_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
{
- struct snd_soc_pcm_runtime *runtime = pcm->private_data;
+ struct snd_card *card = runtime->card->snd_card;
+ struct snd_soc_dai *dai = runtime->cpu_dai;
+ struct snd_pcm *pcm = runtime->pcm;
struct s6000_pcm_dma_params *params;
int res;
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 5cb3b88..9465588 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -425,9 +425,11 @@
static u64 dma_mask = DMA_BIT_MASK(32);
-static int dma_new(struct snd_card *card,
- struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int dma_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
pr_debug("Entered %s\n", __func__);
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index c326d29..db74005 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -327,10 +327,10 @@
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static int camelot_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_pcm *pcm = rtd->pcm;
+
/* dont use SNDRV_DMA_TYPE_DEV, since it will oops the SH kernel
* in MMAP mode (i.e. aplay -M)
*/
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 4a9da6b..339a1df 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1129,10 +1129,10 @@
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static int fsi_pcm_new(struct snd_card *card,
- struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int fsi_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_pcm *pcm = rtd->pcm;
+
/*
* dont use SNDRV_DMA_TYPE_DEV, since it will oops the SH kernel
* in MMAP mode (i.e. aplay -M)
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index a423bab..f8f6816 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -527,10 +527,11 @@
return bytes_to_frames(ss->runtime, ptr);
}
-static int siu_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
/* card->dev == socdev->dev, see snd_soc_new_pcms() */
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
struct siu_info *info = siu_i2s_data;
struct platform_device *pdev = to_platform_device(card->dev);
int ret;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e2bfe1d..c171182 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -38,6 +38,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-dsp.h>
#include <sound/initval.h>
#define CREATE_TRACE_POINTS
@@ -45,7 +46,6 @@
#define NAME_SIZE 32
-static DEFINE_MUTEX(pcm_mutex);
static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq);
#ifdef CONFIG_DEBUG_FS
@@ -60,6 +60,7 @@
static LIST_HEAD(codec_list);
static int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
+int soc_dsp_debugfs_add(struct snd_soc_pcm_runtime *rtd);
/*
* This is a timeout to do a DAPM powerdown after a stream is closed().
@@ -123,6 +124,24 @@
return 0;
}
+/* ASoC no host IO hardware.
+ * TODO: fine-tune these values for all hostless transfers.
+ */
+static const struct snd_pcm_hardware no_host_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = PAGE_SIZE >> 2,
+ .period_bytes_max = PAGE_SIZE >> 1,
+ .periods_min = 2,
+ .periods_max = 4,
+ .buffer_bytes_max = PAGE_SIZE,
+};
+
/* codec register dump */
static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf,
size_t count, loff_t pos)
@@ -527,7 +546,7 @@
* then initialized and any private data can be allocated. This also calls
* startup for the cpu DAI, platform, machine and codec DAI.
*/
-static int soc_pcm_open(struct snd_pcm_substream *substream)
+int soc_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -538,7 +557,19 @@
struct snd_soc_dai_driver *codec_dai_drv = codec_dai->driver;
int ret = 0;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+
+ if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+ snd_soc_set_runtime_hwparams(substream, &no_host_hardware);
+
+ if (rtd->dai_link->ops && rtd->dai_link->ops->startup) {
+ ret = rtd->dai_link->ops->startup(substream);
+ if (ret < 0) {
+ printk(KERN_ERR "asoc: %s startup failed\n",
+ rtd->dai_link->name);
+ goto machine_err;
+ }
+ }
/* startup the audio subsystem */
if (cpu_dai->driver->ops->startup) {
@@ -546,7 +577,7 @@
if (ret < 0) {
printk(KERN_ERR "asoc: can't open interface %s\n",
cpu_dai->name);
- goto out;
+ goto cpu_err;
}
}
@@ -567,13 +598,9 @@
}
}
- if (rtd->dai_link->ops && rtd->dai_link->ops->startup) {
- ret = rtd->dai_link->ops->startup(substream);
- if (ret < 0) {
- printk(KERN_ERR "asoc: %s startup failed\n", rtd->dai_link->name);
- goto machine_err;
- }
- }
+ /* DSP DAI links compat checks are different */
+ if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm)
+ goto dynamic;
/* Check that the codec and cpu DAIs are compatible */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -658,6 +685,7 @@
pr_debug("asoc: min rate %d max rate %d\n", runtime->hw.rate_min,
runtime->hw.rate_max);
+dynamic:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active++;
codec_dai->playback_active++;
@@ -668,14 +696,11 @@
cpu_dai->active++;
codec_dai->active++;
rtd->codec->active++;
- mutex_unlock(&pcm_mutex);
+ rtd->dai_link->active++;
+ mutex_unlock(&rtd->pcm_mutex);
return 0;
config_err:
- if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
- rtd->dai_link->ops->shutdown(substream);
-
-machine_err:
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
@@ -686,8 +711,12 @@
platform_err:
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
-out:
- mutex_unlock(&pcm_mutex);
+cpu_err:
+ if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
+ rtd->dai_link->ops->shutdown(substream);
+
+machine_err:
+ mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -702,7 +731,7 @@
container_of(work, struct snd_soc_pcm_runtime, delayed_work.work);
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
pr_debug("pop wq checking: %s status: %s waiting: %s\n",
codec_dai->driver->playback.stream_name,
@@ -717,7 +746,7 @@
SND_SOC_DAPM_STREAM_STOP);
}
- mutex_unlock(&pcm_mutex);
+ mutex_unlock(&rtd->pcm_mutex);
}
/*
@@ -725,7 +754,7 @@
* freed here. The cpu DAI, codec DAI, machine and platform are also
* shutdown.
*/
-static int soc_codec_close(struct snd_pcm_substream *substream)
+int soc_pcm_close(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
@@ -733,7 +762,7 @@
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active--;
@@ -746,6 +775,7 @@
cpu_dai->active--;
codec_dai->active--;
codec->active--;
+ rtd->dai_link->active--;
/* Muting the DAC suppresses artifacts caused during digital
* shutdown, for example from stopping clocks.
@@ -759,11 +789,12 @@
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
+ if (platform->driver->ops && platform->driver->ops->close)
+ platform->driver->ops->close(substream);
+
if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
rtd->dai_link->ops->shutdown(substream);
- if (platform->driver->ops && platform->driver->ops->close)
- platform->driver->ops->close(substream);
cpu_dai->runtime = NULL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -778,7 +809,7 @@
SND_SOC_DAPM_STREAM_STOP);
}
- mutex_unlock(&pcm_mutex);
+ mutex_unlock(&rtd->pcm_mutex);
return 0;
}
@@ -787,7 +818,7 @@
* rate, etc. This function is non atomic and can be called multiple times,
* it can refer to the runtime info.
*/
-static int soc_pcm_prepare(struct snd_pcm_substream *substream)
+int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
@@ -795,7 +826,7 @@
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret = 0;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
ret = rtd->dai_link->ops->prepare(substream);
@@ -848,7 +879,7 @@
snd_soc_dai_digital_mute(codec_dai, 0);
out:
- mutex_unlock(&pcm_mutex);
+ mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -857,7 +888,7 @@
* function can also be called multiple times and can allocate buffers
* (using snd_pcm_lib_* ). It's non-atomic.
*/
-static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
+int soc_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -866,7 +897,7 @@
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret = 0;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
ret = rtd->dai_link->ops->hw_params(substream, params);
@@ -905,8 +936,21 @@
rtd->rate = params_rate(params);
+ /* malloc a page for hostless IO.
+ * FIXME: rework with alsa-lib changes so that this malloc is not required.
+ */
+ if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) {
+ substream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
+ substream->dma_buffer.dev.dev = &rtd->dev;
+ substream->dma_buffer.dev.dev->coherent_dma_mask = ISA_DMA_THRESHOLD;
+ substream->dma_buffer.private_data = NULL;
+
+ ret = snd_pcm_lib_malloc_pages(substream, PAGE_SIZE);
+ if (ret < 0)
+ goto platform_err;
+ }
out:
- mutex_unlock(&pcm_mutex);
+ mutex_unlock(&rtd->pcm_mutex);
return ret;
platform_err:
@@ -921,14 +965,14 @@
if (rtd->dai_link->ops && rtd->dai_link->ops->hw_free)
rtd->dai_link->ops->hw_free(substream);
- mutex_unlock(&pcm_mutex);
+ mutex_unlock(&rtd->pcm_mutex);
return ret;
}
/*
* Frees resources allocated by hw_params, can be called multiple times
*/
-static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
+int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
@@ -936,7 +980,7 @@
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
- mutex_lock(&pcm_mutex);
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
/* apply codec digital mute */
if (!codec->active)
@@ -957,11 +1001,13 @@
if (cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
- mutex_unlock(&pcm_mutex);
+ if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+ snd_pcm_lib_free_pages(substream);
+ mutex_unlock(&rtd->pcm_mutex);
return 0;
}
-static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
@@ -989,12 +1035,40 @@
return 0;
}
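+/* like soc_pcm_trigger(), but routed through the drivers' optional bespoke_trigger callbacks */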
+int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ if (codec_dai->driver->ops->bespoke_trigger) {
+ ret = codec_dai->driver->ops->bespoke_trigger(substream, cmd, codec_dai);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (platform->driver->bespoke_trigger) {
+ ret = platform->driver->bespoke_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (cpu_dai->driver->ops->bespoke_trigger) {
+ ret = cpu_dai->driver->ops->bespoke_trigger(substream, cmd, cpu_dai);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
/*
* soc level wrapper for pointer callback
* If cpu_dai, codec_dai, platform driver has the delay callback, than
* the runtime->delay will be updated accordingly.
*/
-static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
+snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
@@ -1021,16 +1095,74 @@
return offset;
}
-/* ASoC PCM operations */
-static struct snd_pcm_ops soc_pcm_ops = {
- .open = soc_pcm_open,
- .close = soc_codec_close,
- .hw_params = soc_pcm_hw_params,
- .hw_free = soc_pcm_hw_free,
- .prepare = soc_pcm_prepare,
- .trigger = soc_pcm_trigger,
- .pointer = soc_pcm_pointer,
-};
+static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_platform *platform = rtd->platform;
+
+ if (platform->driver->ops->ioctl)
+ return platform->driver->ops->ioctl(substream, cmd, arg);
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+struct snd_soc_codec *snd_soc_card_get_codec(struct snd_soc_card *card,
+ const char *codec_name)
+{
+ struct snd_soc_codec *codec = NULL;
+
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ if (!strcmp(codec->name, codec_name))
+ return codec;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(snd_soc_card_get_codec);
+
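+/* count the currently active front-end (userspace-visible) links on a card */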
+int snd_soc_card_active_links(struct snd_soc_card *card)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < card->num_rtd; i++) {
+ /* count FEs: dynamic and legacy */
+ if (!card->rtd[i].dai_link->no_pcm)
+ count += card->rtd[i].dai_link->active;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(snd_soc_card_active_links);
+
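+/* find the back-end (no_pcm) substream registered under the given DAI link name */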
+struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
+ const char *dai_link, int stream)
+{
+ int i;
+
+ for (i = 0; i < card->num_links; i++) {
+ if (card->rtd[i].dai_link->no_pcm &&
+ !strcmp(card->rtd[i].dai_link->name, dai_link))
+ return card->rtd[i].pcm->streams[stream].substream;
+ }
+ dev_dbg(card->dev, "failed to find dai link %s\n", dai_link);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);
+
+struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
+ const char *dai_link)
+{
+ int i;
+
+ for (i = 0; i < card->num_links; i++) {
+ if (!strcmp(card->rtd[i].dai_link->name, dai_link))
+ return &card->rtd[i];
+ }
+ dev_dbg(card->dev, "failed to find rtd %s\n", dai_link);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
@@ -1040,6 +1172,9 @@
struct snd_soc_codec *codec;
int i;
+ /* cancel pending deferred resume if any */
+ cancel_work_sync(&card->deferred_resume_work);
+
/* If the initialization of this soc device failed, there is no codec
* associated with it. Just bail out in this case.
*/
@@ -1061,16 +1196,22 @@
struct snd_soc_dai *dai = card->rtd[i].codec_dai;
struct snd_soc_dai_driver *drv = dai->driver;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (drv->ops->digital_mute && dai->playback_active)
- drv->ops->digital_mute(dai, 1);
+ if (card->rtd[i].dai_link->dynamic)
+ soc_dsp_be_digital_mute(&card->rtd[i], 1);
+ else {
+ if (drv->ops->digital_mute && dai->playback_active)
+ drv->ops->digital_mute(dai, 1);
+ }
}
/* suspend all pcms */
for (i = 0; i < card->num_rtd; i++) {
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
snd_pcm_suspend_all(card->rtd[i].pcm);
@@ -1083,14 +1224,19 @@
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
struct snd_soc_platform *platform = card->rtd[i].platform;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control)
- cpu_dai->driver->suspend(cpu_dai);
- if (platform->driver->suspend && !platform->suspended) {
- platform->driver->suspend(cpu_dai);
- platform->suspended = 1;
+ if (card->rtd[i].dai_link->dynamic) {
+ soc_dsp_fe_suspend(&card->rtd[i]);
+ } else {
+ if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control)
+ cpu_dai->driver->suspend(cpu_dai);
+ if (platform->driver->suspend && !platform->suspended) {
+ platform->driver->suspend(cpu_dai);
+ platform->suspended = 1;
+ }
}
}
@@ -1103,7 +1249,8 @@
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai_driver *driver = card->rtd[i].codec_dai->driver;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
if (driver->playback.stream_name != NULL)
@@ -1137,11 +1284,15 @@
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control)
- cpu_dai->driver->suspend(cpu_dai);
+ if (card->rtd[i].dai_link->dynamic)
+ soc_dsp_be_ac97_cpu_dai_suspend(&card->rtd[i]);
+ else
+ if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control)
+ cpu_dai->driver->suspend(cpu_dai);
}
if (card->suspend_post)
@@ -1177,11 +1328,15 @@
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control)
- cpu_dai->driver->resume(cpu_dai);
+ if (card->rtd[i].dai_link->dynamic)
+ soc_dsp_be_ac97_cpu_dai_resume(&card->rtd[i]);
+ else
+ if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control)
+ cpu_dai->driver->resume(cpu_dai);
}
list_for_each_entry(codec, &card->codec_dev_list, card_list) {
@@ -1206,7 +1361,8 @@
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai_driver *driver = card->rtd[i].codec_dai->driver;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
if (driver->playback.stream_name != NULL)
@@ -1223,25 +1379,35 @@
struct snd_soc_dai *dai = card->rtd[i].codec_dai;
struct snd_soc_dai_driver *drv = dai->driver;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (drv->ops->digital_mute && dai->playback_active)
- drv->ops->digital_mute(dai, 0);
+ if (card->rtd[i].dai_link->dynamic)
+ soc_dsp_be_digital_mute(&card->rtd[i], 0);
+ else {
+ if (drv->ops->digital_mute && dai->playback_active)
+ drv->ops->digital_mute(dai, 0);
+ }
}
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
struct snd_soc_platform *platform = card->rtd[i].platform;
- if (card->rtd[i].dai_link->ignore_suspend)
+ if (card->rtd[i].dai_link->ignore_suspend ||
+ card->rtd[i].dai_link->no_pcm)
continue;
- if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control)
- cpu_dai->driver->resume(cpu_dai);
- if (platform->driver->resume && platform->suspended) {
- platform->driver->resume(cpu_dai);
- platform->suspended = 0;
+ if (card->rtd[i].dai_link->dynamic) {
+ soc_dsp_fe_resume(&card->rtd[i]);
+ } else {
+ if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control)
+ cpu_dai->driver->resume(cpu_dai);
+ if (platform->driver->resume && platform->suspended) {
+ platform->driver->resume(cpu_dai);
+ platform->suspended = 0;
+ }
}
}
@@ -1286,8 +1452,30 @@
#define snd_soc_resume NULL
#endif
+#define NULL_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |\
+ SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |\
+ SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32)
+
static struct snd_soc_dai_ops null_dai_ops = {
};
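+/* placeholder DAI/CODEC, intended for links that lack a real CODEC (no_codec = 1) */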
+static struct snd_soc_dai_driver null_codec_dai_drv = {
+ .name = "null-codec-dai",
+ .ops = &null_dai_ops,
+ .capture = {
+ .channels_min = 1 ,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .formats = NULL_FORMATS,
+ },
+ .playback = {
+ .channels_min = 1 ,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .formats = NULL_FORMATS,
+ },
+};
+static struct snd_soc_codec_driver null_codec_drv = {};
static int soc_bind_dai_link(struct snd_soc_card *card, int num)
{
@@ -1329,7 +1517,7 @@
/* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/
list_for_each_entry(codec_dai, &dai_list, list) {
- if (codec->dev == codec_dai->dev &&
+ if ((codec->dev == codec_dai->dev || codec->driver == &null_codec_drv) &&
!strcmp(codec_dai->name, dai_link->codec_dai_name)) {
rtd->codec_dai = codec_dai;
goto find_platform;
@@ -1396,7 +1584,7 @@
module_put(codec->dev->driver->owner);
}
-static void soc_remove_dai_link(struct snd_soc_card *card, int num)
+static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
struct snd_soc_codec *codec = rtd->codec;
@@ -1413,7 +1601,8 @@
}
/* remove the CODEC DAI */
- if (codec_dai && codec_dai->probed) {
+ if (codec_dai && codec_dai->probed &&
+ codec_dai->driver->remove_order == order) {
if (codec_dai->driver->remove) {
err = codec_dai->driver->remove(codec_dai);
if (err < 0)
@@ -1421,10 +1610,12 @@
}
codec_dai->probed = 0;
list_del(&codec_dai->card_list);
+ module_put(codec_dai->dev->driver->owner);
}
/* remove the platform */
- if (platform && platform->probed) {
+ if (platform && platform->probed &&
+ platform->driver->remove_order == order) {
if (platform->driver->remove) {
err = platform->driver->remove(platform);
if (err < 0)
@@ -1436,11 +1627,13 @@
}
/* remove the CODEC */
- if (codec && codec->probed)
+ if (codec && codec->probed &&
+ codec->driver->remove_order == order)
soc_remove_codec(codec);
/* remove the cpu_dai */
- if (cpu_dai && cpu_dai->probed) {
+ if (cpu_dai && cpu_dai->probed &&
+ cpu_dai->driver->remove_order == order) {
if (cpu_dai->driver->remove) {
err = cpu_dai->driver->remove(cpu_dai);
if (err < 0)
@@ -1454,11 +1647,13 @@
static void soc_remove_dai_links(struct snd_soc_card *card)
{
- int i;
+ int dai, order;
- for (i = 0; i < card->num_rtd; i++)
- soc_remove_dai_link(card, i);
-
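+ /* tear down in multiple passes so each component's remove_order is honoured */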
+ for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
+ order++) {
+ for (dai = 0; dai < card->num_rtd; dai++)
+ soc_remove_dai_link(card, dai, order);
+ }
card->num_rtd = 0;
}
@@ -1575,6 +1770,11 @@
rtd->dev.parent = card->dev;
rtd->dev.release = rtd_release;
rtd->dev.init_name = name;
+ mutex_init(&rtd->pcm_mutex);
+ INIT_LIST_HEAD(&rtd->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+ INIT_LIST_HEAD(&rtd->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+ INIT_LIST_HEAD(&rtd->dsp[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+ INIT_LIST_HEAD(&rtd->dsp[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
ret = device_register(&rtd->dev);
if (ret < 0) {
dev_err(card->dev,
@@ -1596,10 +1796,21 @@
dev_err(codec->dev,
"asoc: failed to add codec sysfs files: %d\n", ret);
+#ifdef CONFIG_DEBUG_FS
+ /* add DSP debugfs entries */
+ if (!dai_link->dynamic)
+ goto out;
+
+ ret = soc_dsp_debugfs_add(rtd);
+ if (ret < 0)
+ dev_err(&rtd->dev, "asoc: failed to add dsp debugfs entries: %d\n", ret);
+
+out:
+#endif
return 0;
}
-static int soc_probe_dai_link(struct snd_soc_card *card, int num)
+static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
@@ -1608,19 +1819,22 @@
struct snd_soc_dai *codec_dai = rtd->codec_dai, *cpu_dai = rtd->cpu_dai;
int ret;
- dev_dbg(card->dev, "probe %s dai link %d\n", card->name, num);
+ dev_dbg(card->dev, "probe %s dai link %d late %d\n",
+ card->name, num, order);
/* config components */
codec_dai->codec = codec;
cpu_dai->platform = platform;
codec_dai->card = card;
cpu_dai->card = card;
+ codec->dapm.card = platform->dapm.card = card;
/* set default power off timeout */
rtd->pmdown_time = pmdown_time;
/* probe the cpu_dai */
- if (!cpu_dai->probed) {
+ if (!cpu_dai->probed &&
+ cpu_dai->driver->probe_order == order) {
if (!try_module_get(cpu_dai->dev->driver->owner))
return -ENODEV;
@@ -1639,17 +1853,20 @@
}
/* probe the CODEC */
- if (!codec->probed) {
+ if (!codec->probed &&
+ codec->driver->probe_order == order) {
ret = soc_probe_codec(card, codec);
if (ret < 0)
return ret;
}
/* probe the platform */
- if (!platform->probed) {
+ if (!platform->probed &&
+ platform->driver->probe_order == order) {
if (!try_module_get(platform->dev->driver->owner))
return -ENODEV;
+ platform->card = card;
if (platform->driver->probe) {
ret = platform->driver->probe(platform);
if (ret < 0) {
@@ -1665,12 +1882,15 @@
}
/* probe the CODEC DAI */
- if (!codec_dai->probed) {
+ if (!codec_dai->probed && codec_dai->driver->probe_order == order) {
+ if (!try_module_get(codec_dai->dev->driver->owner))
+ return -ENODEV;
if (codec_dai->driver->probe) {
ret = codec_dai->driver->probe(codec_dai);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to probe CODEC DAI %s\n",
codec_dai->name);
+ module_put(codec_dai->dev->driver->owner);
return ret;
}
}
@@ -1680,6 +1900,10 @@
list_add(&codec_dai->card_list, &card->dai_dev_list);
}
+ /* complete DAI probe during last probe */
+ if (order != SND_SOC_COMP_ORDER_LAST)
+ return 0;
+
/* DAPM dai link stream work */
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
@@ -1820,7 +2044,7 @@
struct snd_soc_codec *codec;
struct snd_soc_codec_conf *codec_conf;
enum snd_soc_compress_type compress_type;
- int ret, i;
+ int ret, i, order;
mutex_lock(&card->mutex);
@@ -1876,6 +2100,7 @@
card->dapm.bias_level = SND_SOC_BIAS_OFF;
card->dapm.dev = card->dev;
card->dapm.card = card;
+ card->dapm.stream_event = card->stream_event;
list_add(&card->dapm.list, &card->dapm_list);
#ifdef CONFIG_DEBUG_FS
@@ -1898,12 +2123,16 @@
goto card_probe_error;
}
- for (i = 0; i < card->num_links; i++) {
- ret = soc_probe_dai_link(card, i);
- if (ret < 0) {
- pr_err("asoc: failed to instantiate card %s: %d\n",
+ /* early DAI link probe */
+ for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
+ order++) {
+ for (i = 0; i < card->num_links; i++) {
+ ret = soc_probe_dai_link(card, i, order);
+ if (ret < 0) {
+ pr_err("asoc: failed to instantiate card %s: %d\n",
card->name, ret);
- goto probe_dai_err;
+ goto probe_dai_err;
+ }
}
}
@@ -2092,6 +2321,11 @@
}
EXPORT_SYMBOL_GPL(snd_soc_poweroff);
+void soc_shutdown(struct platform_device *pdev)
+{
+ snd_soc_poweroff(&pdev->dev);
+}
+
const struct dev_pm_ops snd_soc_pm_ops = {
.suspend = snd_soc_suspend,
.resume = snd_soc_resume,
@@ -2108,6 +2342,7 @@
},
.probe = soc_probe,
.remove = soc_remove,
+ .shutdown = soc_shutdown,
};
/* create a new pcm */
@@ -2117,6 +2352,7 @@
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_pcm_substream *substream[2];
struct snd_pcm *pcm;
char new_name[64];
int ret = 0, playback = 0, capture = 0;
@@ -2125,10 +2361,15 @@
snprintf(new_name, sizeof(new_name), "%s %s-%d",
rtd->dai_link->stream_name, codec_dai->name, num);
- if (codec_dai->driver->playback.channels_min)
- playback = 1;
- if (codec_dai->driver->capture.channels_min)
- capture = 1;
+ if (rtd->dai_link->dynamic) {
+ playback = rtd->dai_link->dsp_link->playback;
+ capture = rtd->dai_link->dsp_link->capture;
+ } else {
+ if (codec_dai->driver->playback.channels_min)
+ playback = 1;
+ if (codec_dai->driver->capture.channels_min)
+ capture = 1;
+ }
dev_dbg(rtd->card->dev, "registered pcm #%d %s\n",num,new_name);
ret = snd_pcm_new(rtd->card->snd_card, new_name,
@@ -2140,25 +2381,67 @@
rtd->pcm = pcm;
pcm->private_data = rtd;
+
+ substream[SNDRV_PCM_STREAM_PLAYBACK] =
+ pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ substream[SNDRV_PCM_STREAM_CAPTURE] =
+ pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+
+ if (rtd->dai_link->no_pcm) {
+ if (playback)
+ substream[SNDRV_PCM_STREAM_PLAYBACK]->private_data = rtd;
+ if (capture)
+ substream[SNDRV_PCM_STREAM_CAPTURE]->private_data = rtd;
+ goto out;
+ }
+
+ /* setup any hostless PCMs - i.e. no host IO is performed */
+ if (rtd->dai_link->no_host_mode) {
+ substream[SNDRV_PCM_STREAM_PLAYBACK]->hw_no_buffer = 1;
+ substream[SNDRV_PCM_STREAM_CAPTURE]->hw_no_buffer = 1;
+ snd_soc_set_runtime_hwparams(substream[SNDRV_PCM_STREAM_PLAYBACK],
+ &no_host_hardware);
+ snd_soc_set_runtime_hwparams(substream[SNDRV_PCM_STREAM_CAPTURE],
+ &no_host_hardware);
+ }
+
+ /* ASoC PCM operations */
+ if (rtd->dai_link->dynamic) {
+ rtd->ops.open = soc_dsp_fe_dai_open;
+ rtd->ops.hw_params = soc_dsp_fe_dai_hw_params;
+ rtd->ops.prepare = soc_dsp_fe_dai_prepare;
+ rtd->ops.trigger = soc_dsp_fe_dai_trigger;
+ rtd->ops.hw_free = soc_dsp_fe_dai_hw_free;
+ rtd->ops.close = soc_dsp_fe_dai_close;
+ rtd->ops.pointer = soc_pcm_pointer;
+ rtd->ops.ioctl = soc_pcm_ioctl;
+ } else {
+ rtd->ops.open = soc_pcm_open;
+ rtd->ops.hw_params = soc_pcm_hw_params;
+ rtd->ops.prepare = soc_pcm_prepare;
+ rtd->ops.trigger = soc_pcm_trigger;
+ rtd->ops.hw_free = soc_pcm_hw_free;
+ rtd->ops.close = soc_pcm_close;
+ rtd->ops.pointer = soc_pcm_pointer;
+ rtd->ops.ioctl = soc_pcm_ioctl;
+ }
+
if (platform->driver->ops) {
- soc_pcm_ops.mmap = platform->driver->ops->mmap;
- soc_pcm_ops.pointer = platform->driver->ops->pointer;
- soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
- soc_pcm_ops.copy = platform->driver->ops->copy;
- soc_pcm_ops.silence = platform->driver->ops->silence;
- soc_pcm_ops.ack = platform->driver->ops->ack;
- soc_pcm_ops.page = platform->driver->ops->page;
+ rtd->ops.ack = platform->driver->ops->ack;
+ rtd->ops.copy = platform->driver->ops->copy;
+ rtd->ops.silence = platform->driver->ops->silence;
+ rtd->ops.page = platform->driver->ops->page;
+ rtd->ops.mmap = platform->driver->ops->mmap;
}
if (playback)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &rtd->ops);
if (capture)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &soc_pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
if (platform->driver->pcm_new) {
- ret = platform->driver->pcm_new(rtd->card->snd_card,
- codec_dai, pcm);
+ ret = platform->driver->pcm_new(rtd);
if (ret < 0) {
pr_err("asoc: platform pcm constructor failed\n");
return ret;
@@ -2166,6 +2449,7 @@
}
pcm->private_free = platform->driver->pcm_free;
+out:
printk(KERN_INFO "asoc: %s <-> %s mapping ok\n", codec_dai->name,
cpu_dai->name);
return ret;
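
Not part of the patch: a hedged sketch of the dai_link flags this hunk keys off (.dynamic/.dsp_link for a DSP front end, .no_pcm for a back end). All names are assumptions; the flags, the snd_soc_dsp_link fields and the SND_SOC_DSP_TRIGGER_* values are taken from this series.

#include <sound/soc.h>
#include <sound/soc-dsp.h>

/* Hypothetical FE DSP link: both directions, FE triggered after its BEs. */
static struct snd_soc_dsp_link my_fe_media = {
	.playback	= true,
	.capture	= true,
	.trigger	= {SND_SOC_DSP_TRIGGER_POST, SND_SOC_DSP_TRIGGER_POST},
};

static struct snd_soc_dai_link my_dai_links[] = {
	{	/* dynamic FE: routed at runtime, codec fixed up to null-codec
		 * by snd_soc_register_card() */
		.name		= "Media FE",
		.stream_name	= "Media Playback",
		.cpu_dai_name	= "my-fe-dai",
		.platform_name	= "my-dsp-platform",
		.dynamic	= 1,
		.dsp_link	= &my_fe_media,
	},
	{	/* BE: real DAI and codec, but no userspace PCM is created */
		.name		= "Codec BE",
		.stream_name	= "Codec Playback",
		.cpu_dai_name	= "my-be-dai",
		.codec_dai_name	= "my-codec-dai",
		.codec_name	= "my-codec",
		.platform_name	= "my-dsp-platform",
		.no_pcm		= 1,
	},
};
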
@@ -2189,6 +2473,28 @@
}
EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);
+unsigned int snd_soc_platform_read(struct snd_soc_platform *platform,
+ unsigned int reg)
+{
+ unsigned int ret;
+
+ ret = platform->driver->read(platform, reg);
+ dev_dbg(platform->dev, "read %x => %x\n", reg, ret);
+ trace_snd_soc_preg_read(platform, reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_platform_read);
+
+unsigned int snd_soc_platform_write(struct snd_soc_platform *platform,
+ unsigned int reg, unsigned int val)
+{
+ dev_dbg(platform->dev, "write %x = %x\n", reg, val);
+ trace_snd_soc_preg_write(platform, reg, val);
+ return platform->driver->write(platform, reg, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_platform_write);
+
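
Not part of the patch: snd_soc_platform_read()/snd_soc_platform_write() simply dispatch to the platform driver's read/write callbacks so that platform-level DAPM widgets can touch DSP "registers" the same way codec widgets do. A minimal sketch of a driver wiring those callbacks to a private cache; the driver name and register array are assumptions.

#include <sound/soc.h>

#define MY_DSP_NUM_REGS	16

/* hypothetical backing store for the platform's virtual registers */
static unsigned int my_dsp_regs[MY_DSP_NUM_REGS];

static unsigned int my_dsp_read(struct snd_soc_platform *platform,
				unsigned int reg)
{
	if (reg >= MY_DSP_NUM_REGS)
		return 0;
	return my_dsp_regs[reg];
}

static int my_dsp_write(struct snd_soc_platform *platform,
			unsigned int reg, unsigned int val)
{
	if (reg >= MY_DSP_NUM_REGS)
		return -EINVAL;
	my_dsp_regs[reg] = val;
	return 0;
}

static struct snd_soc_platform_driver my_dsp_platform_drv = {
	.read	= my_dsp_read,
	.write	= my_dsp_write,
};
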
/**
* snd_soc_codec_readable_register: Report if a register is readable.
*
@@ -2411,6 +2717,8 @@
const struct snd_pcm_hardware *hw)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ if (!runtime)
+ return 0;
runtime->hw.info = hw->info;
runtime->hw.formats = hw->formats;
runtime->hw.period_bytes_min = hw->period_bytes_min;
@@ -2504,6 +2812,36 @@
EXPORT_SYMBOL_GPL(snd_soc_add_controls);
/**
+ * snd_soc_add_platform_controls - add an array of controls to a platform.
+ * Convenience function to add a list of controls.
+ *
+ * @platform: platform to add controls to
+ * @controls: array of controls to add
+ * @num_controls: number of elements in the array
+ *
+ * Return 0 for success, else error.
+ */
+int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
+ const struct snd_kcontrol_new *controls, int num_controls)
+{
+ struct snd_card *card = platform->card->snd_card;
+ int err, i;
+
+ for (i = 0; i < num_controls; i++) {
+ const struct snd_kcontrol_new *control = &controls[i];
+ err = snd_ctl_add(card, snd_soc_cnew(control, platform,
+ control->name, NULL));
+ if (err < 0) {
+ dev_err(platform->dev, "Failed to add %s: %d\n", control->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls);
+
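
Not part of the patch: a hedged usage sketch for snd_soc_add_platform_controls(), as a platform driver's probe might call it. The control, its handlers and the backing state are assumptions; custom get/put handlers are used because the standard codec volsw helpers assume a codec as the kcontrol owner.

#include <linux/kernel.h>
#include <sound/soc.h>

/* hypothetical backing state for one platform-level switch */
static int my_dsp_dl1_enable;

static int my_dsp_dl1_get(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = my_dsp_dl1_enable;
	return 0;
}

static int my_dsp_dl1_put(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol)
{
	my_dsp_dl1_enable = !!ucontrol->value.integer.value[0];
	return 0;
}

static const struct snd_kcontrol_new my_dsp_controls[] = {
	SOC_SINGLE_EXT("DL1 Media Switch", 0, 0, 1, 0,
		       my_dsp_dl1_get, my_dsp_dl1_put),
};

static int my_dsp_platform_probe(struct snd_soc_platform *platform)
{
	/* platform->card is assigned before the platform probe runs, so the
	 * controls land on the card's snd_card */
	return snd_soc_add_platform_controls(platform, my_dsp_controls,
					     ARRAY_SIZE(my_dsp_controls));
}
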
+/**
* snd_soc_info_enum_double - enumerated double mixer info callback
* @kcontrol: mixer control
* @uinfo: control element information
@@ -2525,7 +2863,8 @@
if (uinfo->value.enumerated.item > e->max - 1)
uinfo->value.enumerated.item = e->max - 1;
strcpy(uinfo->value.enumerated.name,
- e->texts[uinfo->value.enumerated.item]);
+ snd_soc_get_enum_text(e, uinfo->value.enumerated.item));
+
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
@@ -2689,7 +3028,7 @@
if (uinfo->value.enumerated.item > e->max - 1)
uinfo->value.enumerated.item = e->max - 1;
strcpy(uinfo->value.enumerated.name,
- e->texts[uinfo->value.enumerated.item]);
+ snd_soc_get_enum_text(e, uinfo->value.enumerated.item));
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext);
@@ -3370,6 +3709,7 @@
int snd_soc_register_card(struct snd_soc_card *card)
{
int i;
+ int ret = 0;
if (!card->name || !card->dev)
return -EINVAL;
@@ -3387,12 +3727,43 @@
return -ENOMEM;
card->rtd_aux = &card->rtd[card->num_links];
- for (i = 0; i < card->num_links; i++)
+ for (i = 0; i < card->num_links; i++) {
card->rtd[i].dai_link = &card->dai_link[i];
+ if (card->rtd[i].dai_link->dynamic) {
+
+ card->rtd[i].dai_link->codec_name = "null-codec";
+ card->rtd[i].dai_link->codec_dai_name = "null-codec-dai";
+
+ ret = snd_soc_register_codec(card->dev, &null_codec_drv,
+ &null_codec_dai_drv, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to register dynamic DAI link %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ continue;
+ }
+ if (card->rtd[i].dai_link->no_codec) {
+ card->rtd[i].dai_link->codec_name = "null-codec";
+
+ ret = snd_soc_register_codec(card->dev, &null_codec_drv,
+ &null_codec_dai_drv, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to register no-codec DAI link %d\n",
+ __func__, ret);
+ goto out;
+ }
+ continue;
+ }
+ }
INIT_LIST_HEAD(&card->list);
card->instantiated = 0;
mutex_init(&card->mutex);
+ mutex_init(&card->dapm_mutex);
+ mutex_init(&card->dsp_mutex);
+ mutex_init(&card->power_mutex);
mutex_lock(&client_mutex);
list_add(&card->list, &card_list);
@@ -3401,7 +3772,8 @@
dev_dbg(card->dev, "Registered card '%s'\n", card->name);
- return 0;
+out:
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);
@@ -3646,7 +4018,10 @@
}
platform->dev = dev;
+ platform->dapm.platform = platform;
platform->driver = platform_drv;
+ platform->dapm.dev = dev;
+ platform->dapm.stream_event = platform_drv->stream_event;
mutex_lock(&client_mutex);
list_add(&platform->list, &platform_list);
@@ -3739,7 +4114,10 @@
return -ENOMEM;
/* create CODEC component name */
- codec->name = fmt_single_name(dev, &codec->id);
+ if (codec_drv == &null_codec_drv)
+ codec->name = kstrdup("null-codec", GFP_KERNEL);
+ else
+ codec->name = fmt_single_name(dev, &codec->id);
if (codec->name == NULL) {
kfree(codec);
return -ENOMEM;
@@ -3759,6 +4137,7 @@
codec->dapm.dev = dev;
codec->dapm.codec = codec;
codec->dapm.seq_notifier = codec_drv->seq_notifier;
+ codec->dapm.stream_event = codec_drv->stream_event;
codec->dev = dev;
codec->driver = codec_drv;
codec->num_dai = num_dai;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 0c9dee2..042d4ae 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -48,6 +48,10 @@
#include <trace/events/asoc.h>
+#define PATH_MAX_HOPS 16
+
+int soc_dsp_runtime_update(struct snd_soc_dapm_widget *);
+
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
[snd_soc_dapm_pre] = 0,
@@ -126,6 +130,390 @@
return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
}
+static inline struct snd_card *dapm_get_card(struct snd_soc_dapm_context *dapm)
+{
+ if (dapm->codec)
+ return dapm->codec->card->snd_card;
+ else if (dapm->platform)
+ return dapm->platform->card->snd_card;
+ else
+ BUG();
+}
+
+static inline struct snd_soc_card *dapm_get_soc_card(
+ struct snd_soc_dapm_context *dapm)
+{
+ if (dapm->codec)
+ return dapm->codec->card;
+ else if (dapm->platform)
+ return dapm->platform->card;
+ else
+ BUG();
+}
+
+static int soc_widget_read(struct snd_soc_dapm_widget *w, int reg)
+{
+ if (w->codec)
+ return snd_soc_read(w->codec, reg);
+ else if (w->platform)
+ return snd_soc_platform_read(w->platform, reg);
+ return 0;
+}
+
+static int soc_widget_write(struct snd_soc_dapm_widget *w, int reg, int val)
+{
+ if (w->codec)
+ return snd_soc_write(w->codec, reg, val);
+ else if (w->platform)
+ return snd_soc_platform_write(w->platform, reg, val);
+ return 0;
+}
+
+int soc_widget_update_bits(struct snd_soc_dapm_widget *w, unsigned short reg,
+ unsigned int mask, unsigned int value)
+{
+ int change;
+ unsigned int old, new;
+
+ old = soc_widget_read(w, reg);
+ new = (old & ~mask) | value;
+ change = old != new;
+ if (change)
+ soc_widget_write(w, reg, new);
+
+ return change;
+}
+
+int soc_widget_test_bits(struct snd_soc_dapm_widget *w, unsigned short reg,
+ unsigned int mask, unsigned int value)
+{
+ int change;
+ unsigned int old, new;
+
+ old = soc_widget_read(w, reg);
+ new = (old & ~mask) | value;
+ change = old != new;
+
+ return change;
+}
+
+/* reset 'walked' bit for each dapm path */
+static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm)
+{
+ struct snd_soc_dapm_path *p;
+
+ list_for_each_entry(p, &dapm->card->paths, list)
+ p->walked = 0;
+}
+
+static void dapm_clear_paths(struct snd_soc_dapm_context *dapm)
+{
+ struct snd_soc_dapm_path *p;
+ struct snd_soc_dapm_widget *w;
+ struct list_head *l;
+
+ list_for_each(l, &dapm->card->paths) {
+ p = list_entry(l, struct snd_soc_dapm_path, list);
+ p->length = 0;
+ }
+ list_for_each(l, &dapm->card->widgets) {
+ w = list_entry(l, struct snd_soc_dapm_widget, list);
+ w->hops = 0;
+ }
+ dapm_clear_walk(dapm);
+}
+
+static int dapm_add_unique_widget(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget_list **list, struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_widget_list *wlist;
+ int wlistsize, wlistentries, i;
+
+ /* is the list empty ? */
+ if (*list == NULL) {
+
+ wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
+ sizeof(struct snd_soc_dapm_widget *);
+ *list = kzalloc(wlistsize, GFP_KERNEL);
+ if (*list == NULL) {
+ dev_err(dapm->dev, "can't allocate widget list for %s\n", w->name);
+ return -ENOMEM;
+ }
+ } else {
+
+ wlist = *list;
+ /* is this widget already in the list */
+ for (i = 0; i < wlist->num_widgets; i++) {
+ if (wlist->widgets[i] == w)
+ return 0;
+ }
+
+ wlistentries = wlist->num_widgets + 1;
+ wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
+ wlistentries * sizeof(struct snd_soc_dapm_widget *);
+ *list = krealloc(wlist, wlistsize, GFP_KERNEL);
+ if (*list == NULL) {
+ dev_err(dapm->dev, "can't allocate widget list for %s\n", w->name);
+ return -ENOMEM;
+ }
+ }
+ wlist = *list;
+
+ /* insert the widget */
+ dev_dbg(dapm->dev, "added %s in widget list pos %d\n",
+ w->name, wlist->num_widgets);
+ wlist->widgets[wlist->num_widgets] = w;
+ wlist->num_widgets++;
+ return 1;
+}
+
+static int is_output_widget_ep(struct snd_soc_dapm_widget *widget)
+{
+ switch (widget->id) {
+ case snd_soc_dapm_adc:
+ case snd_soc_dapm_aif_out:
+ return 1;
+ case snd_soc_dapm_output:
+ if (widget->connected && !widget->ext)
+ return 1;
+ else
+ return 0;
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_spk:
+ case snd_soc_dapm_line:
+ return !list_empty(&widget->sources);
+ default:
+ return 0;
+ }
+}
+
+static int is_input_widget_ep(struct snd_soc_dapm_widget *widget)
+{
+ switch (widget->id) {
+ case snd_soc_dapm_dac:
+ case snd_soc_dapm_aif_in:
+ return 1;
+ case snd_soc_dapm_input:
+ if (widget->connected && !widget->ext)
+ return 1;
+ else
+ return 0;
+ case snd_soc_dapm_mic:
+ return !list_empty(&widget->sources);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * find all the paths between source and sink
+ */
+static int dapm_find_playback_paths(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *root,
+ struct snd_soc_dapm_widget_list **list, int hops)
+{
+ struct list_head *lp;
+ struct snd_soc_dapm_path *path;
+ int dist = 0;
+
+ if (hops > PATH_MAX_HOPS)
+ return 0;
+
+ if (is_output_widget_ep(root) && hops != 1) {
+ dev_dbg(dapm->dev," ! %d: valid playback route found\n", hops);
+ dapm->num_valid_paths++;
+ return 1;
+ }
+
+ if (root->hops && root->hops <= hops)
+ return 0;
+ root->hops = hops;
+
+ /* check all the output paths on this source widget by walking
+ * from source to sink */
+ list_for_each(lp, &root->sinks) {
+ path = list_entry(lp, struct snd_soc_dapm_path, list_source);
+
+ dev_dbg(dapm->dev," %c %d: %s -> %s -> %s\n",
+ path->connect ? '*' : ' ', hops,
+ root->name, path->name, path->sink->name);
+
+ /* been here before ? */
+ if (path->length && path->length <= hops)
+ continue;
+
+ /* check down the next path if connected */
+ if (path->sink && path->connect &&
+ dapm_find_playback_paths(dapm, path->sink, list, hops + 1)) {
+ path->length = hops;
+
+ /* add widget to list */
+ dapm_add_unique_widget(dapm, list, path->sink);
+
+ if (!dist || dist > path->length)
+ dist = path->length;
+ }
+ }
+
+ return dist;
+}
+
+static int dapm_find_capture_paths(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *root,
+ struct snd_soc_dapm_widget_list **list, int hops)
+{
+ struct list_head *lp;
+ struct snd_soc_dapm_path *path;
+ int dist = 0;
+
+ if (hops > PATH_MAX_HOPS)
+ return 0;
+
+ if (is_input_widget_ep(root) && hops != 1) {
+ dev_dbg(dapm->dev," ! %d: valid capture route found\n", hops);
+ dapm->num_valid_paths++;
+ return 1;
+ }
+
+ if (root->hops && root->hops <= hops)
+ return 0;
+ root->hops = hops;
+
+ /* check all the input paths on this sink widget by walking from
+ * sink to source */
+ list_for_each(lp, &root->sources) {
+ path = list_entry(lp, struct snd_soc_dapm_path, list_sink);
+
+ dev_dbg(dapm->dev," %c %d: %s <- %s <- %s\n",
+ path->connect ? '*' : ' ', hops,
+ root->name, path->name, path->source->name);
+
+ /* been here before ? */
+ if (path->length && path->length <= hops)
+ continue;
+
+ /* check down the next path if connected */
+ if (path->source && path->connect &&
+ dapm_find_capture_paths(dapm, path->source, list, hops + 1)) {
+ path->length = hops;
+
+ /* add widget to list */
+ dapm_add_unique_widget(dapm, list, path->source);
+
+ if (!dist || dist > path->length)
+ dist = path->length;
+ }
+ }
+
+ return dist;
+}
+
+/*
+ * traverse the tree from sink to source via the shortest path
+ */
+static int dapm_get_playback_paths(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *root,
+ struct snd_soc_dapm_widget_list **list)
+{
+ dev_dbg(dapm->dev, "Playback: checking paths from %s\n",root->name);
+ dapm_find_playback_paths(dapm, root, list, 1);
+ return dapm->num_valid_paths;
+}
+
+static int dapm_get_capture_paths(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *root,
+ struct snd_soc_dapm_widget_list **list)
+{
+ dev_dbg(dapm->dev, "Capture: checking paths to %s\n", root->name);
+ dapm_find_capture_paths(dapm, root, list, 1);
+ return dapm->num_valid_paths;
+}
+
+/**
+ * snd_soc_dapm_get_connected_widgets_type - query audio path and its widgets.
+ * @dapm: the dapm context.
+ * @stream_name: stream name.
+ * @list: list of active widgets for this stream.
+ * @stream: stream direction.
+ * @type: Initial widget type.
+ *
+ * Queries the DAPM graph as to whether a valid audio stream path exists for
+ * the DAPM stream and initial widget type specified. This takes into account
+ * current mixer and mux kcontrol settings. Creates list of valid widgets.
+ *
+ * Returns the number of valid paths or negative error.
+ */
+int snd_soc_dapm_get_connected_widgets_type(struct snd_soc_dapm_context *dapm,
+ const char *stream_name, struct snd_soc_dapm_widget_list **list,
+ int stream, enum snd_soc_dapm_type type)
+{
+ struct snd_soc_dapm_widget *w;
+ int paths;
+
+ /* get stream root widget AIF, DAC or ADC from stream string and direction */
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+
+ if (!w->sname)
+ continue;
+
+ if (w->id != type)
+ continue;
+
+ if (strstr(w->sname, stream_name))
+ goto found;
+ }
+ dev_err(dapm->dev, "root widget not found\n");
+ return 0;
+
+found:
+ dapm->num_valid_paths = 0;
+ *list = NULL;
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ paths = dapm_get_playback_paths(dapm, w, list);
+ else
+ paths = dapm_get_capture_paths(dapm, w, list);
+
+ dapm_clear_paths(dapm);
+ return paths;
+}
+/**
+ * snd_soc_dapm_get_connected_widgets_name - query audio path and its widgets.
+ * @dapm: the dapm context.
+ * @name: initial widget name.
+ * @list: list of active widgets for this stream.
+ * @stream: stream direction.
+ *
+ * Queries the DAPM graph as to whether a valid audio stream path exists for
+ * the initial widget specified by name. This takes into account
+ * current mixer and mux kcontrol settings. Creates list of valid widgets.
+ *
+ * Returns the number of valid paths or negative error.
+ */
+int snd_soc_dapm_get_connected_widgets_name(struct snd_soc_dapm_context *dapm,
+ const char *name, struct snd_soc_dapm_widget_list **list, int stream)
+{
+ struct snd_soc_dapm_widget *w;
+ int paths;
+
+ /* get the root widget that matches the given widget name */
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+
+ if (strstr(w->name, name))
+ goto found;
+ }
+ dev_err(dapm->dev, "root widget %s not found\n", name);
+ return 0;
+
+found:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ paths = dapm_get_playback_paths(dapm, w, list);
+ else
+ paths = dapm_get_capture_paths(dapm, w, list);
+
+ dapm_clear_paths(dapm);
+ return paths;
+}
+
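
Not part of the patch: a hedged sketch of how a caller (for example the DSP layer added later in this series) might query the active playback paths for a front-end DAI. The stream name is an assumption; the caller owns and frees the returned widget list.

#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Hypothetical query: count the active playback paths that start at the
 * FE AIF input whose stream name contains "Media Playback", collecting
 * the widgets found along those paths. */
static int my_count_media_paths(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget_list *list = NULL;
	int paths;

	paths = snd_soc_dapm_get_connected_widgets_type(&card->dapm,
			"Media Playback", &list,
			SNDRV_PCM_STREAM_PLAYBACK, snd_soc_dapm_aif_in);

	/* the widget list is allocated for the caller */
	kfree(list);
	return paths;
}
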
/**
* snd_soc_dapm_set_bias_level - set the bias level for the system
* @dapm: DAPM context
@@ -196,7 +584,7 @@
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- val = snd_soc_read(w->codec, reg);
+ val = soc_widget_read(w, reg);
val = (val >> shift) & mask;
if ((invert && !val) || (!invert && val))
@@ -212,12 +600,12 @@
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
- val = snd_soc_read(w->codec, e->reg);
+ val = soc_widget_read(w, e->reg);
item = (val >> e->shift_l) & (bitmask - 1);
p->connect = 0;
for (i = 0; i < e->max; i++) {
- if (!(strcmp(p->name, e->texts[i])) && item == i)
+ if (!(strcmp(p->name, snd_soc_get_enum_text(e, i))) && item == i)
p->connect = 1;
}
}
@@ -233,7 +621,7 @@
* that the default mux choice (the first) will be
* correctly powered up during initialization.
*/
- if (!strcmp(p->name, e->texts[0]))
+ if (!strcmp(p->name, snd_soc_get_enum_text(e, 0)))
p->connect = 1;
}
break;
@@ -242,7 +630,7 @@
w->kcontrol_news[i].private_value;
int val, item;
- val = snd_soc_read(w->codec, e->reg);
+ val = soc_widget_read(w, e->reg);
val = (val >> e->shift_l) & e->mask;
for (item = 0; item < e->max; item++) {
if (val == e->values[item])
@@ -251,7 +639,7 @@
p->connect = 0;
for (i = 0; i < e->max; i++) {
- if (!(strcmp(p->name, e->texts[i])) && item == i)
+ if (!(strcmp(p->name, snd_soc_get_enum_text(e, i))) && item == i)
p->connect = 1;
}
}
@@ -292,11 +680,11 @@
int i;
for (i = 0; i < e->max; i++) {
- if (!(strcmp(control_name, e->texts[i]))) {
+ if (!(strcmp(control_name, snd_soc_get_enum_text(e, i)))) {
list_add(&path->list, &dapm->card->paths);
list_add(&path->list_sink, &dest->sources);
list_add(&path->list_source, &src->sinks);
- path->name = (char*)e->texts[i];
+ path->name = (char*)snd_soc_get_enum_text(e, i);
dapm_set_path_status(dest, path, 0);
return 0;
}
@@ -494,7 +882,7 @@
wlist->widgets[wlistentries - 1] = w;
if (!kcontrol) {
- if (dapm->codec)
+ if (dapm->codec && dapm->codec->name_prefix)
prefix = dapm->codec->name_prefix;
else
prefix = NULL;
@@ -516,7 +904,7 @@
* cut the prefix off the front of the widget name.
*/
kcontrol = snd_soc_cnew(&w->kcontrol_news[0], wlist,
- name + prefix_len, prefix);
+ name, prefix);
ret = snd_ctl_add(card, kcontrol);
if (ret < 0) {
dev_err(dapm->dev,
@@ -546,15 +934,6 @@
return 0;
}
-/* reset 'walked' bit for each dapm path */
-static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm)
-{
- struct snd_soc_dapm_path *p;
-
- list_for_each_entry(p, &dapm->card->paths, list)
- p->walked = 0;
-}
-
/* We implement power down on suspend by checking the power state of
* the ALSA card - when we are suspending the ALSA state for the card
* is set to D3.
@@ -683,7 +1062,7 @@
else
val = w->off_val;
- snd_soc_update_bits(w->codec, -(w->reg + 1),
+ soc_widget_update_bits(w, -(w->reg + 1),
w->mask << w->shift, val << w->shift);
return 0;
@@ -855,14 +1234,15 @@
struct list_head *pending)
{
struct snd_soc_card *card = dapm->card;
- struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_widget *w, *_w;
int reg, power;
unsigned int value = 0;
unsigned int mask = 0;
unsigned int cur_mask;
- reg = list_first_entry(pending, struct snd_soc_dapm_widget,
- power_list)->reg;
+ _w = list_first_entry(pending, struct snd_soc_dapm_widget,
+ power_list);
+ reg = _w->reg;
list_for_each_entry(w, pending, power_list) {
cur_mask = 1 << w->shift;
@@ -887,11 +1267,17 @@
}
if (reg >= 0) {
+ /* Any widget will do, they should all be updating the
+ * same register.
+ */
+ w = list_first_entry(pending, struct snd_soc_dapm_widget,
+ power_list);
+
pop_dbg(dapm->dev, card->pop_time,
"pop test : Applying 0x%x/0x%x to %x in %dms\n",
value, mask, reg, card->pop_time);
pop_wait(card->pop_time);
- snd_soc_update_bits(dapm->codec, reg, mask, value);
+ soc_widget_update_bits(_w, reg, mask, value);
}
list_for_each_entry(w, pending, power_list) {
@@ -1114,12 +1500,14 @@
trace_snd_soc_dapm_start(card);
list_for_each_entry(d, &card->dapm_list, list)
- if (d->n_widgets || d->codec == NULL)
+ if (d->n_widgets || d->codec == NULL ||
+ strstr(d->codec->name, "null-codec"))
d->dev_power = 0;
/* Check which widgets we need to power and store them in
* lists indicating if they should be powered up or down.
*/
+ mutex_lock(&card->power_mutex);
list_for_each_entry(w, &card->widgets, list) {
switch (w->id) {
case snd_soc_dapm_pre:
@@ -1165,7 +1553,11 @@
dapm->dev_power = 1;
break;
case SND_SOC_DAPM_STREAM_STOP:
- dapm->dev_power = !!dapm->codec->active;
+#warning need re-work
+ if (dapm->codec)
+ dapm->dev_power = !!dapm->codec->active;
+ else
+ dapm->dev_power = 0;
break;
case SND_SOC_DAPM_STREAM_SUSPEND:
dapm->dev_power = 0;
@@ -1221,6 +1613,8 @@
trace_snd_soc_dapm_done(card);
+ mutex_unlock(&card->power_mutex);
+
return 0;
}
@@ -1399,7 +1793,7 @@
#endif
/* test and update the power status of a mux widget */
-static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
struct snd_kcontrol *kcontrol, int change,
int mux, struct soc_enum *e)
{
@@ -1416,29 +1810,33 @@
/* find dapm widget path assoc with kcontrol */
list_for_each_entry(path, &widget->dapm->card->paths, list) {
+
if (path->kcontrol != kcontrol)
continue;
- if (!path->name || !e->texts[mux])
+ if (!path->name || !snd_soc_get_enum_text(e, mux))
continue;
found = 1;
/* we now need to match the string in the enum to the path */
- if (!(strcmp(path->name, e->texts[mux])))
+ if (!(strcmp(path->name, snd_soc_get_enum_text(e, mux))))
path->connect = 1; /* new connection */
else
path->connect = 0; /* old connection must be powered down */
}
- if (found)
+ if (found) {
dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
+ soc_dsp_runtime_update(widget);
+ }
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
/* test and update the power status of a mixer or switch widget */
-static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int connect)
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
+ struct snd_kcontrol *kcontrol, int connect)
{
struct snd_soc_dapm_path *path;
int found = 0;
@@ -1459,26 +1857,25 @@
break;
}
- if (found)
+ if (found) {
dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
+ soc_dsp_runtime_update(widget);
+ }
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
/* show dapm widget status in sys fs */
-static ssize_t dapm_widget_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t widget_show(struct snd_soc_dapm_context *dapm,
+ const char *name, char *buf, ssize_t count)
{
- struct snd_soc_pcm_runtime *rtd =
- container_of(dev, struct snd_soc_pcm_runtime, dev);
- struct snd_soc_codec *codec =rtd->codec;
struct snd_soc_dapm_widget *w;
- int count = 0;
char *state = "not set";
- list_for_each_entry(w, &codec->card->widgets, list) {
- if (w->dapm != &codec->dapm)
- continue;
+ count += sprintf(buf + count, "\n%s\n", name);
+
+ list_for_each_entry(w, &dapm->card->widgets, list) {
 /* only display widgets that burn power */
switch (w->id) {
@@ -1503,7 +1900,7 @@
}
}
- switch (codec->dapm.bias_level) {
+ switch (dapm->bias_level) {
case SND_SOC_BIAS_ON:
state = "On";
break;
@@ -1522,6 +1919,21 @@
return count;
}
+/* show dapm widget status in sys fs */
+static ssize_t dapm_widget_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct snd_soc_pcm_runtime *rtd =
+ container_of(dev, struct snd_soc_pcm_runtime, dev);
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_platform *platform = rtd->platform;
+ ssize_t count = 0;
+
+ count += widget_show(&codec->dapm, codec->name, buf, count);
+ count += widget_show(&platform->dapm, platform->name, buf, count);
+ return count;
+}
+
static DEVICE_ATTR(dapm_widget, 0444, dapm_widget_show, NULL);
int snd_soc_dapm_sys_add(struct device *dev)
@@ -1867,7 +2279,7 @@
/* Read the initial power state from the device */
if (w->reg >= 0) {
- val = snd_soc_read(w->codec, w->reg);
+ val = soc_widget_read(w, w->reg);
val &= 1 << w->shift;
if (w->invert)
val = !val;
@@ -1886,6 +2298,24 @@
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
+const char *snd_soc_dapm_get_aif(struct snd_soc_dapm_context *dapm,
+ const char *stream_name, enum snd_soc_dapm_type type)
+{
+ struct snd_soc_dapm_widget *w;
+
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+
+ if (!w->sname)
+ continue;
+
+ if (w->id == type && strstr(w->sname, stream_name))
+ return w->name;
+
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_get_aif);
+
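
Not part of the patch: a small hedged sketch of snd_soc_dapm_get_aif(), which returns the name of the first AIF widget of the given type whose stream name matches. The helper and stream name below are assumptions.

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Hypothetical helper: report which AIF input widget carries a stream. */
static void my_report_aif(struct snd_soc_codec *codec, const char *stream)
{
	const char *aif = snd_soc_dapm_get_aif(&codec->dapm, stream,
					       snd_soc_dapm_aif_in);

	if (aif)
		dev_dbg(codec->dev, "%s is fed by AIF widget %s\n",
			stream, aif);
}
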
/**
* snd_soc_dapm_get_volsw - dapm mixer get callback
* @kcontrol: mixer control
@@ -1983,7 +2413,7 @@
update.val = val;
widget->dapm->update = &update;
- dapm_mixer_update_power(widget, kcontrol, connect);
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, connect);
widget->dapm->update = NULL;
}
@@ -2074,7 +2504,7 @@
update.val = val;
widget->dapm->update = &update;
- dapm_mux_update_power(widget, kcontrol, change, mux, e);
+ snd_soc_dapm_mux_update_power(widget, kcontrol, change, mux, e);
widget->dapm->update = NULL;
}
@@ -2135,8 +2565,8 @@
widget->value = ucontrol->value.enumerated.item[0];
- dapm_mux_update_power(widget, kcontrol, change,
- widget->value, e);
+ snd_soc_dapm_mux_update_power(widget, kcontrol, change,
+ widget->value, e);
}
}
@@ -2239,7 +2669,7 @@
update.val = val;
widget->dapm->update = &update;
- dapm_mux_update_power(widget, kcontrol, change, mux, e);
+ snd_soc_dapm_mux_update_power(widget, kcontrol, change, mux, e);
widget->dapm->update = NULL;
}
@@ -2355,6 +2785,7 @@
dapm->n_widgets++;
w->dapm = dapm;
w->codec = dapm->codec;
+ w->platform = dapm->platform;
INIT_LIST_HEAD(&w->sources);
INIT_LIST_HEAD(&w->sinks);
INIT_LIST_HEAD(&w->list);
@@ -2401,6 +2832,9 @@
{
struct snd_soc_dapm_widget *w;
+ if (!dapm)
+ return;
+
list_for_each_entry(w, &dapm->card->widgets, list)
{
if (!w->sname || w->dapm != dapm)
@@ -2425,8 +2859,41 @@
}
dapm_power_widgets(dapm, event);
+ /* do we need to notify any clients that DAPM stream is complete */
+ if (dapm->stream_event)
+ dapm->stream_event(dapm);
}
+static void soc_dapm_platform_stream_event(struct snd_soc_platform *platform,
+ const char *stream, int event)
+{
+ soc_dapm_stream_event(&platform->dapm, stream, event);
+}
+
+static void soc_dapm_codec_stream_event(struct snd_soc_codec *codec,
+ const char *stream, int event)
+{
+ soc_dapm_stream_event(&codec->dapm, stream, event);
+}
+
+void snd_soc_dapm_platform_stream_event(struct snd_soc_platform *platform,
+ const char *stream, int event)
+{
+ mutex_lock(&platform->card->dapm_mutex);
+ soc_dapm_platform_stream_event(platform, stream, event);
+ mutex_unlock(&platform->card->dapm_mutex);
+}
+EXPORT_SYMBOL(snd_soc_dapm_platform_stream_event);
+
+void snd_soc_dapm_codec_stream_event(struct snd_soc_codec *codec,
+ const char *stream, int event)
+{
+ mutex_lock(&codec->card->dapm_mutex);
+ soc_dapm_codec_stream_event(codec, stream, event);
+ mutex_unlock(&codec->card->dapm_mutex);
+}
+EXPORT_SYMBOL(snd_soc_dapm_codec_stream_event);
+
/**
* snd_soc_dapm_stream_event - send a stream event to the dapm core
* @rtd: PCM runtime data
@@ -2441,14 +2908,16 @@
int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
const char *stream, int event)
{
- struct snd_soc_codec *codec = rtd->codec;
-
if (stream == NULL)
return 0;
- mutex_lock(&codec->mutex);
- soc_dapm_stream_event(&codec->dapm, stream, event);
- mutex_unlock(&codec->mutex);
+ mutex_lock(&rtd->card->dapm_mutex);
+
+ soc_dapm_platform_stream_event(rtd->platform, stream, event);
+ soc_dapm_codec_stream_event(rtd->codec, stream, event);
+ soc_dapm_stream_event(&rtd->card->dapm, stream, event);
+
+ mutex_unlock(&rtd->card->dapm_mutex);
return 0;
}
@@ -2556,6 +3025,27 @@
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status);
/**
+ * snd_soc_dapm_get_pin_power - get audio pin power state
+ * @dapm: DAPM context
+ * @pin: audio signal pin endpoint (or start point)
+ *
+ * Get audio pin power state - powered or not-powered.
+ *
+ * Returns 1 if powered, otherwise 0.
+ */
+int snd_soc_dapm_get_pin_power(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+
+ if (w)
+ return w->power;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_power);
+
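
Not part of the patch: a hedged usage sketch for snd_soc_dapm_get_pin_power(), for instance a machine driver gating an external amplifier on whether a speaker pin widget is actually powered. The pin name and GPIO are assumptions.

#include <linux/gpio.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Hypothetical check: only drive the external amp enable GPIO while the
 * "Ext Spk" pin widget is powered. */
static void my_update_ext_amp(struct snd_soc_codec *codec, int amp_gpio)
{
	int powered = snd_soc_dapm_get_pin_power(&codec->dapm, "Ext Spk");

	gpio_set_value(amp_gpio, powered ? 1 : 0);
}
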
+/**
* snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
* @dapm: DAPM context
* @pin: audio signal pin endpoint (or start point)
@@ -2633,6 +3123,7 @@
void snd_soc_dapm_shutdown(struct snd_soc_card *card)
{
struct snd_soc_codec *codec;
+ struct snd_soc_platform *platform;
list_for_each_entry(codec, &card->codec_dev_list, card_list) {
soc_dapm_shutdown_codec(&codec->dapm);
@@ -2640,6 +3131,11 @@
snd_soc_dapm_set_bias_level(&codec->dapm,
SND_SOC_BIAS_OFF);
}
+
+ list_for_each_entry(platform, &card->platform_dev_list, card_list) {
+ soc_dapm_shutdown_codec(&platform->dapm);
+ snd_soc_dapm_set_bias_level(&platform->dapm, SND_SOC_BIAS_OFF);
+ }
}
/* Module information */
diff --git a/sound/soc/soc-dsp.c b/sound/soc/soc-dsp.c
new file mode 100644
index 0000000..199b428
--- /dev/null
+++ b/sound/soc/soc-dsp.c
@@ -0,0 +1,1737 @@
+/*
+ * soc-dsp.c -- ALSA SoC Audio DSP
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/ac97_codec.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/soc-dsp.h>
+
+int soc_pcm_open(struct snd_pcm_substream *);
+void soc_pcm_close(struct snd_pcm_substream *);
+int soc_pcm_hw_params(struct snd_pcm_substream *, struct snd_pcm_hw_params *);
+int soc_pcm_hw_free(struct snd_pcm_substream *);
+int soc_pcm_prepare(struct snd_pcm_substream *);
+int soc_pcm_trigger(struct snd_pcm_substream *, int);
+int soc_pcm_bespoke_trigger(struct snd_pcm_substream *, int);
+
+/* count the number of FE clients in a particular state */
+int soc_dsp_fe_state_count(struct snd_soc_pcm_runtime *be, int stream,
+ enum snd_soc_dsp_state state)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ int count = 0;
+
+ list_for_each_entry(dsp_params, &be->dsp[stream].fe_clients, list_fe) {
+ if (dsp_params->fe->dsp[stream].state == state)
+ count++;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(soc_dsp_fe_state_count);
+
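
Not part of the patch: a hedged sketch of soc_dsp_fe_state_count(), mirroring the guard used in the BE trigger path later in this file: a back end is only stopped once no other front end still has it in the START state. The helper name is an assumption.

#include <sound/soc.h>
#include <sound/soc-dsp.h>

/* Hypothetical guard: may this BE be stopped, or is another FE running? */
static int my_be_can_stop(struct snd_soc_pcm_runtime *be, int stream)
{
	return soc_dsp_fe_state_count(be, stream,
				      SND_SOC_DSP_STATE_START) <= 1;
}
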
+static inline int be_connect(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* only add new dsp_params */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ if (dsp_params->be == be && dsp_params->fe == fe)
+ return 0;
+ }
+
+ dsp_params = kzalloc(sizeof(struct snd_soc_dsp_params), GFP_KERNEL);
+ if (!dsp_params)
+ return -ENOMEM;
+
+ dsp_params->be = be;
+ dsp_params->fe = fe;
+ be->dsp[stream].runtime = fe->dsp[stream].runtime;
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_NEW;
+ list_add(&dsp_params->list_be, &fe->dsp[stream].be_clients);
+ list_add(&dsp_params->list_fe, &be->dsp[stream].fe_clients);
+
+ dev_dbg(&fe->dev, " connected new DSP %s path %s %s %s\n",
+ stream ? "capture" : "playback", fe->dai_link->name,
+ stream ? "<-" : "->", be->dai_link->name);
+
+#ifdef CONFIG_DEBUG_FS
+ dsp_params->debugfs_state = debugfs_create_u32(be->dai_link->name, 0644,
+ fe->debugfs_dsp_root, &dsp_params->state);
+#endif
+
+ return 1;
+}
+
+static inline void be_reparent(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ struct snd_pcm_substream *fe_substream, *be_substream;
+
+ /* reparent if BE is connected to other FEs */
+ if (!be->dsp[stream].users)
+ return;
+
+ be_substream = snd_soc_dsp_get_substream(be, stream);
+
+ list_for_each_entry(dsp_params, &be->dsp[stream].fe_clients, list_fe) {
+ if (dsp_params->fe != fe) {
+
+ dev_dbg(&fe->dev, " reparent %s path %s %s %s\n",
+ stream ? "capture" : "playback",
+ dsp_params->fe->dai_link->name,
+ stream ? "<-" : "->", dsp_params->be->dai_link->name);
+
+ fe_substream = snd_soc_dsp_get_substream(dsp_params->fe,
+ stream);
+ be_substream->runtime = fe_substream->runtime;
+ break;
+ }
+ }
+}
+
+static inline void be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params, *d;
+
+ list_for_each_entry_safe(dsp_params, d, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ dev_dbg(&fe->dev, "BE %s disconnect check for %s\n",
+ stream ? "capture" : "playback",
+ be->dai_link->name);
+
+ if (dsp_params->state == SND_SOC_DSP_LINK_STATE_FREE) {
+ dev_dbg(&fe->dev, " freed DSP %s path %s %s %s\n",
+ stream ? "capture" : "playback",
+ fe->dai_link->name, stream ? "<-" : "->",
+ be->dai_link->name);
+
+ /* BEs still alive need new FE */
+ be_reparent(fe, be, stream);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(dsp_params->debugfs_state);
+#endif
+
+ list_del(&dsp_params->list_be);
+ list_del(&dsp_params->list_fe);
+ kfree(dsp_params);
+ }
+ }
+}
+
+static struct snd_soc_pcm_runtime *be_get_rtd(struct snd_soc_card *card,
+ struct snd_soc_dapm_widget *widget)
+{
+ struct snd_soc_pcm_runtime *be;
+ int i;
+
+ if (!widget->sname)
+ return NULL;
+
+ for (i = 0; i < card->num_links; i++) {
+ be = &card->rtd[i];
+
+ if (!strcmp(widget->sname, be->dai_link->stream_name))
+ return be;
+ }
+
+ return NULL;
+}
+
+static struct snd_soc_dapm_widget *be_get_widget(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_widget *widget;
+
+ list_for_each_entry(widget, &card->widgets, list) {
+
+ if (!widget->sname)
+ continue;
+
+ if (!strcmp(widget->sname, rtd->dai_link->stream_name))
+ return widget;
+ }
+
+ return NULL;
+}
+
+static int widget_in_list(struct snd_soc_dapm_widget_list *list,
+ struct snd_soc_dapm_widget *widget)
+{
+ int i;
+
+ for (i = 0; i < list->num_widgets; i++) {
+ if (widget == list->widgets[i])
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the corresponding BE DAIs that source or sink audio to this
+ * FE substream.
+ */
+static int dsp_add_new_paths(struct snd_soc_pcm_runtime *fe,
+ int stream, int pending)
+{
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_card *card = fe->card;
+ struct snd_soc_dapm_widget_list *list = NULL;
+ enum snd_soc_dapm_type fe_type, be_type;
+ int i, count = 0, err, paths;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ fe_type = snd_soc_dapm_aif_in;
+ be_type = snd_soc_dapm_aif_out;
+ } else {
+ fe_type = snd_soc_dapm_aif_out;
+ be_type = snd_soc_dapm_aif_in;
+ }
+
+ /* get number of valid playback paths and their widgets */
+ paths = snd_soc_dapm_get_connected_widgets_type(&card->dapm,
+ cpu_dai->driver->name, &list, stream, fe_type);
+
+ dev_dbg(&fe->dev, "found %d audio %s paths\n", paths,
+ stream ? "capture" : "playback");
+ if (!paths)
+ goto out;
+
+ /* find BE DAI widgets and connect them to the FE */
+ for (i = 0; i < list->num_widgets; i++) {
+
+ if (list->widgets[i]->id == be_type) {
+ struct snd_soc_pcm_runtime *be;
+
+ /* is there a valid BE rtd for this widget */
+ be = be_get_rtd(card, list->widgets[i]);
+ if (!be) {
+ dev_err(&fe->dev, "no BE found for %s\n",
+ list->widgets[i]->name);
+ continue;
+ }
+
+ /* don't connect if FE is not running */
+ if (!fe->dsp[stream].runtime)
+ continue;
+
+ /* newly connected FE and BE */
+ err = be_connect(fe, be, stream);
+ if (err < 0) {
+ dev_err(&fe->dev, "can't connect %s\n", list->widgets[i]->name);
+ break;
+ } else if (err == 0)
+ continue;
+
+ be->dsp[stream].runtime_update = pending;
+ count++;
+ }
+ }
+
+out:
+ /* list may not have been initialized if the root widget was not found */
+ if (list != NULL)
+ kfree(list);
+ return count;
+}
+
+/*
+ * Find the BE DAIs connected to this FE substream that are no longer part
+ * of an active path and mark them for pruning.
+ */
+static int dsp_prune_old_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ int pending)
+{
+ struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_card *card = fe->card;
+ struct snd_soc_dsp_params *dsp_params;
+ struct snd_soc_dapm_widget_list *list = NULL;
+ int count = 0, paths;
+ enum snd_soc_dapm_type fe_type, be_type;
+ struct snd_soc_dapm_widget *widget;
+
+ dev_dbg(&fe->dev, "scan for old %s %s streams\n", fe->dai_link->name,
+ stream ? "capture" : "playback");
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ fe_type = snd_soc_dapm_aif_in;
+ be_type = snd_soc_dapm_aif_out;
+ } else {
+ fe_type = snd_soc_dapm_aif_out;
+ be_type = snd_soc_dapm_aif_in;
+ }
+
+ /* get number of valid playback paths and their widgets */
+ paths = snd_soc_dapm_get_connected_widgets_type(&card->dapm,
+ cpu_dai->driver->name, &list, stream, fe_type);
+
+ dev_dbg(&fe->dev, "found %d audio %s paths\n", paths,
+ stream ? "capture" : "playback");
+ if (!paths) {
+ /* prune all BEs */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_FREE;
+ dsp_params->be->dsp[stream].runtime_update = pending;
+ count++;
+ }
+
+ dev_dbg(&fe->dev, "pruned all %s BE for FE %s\n", fe->dai_link->name,
+ stream ? "capture" : "playback");
+ goto out;
+ }
+
+ /* search card for valid BE AIFs */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ /* is there a valid widget for this BE */
+ widget = be_get_widget(card, dsp_params->be);
+ if (!widget) {
+ dev_err(&fe->dev, "no widget found for %s\n",
+ dsp_params->be->dai_link->name);
+ continue;
+ }
+
+ /* prune the BE if it's no longer in our active list */
+ if (widget_in_list(list, widget))
+ continue;
+
+ dev_dbg(&fe->dev, "pruning %s BE %s for %s\n",
+ stream ? "capture" : "playback", dsp_params->be->dai_link->name,
+ fe->dai_link->name);
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_FREE;
+ dsp_params->be->dsp[stream].runtime_update = pending;
+ count++;
+ }
+
+ /* the number of old paths pruned */
+out:
+ /* list may not have been initialized if the root widget was not found */
+ if (list != NULL)
+ kfree(list);
+ return count;
+}
+
+/*
+ * Clear the runtime pending state of all BE's.
+ */
+static void fe_clear_pending(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be)
+ dsp_params->be->dsp[stream].runtime_update =
+ SND_SOC_DSP_UPDATE_NO;
+}
+
+/* Unwind the BE startup */
+static void soc_dsp_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* disable any enabled and non active backends */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ if (--be->dsp[stream].users != 0)
+ continue;
+
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_OPEN)
+ continue;
+
+ soc_pcm_close(be_substream);
+ be_substream->runtime = NULL;
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_CLOSE;
+ }
+}
+
+/* Startup all new BE */
+static int soc_dsp_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ int err, count = 0;
+
+ /* only startup BE DAIs that are either sinks or sources to this FE DAI */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ /* first time the dsp_params is open ? */
+ if (be->dsp[stream].users++ != 0)
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_NEW) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_CLOSE))
+ continue;
+
+ dev_dbg(&be->dev, "dsp: open BE %s\n", be->dai_link->name);
+
+ be_substream->runtime = be->dsp[stream].runtime;
+ err = soc_pcm_open(be_substream);
+ if (err < 0) {
+ dev_err(&be->dev, "BE open failed %d\n", err);
+ be->dsp[stream].users--;
+ be->dsp[stream].state = SND_SOC_DSP_STATE_CLOSE;
+ be_substream->runtime = NULL;
+ goto unwind;
+ }
+ be->dsp[stream].state = SND_SOC_DSP_STATE_OPEN;
+ count++;
+ }
+
+ return count;
+
+unwind:
+ /* disable any enabled and non active backends */
+ list_for_each_entry_continue_reverse(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (--be->dsp[stream].users != 0)
+ continue;
+
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_OPEN)
+ continue;
+
+ soc_pcm_close(be_substream);
+ be_substream->runtime = NULL;
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_CLOSE;
+ }
+
+ return err;
+}
+
+void soc_dsp_set_dynamic_runtime(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai_driver *cpu_dai_drv = cpu_dai->driver;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw.rate_min = cpu_dai_drv->playback.rate_min;
+ runtime->hw.rate_max = cpu_dai_drv->playback.rate_max;
+ runtime->hw.channels_min = cpu_dai_drv->playback.channels_min;
+ runtime->hw.channels_max = cpu_dai_drv->playback.channels_max;
+ runtime->hw.formats &= cpu_dai_drv->playback.formats;
+ runtime->hw.rates = cpu_dai_drv->playback.rates;
+ } else {
+ runtime->hw.rate_min = cpu_dai_drv->capture.rate_min;
+ runtime->hw.rate_max = cpu_dai_drv->capture.rate_max;
+ runtime->hw.channels_min = cpu_dai_drv->capture.channels_min;
+ runtime->hw.channels_max = cpu_dai_drv->capture.channels_max;
+ runtime->hw.formats &= cpu_dai_drv->capture.formats;
+ runtime->hw.rates = cpu_dai_drv->capture.rates;
+ }
+}
+
+static int soc_dsp_fe_dai_startup(struct snd_pcm_substream *fe_substream)
+{
+ struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+ struct snd_pcm_runtime *runtime = fe_substream->runtime;
+ int runtime_update, stream = fe_substream->stream, ret = 0;
+
+ mutex_lock(&fe->card->dsp_mutex);
+
+ runtime_update = fe->dsp[stream].runtime_update;
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ ret = soc_dsp_be_dai_startup(fe, fe_substream->stream);
+ if (ret < 0)
+ goto be_err;
+
+ dev_dbg(&fe->dev, "dsp: open FE %s\n", fe->dai_link->name);
+
+ /* start the DAI frontend */
+ ret = soc_pcm_open(fe_substream);
+ if (ret < 0) {
+ dev_err(&fe->dev,"dsp: failed to start FE %d\n", ret);
+ goto unwind;
+ }
+
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_OPEN;
+
+ soc_dsp_set_dynamic_runtime(fe_substream);
+ snd_pcm_limit_hw_rates(runtime);
+
+ mutex_unlock(&fe->card->dsp_mutex);
+ return 0;
+
+unwind:
+ soc_dsp_be_dai_startup_unwind(fe, fe_substream->stream);
+be_err:
+ fe->dsp[stream].runtime_update = runtime_update;
+ mutex_unlock(&fe->card->dsp_mutex);
+ return ret;
+}
+
+/* BE shutdown - called on DAPM sync updates (i.e. FE is already running) */
+static int soc_dsp_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* only shutdown backends that are either sinks or sources to this frontend DAI */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if (--be->dsp[stream].users != 0)
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_HW_FREE) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_OPEN))
+ continue;
+
+ dev_dbg(&be->dev, "dsp: close BE %s\n",
+ be->dai_link->name);
+
+ soc_pcm_close(be_substream);
+ be_substream->runtime = NULL;
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_CLOSE;
+ }
+ return 0;
+}
+
+/* FE + BE shutdown - called on FE PCM ops */
+static int soc_dsp_fe_dai_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int runtime_update, stream = substream->stream;
+
+ mutex_lock(&fe->card->dsp_mutex);
+
+ runtime_update = fe->dsp[stream].runtime_update;
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ /* shutdown the BEs */
+ soc_dsp_be_dai_shutdown(fe, substream->stream);
+
+ dev_dbg(&fe->dev, "dsp: close FE %s\n", fe->dai_link->name);
+
+ /* now shutdown the frontend */
+ soc_pcm_close(substream);
+
+ /* run the stream event for each BE */
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->playback.stream_name,
+ SND_SOC_DAPM_STREAM_STOP);
+ else
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->capture.stream_name,
+ SND_SOC_DAPM_STREAM_STOP);
+
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_CLOSE;
+ fe->dsp[stream].runtime_update = runtime_update;
+
+ mutex_unlock(&fe->card->dsp_mutex);
+ return 0;
+}
+
+static int soc_dsp_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ int ret;
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ /* first time the dsp_params is open ? */
+ if (be->dsp[stream].users != 1)
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_OPEN) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_HW_FREE))
+ continue;
+
+ dev_dbg(&be->dev, "dsp: hw_params BE %s\n",
+ be->dai_link->name);
+
+ /* copy params for each dsp_params */
+ memcpy(&dsp_params->hw_params, &fe->dsp[stream].hw_params,
+ sizeof(struct snd_pcm_hw_params));
+
+ /* perform any hw_params fixups */
+ if (be->dai_link->be_hw_params_fixup) {
+ ret = be->dai_link->be_hw_params_fixup(be,
+ &dsp_params->hw_params);
+ if (ret < 0) {
+ dev_err(&be->dev,
+ "dsp: hw_params BE fixup failed %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = soc_pcm_hw_params(be_substream, &dsp_params->hw_params);
+ if (ret < 0) {
+ dev_err(&dsp_params->be->dev, "dsp: hw_params BE failed %d\n", ret);
+ return ret;
+ }
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_HW_PARAMS;
+ }
+ return 0;
+}
+
+int soc_dsp_fe_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret, runtime_update, stream = substream->stream;
+
+ mutex_lock(&fe->card->dsp_mutex);
+
+ runtime_update = fe->dsp[stream].runtime_update;
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ memcpy(&fe->dsp[substream->stream].hw_params, params,
+ sizeof(struct snd_pcm_hw_params));
+ ret = soc_dsp_be_dai_hw_params(fe, substream->stream);
+ if (ret < 0)
+ goto out;
+
+ dev_dbg(&fe->dev, "dsp: hw_params FE %s\n", fe->dai_link->name);
+
+ /* call hw_params on the frontend */
+ ret = soc_pcm_hw_params(substream, params);
+ if (ret < 0)
+ dev_err(&fe->dev,"dsp: hw_params FE failed %d\n", ret);
+
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_HW_PARAMS;
+
+out:
+ fe->dsp[stream].runtime_update = runtime_update;
+ mutex_unlock(&fe->card->dsp_mutex);
+ return ret;
+}
+
+static int dsp_do_trigger(struct snd_soc_dsp_params *dsp_params,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ int ret;
+
+ dev_dbg(&dsp_params->be->dev, "dsp: trigger BE %s cmd %d\n",
+ dsp_params->be->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0)
+ dev_err(&dsp_params->be->dev,"dsp: trigger BE failed %d\n", ret);
+
+ return ret;
+}
+
+int soc_dsp_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ int ret = 0;
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_PREPARE) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_STOP))
+ continue;
+
+ ret = dsp_do_trigger(dsp_params, be_substream, cmd);
+ if (ret)
+ return ret;
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_START;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_START)
+ continue;
+
+ if (soc_dsp_fe_state_count(be, stream,
+ SND_SOC_DSP_STATE_START) > 1)
+ continue;
+
+ ret = dsp_do_trigger(dsp_params, be_substream, cmd);
+ if (ret)
+ return ret;
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_STOP;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(soc_dsp_be_dai_trigger);
+
+int soc_dsp_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dsp_link *dsp_link = fe->dai_link->dsp_link;
+ int stream = substream->stream, ret;
+ int runtime_update = fe->dsp[stream].runtime_update;
+
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ switch (dsp_link->trigger[stream]) {
+ case SND_SOC_DSP_TRIGGER_PRE:
+ /* call trigger on the frontend before the backend. */
+
+ dev_dbg(&fe->dev, "dsp: pre trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0) {
+ dev_err(&fe->dev,"dsp: trigger FE failed %d\n", ret);
+ goto out;
+ }
+
+ ret = soc_dsp_be_dai_trigger(fe, substream->stream, cmd);
+ break;
+ case SND_SOC_DSP_TRIGGER_POST:
+ /* call trigger on the frontend after the backend. */
+
+ ret = soc_dsp_be_dai_trigger(fe, substream->stream, cmd);
+ if (ret < 0) {
+ dev_err(&fe->dev,"dsp: trigger FE failed %d\n", ret);
+ goto out;
+ }
+
+ dev_dbg(&fe->dev, "dsp: post trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ break;
+ case SND_SOC_DSP_TRIGGER_BESPOKE:
+ /* bespoke trigger() - handles both FE and BEs */
+
+ dev_dbg(&fe->dev, "dsp: bespoke trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_bespoke_trigger(substream, cmd);
+ if (ret < 0) {
+ dev_err(&fe->dev,"dsp: trigger FE failed %d\n", ret);
+ goto out;
+ }
+ break;
+ default:
+ dev_err(&fe->dev, "dsp: invalid trigger cmd %d for %s\n", cmd,
+ fe->dai_link->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_START;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_STOP;
+ break;
+ }
+
+out:
+ fe->dsp[stream].runtime_update = runtime_update;
+ return ret;
+}
+
+static int soc_dsp_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+ int ret = 0;
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_HW_PARAMS) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_STOP))
+ continue;
+
+ dev_dbg(&be->dev, "dsp: prepare BE %s\n",
+ be->dai_link->name);
+
+ ret = soc_pcm_prepare(be_substream);
+ if (ret < 0) {
+ dev_err(&be->dev, "dsp: backend prepare failed %d\n",
+ ret);
+ break;
+ }
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_PREPARE;
+ }
+ return ret;
+}
+
+int soc_dsp_fe_dai_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int runtime_update, stream = substream->stream, ret = 0;
+
+ mutex_lock(&fe->card->dsp_mutex);
+
+ dev_dbg(&fe->dev, "dsp: prepare FE %s\n", fe->dai_link->name);
+
+ runtime_update = fe->dsp[stream].runtime_update;
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ /* there is no point preparing this FE if there are no BEs */
+ if (list_empty(&fe->dsp[stream].be_clients)) {
+ dev_err(&fe->dev, "dsp: no backend DAIs enabled for %s\n",
+ fe->dai_link->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = soc_dsp_be_dai_prepare(fe, substream->stream);
+ if (ret < 0)
+ goto out;
+
+ /* call prepare on the frontend */
+ ret = soc_pcm_prepare(substream);
+ if (ret < 0) {
+		dev_err(&fe->dev, "dsp: prepare FE %s failed\n", fe->dai_link->name);
+ goto out;
+ }
+
+ /* run the stream event for each BE */
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->playback.stream_name,
+ SNDRV_PCM_TRIGGER_START);
+ else
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->capture.stream_name,
+ SNDRV_PCM_TRIGGER_START);
+
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_PREPARE;
+
+out:
+ fe->dsp[stream].runtime_update = runtime_update;
+ mutex_unlock(&fe->card->dsp_mutex);
+ return ret;
+}
+
+static int soc_dsp_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+	/* only hw_free backends that are either sinks or sources
+	 * to this frontend DAI */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_pcm_substream *be_substream =
+ snd_soc_dsp_get_substream(be, stream);
+
+ /* is this op for this BE ? */
+ if (!snd_soc_dsp_is_op_for_be(fe, be, stream))
+ continue;
+
+ /* only free hw when no longer used */
+ if (be->dsp[stream].users != 1)
+ continue;
+
+ if ((be->dsp[stream].state != SND_SOC_DSP_STATE_HW_PARAMS) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_PREPARE) &&
+ (be->dsp[stream].state != SND_SOC_DSP_STATE_STOP))
+ continue;
+
+ dev_dbg(&be->dev, "dsp: hw_free BE %s\n",
+ dsp_params->fe->dai_link->name);
+
+ soc_pcm_hw_free(be_substream);
+
+ be->dsp[stream].state = SND_SOC_DSP_STATE_HW_FREE;
+ }
+
+ return 0;
+}
+
+int soc_dsp_fe_dai_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret, runtime_update, stream = substream->stream;
+
+ mutex_lock(&fe->card->dsp_mutex);
+
+ runtime_update = fe->dsp[stream].runtime_update;
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_FE;
+
+ dev_dbg(&fe->dev, "dsp: hw_free FE %s\n", fe->dai_link->name);
+
+ /* call hw_free on the frontend */
+ ret = soc_pcm_hw_free(substream);
+ if (ret < 0)
+		dev_err(&fe->dev, "dsp: hw_free FE %s failed\n", fe->dai_link->name);
+
+	/* only hw_free the backends that are either sinks or sources
+	 * to this frontend DAI */
+ ret = soc_dsp_be_dai_hw_free(fe, stream);
+
+ fe->dsp[stream].state = SND_SOC_DSP_STATE_HW_FREE;
+ fe->dsp[stream].runtime_update = runtime_update;
+
+ mutex_unlock(&fe->card->dsp_mutex);
+ return ret;
+}
+
+/*
+ * FE stream event, send event to all active BEs.
+ */
+int soc_dsp_dapm_stream_event(struct snd_soc_pcm_runtime *fe,
+ int dir, const char *stream, int event)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+	/* notify every BE connected to this FE stream */
+ list_for_each_entry(dsp_params, &fe->dsp[dir].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ dev_dbg(&be->dev, "pm: BE %s stream %s event %d dir %d\n",
+ be->dai_link->name, stream, event, dir);
+
+ snd_soc_dapm_stream_event(be, stream, event);
+ }
+
+ return 0;
+}
+
+static int dsp_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_link *dsp_link = fe->dai_link->dsp_link;
+ struct snd_pcm_substream *substream = snd_soc_dsp_get_substream(fe, stream);
+ int ret;
+
+ dev_dbg(&fe->dev, "runtime %s close on FE %s\n",
+ stream ? "capture" : "playback", fe->dai_link->name);
+
+ if (dsp_link->trigger[stream] == SND_SOC_DSP_TRIGGER_BESPOKE) {
+ /* call bespoke trigger - FE takes care of all BE triggers */
+ dev_dbg(&fe->dev, "dsp: bespoke trigger FE %s cmd stop\n",
+ fe->dai_link->name);
+
+ ret = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0) {
+			dev_err(&fe->dev, "dsp: trigger FE failed %d\n", ret);
+ return ret;
+ }
+ } else {
+ dev_dbg(&fe->dev, "dsp: trigger FE %s cmd stop\n",
+ fe->dai_link->name);
+
+ ret = soc_dsp_be_dai_trigger(fe, stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = soc_dsp_be_dai_hw_free(fe, stream);
+ if (ret < 0)
+ return ret;
+
+ ret = soc_dsp_be_dai_shutdown(fe, stream);
+ if (ret < 0)
+ return ret;
+
+ /* run the stream event for each BE */
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->playback.stream_name,
+ SNDRV_PCM_TRIGGER_STOP);
+ else
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->capture.stream_name,
+ SNDRV_PCM_TRIGGER_STOP);
+
+ return 0;
+}
+
+static int dsp_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ struct snd_soc_dsp_link *dsp_link = fe->dai_link->dsp_link;
+ struct snd_pcm_substream *substream = snd_soc_dsp_get_substream(fe, stream);
+ struct snd_soc_dsp_params *dsp_params;
+ int ret;
+
+ dev_dbg(&fe->dev, "runtime %s open on FE %s\n",
+ stream ? "capture" : "playback", fe->dai_link->name);
+
+	ret = soc_dsp_be_dai_startup(fe, stream);
+	if (ret < 0)
+		goto disconnect;
+
+	ret = soc_dsp_be_dai_hw_params(fe, stream);
+	if (ret < 0)
+		goto close;
+
+	ret = soc_dsp_be_dai_prepare(fe, stream);
+	if (ret < 0)
+		goto hw_free;
+
+ /* run the stream event for each BE */
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->playback.stream_name,
+ SNDRV_PCM_TRIGGER_START);
+ else
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->capture.stream_name,
+ SNDRV_PCM_TRIGGER_START);
+
+ if (dsp_link->trigger[stream] == SND_SOC_DSP_TRIGGER_BESPOKE) {
+ /* call trigger on the frontend - FE takes care of all BE triggers */
+ dev_dbg(&fe->dev, "dsp: bespoke trigger FE %s cmd start\n",
+ fe->dai_link->name);
+
+ ret = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+			dev_err(&fe->dev, "dsp: bespoke trigger FE failed %d\n", ret);
+ goto stream_stop;
+ }
+ } else {
+ dev_dbg(&fe->dev, "dsp: trigger FE %s cmd start\n",
+ fe->dai_link->name);
+
+ ret = soc_dsp_be_dai_trigger(fe, stream,
+ SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+			dev_err(&fe->dev, "dsp: trigger BE failed %d\n", ret);
+ goto stream_stop;
+ }
+ }
+
+ return 0;
+
+stream_stop:
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->playback.stream_name,
+ SNDRV_PCM_TRIGGER_STOP);
+ else
+ soc_dsp_dapm_stream_event(fe, stream,
+ fe->cpu_dai->driver->capture.stream_name,
+ SNDRV_PCM_TRIGGER_STOP);
+hw_free:
+ soc_dsp_be_dai_hw_free(fe, stream);
+close:
+ soc_dsp_be_dai_shutdown(fe, stream);
+disconnect:
+ /* disconnect any non started BEs */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ if (be->dsp[stream].state != SND_SOC_DSP_STATE_START)
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_FREE;
+ }
+
+ return ret;
+}
+
+static int dsp_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ int ret;
+
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_BE;
+ ret = dsp_run_update_startup(fe, stream);
+ if (ret < 0)
+ dev_err(&fe->dev, "failed to startup some BEs\n");
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_NO;
+
+ return ret;
+}
+
+static int dsp_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
+{
+ int ret;
+
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_BE;
+ ret = dsp_run_update_shutdown(fe, stream);
+ if (ret < 0)
+ dev_err(&fe->dev, "failed to shutdown some BEs\n");
+ fe->dsp[stream].runtime_update = SND_SOC_DSP_UPDATE_NO;
+
+ return ret;
+}
+
+/* called when any mixer updates change the FE -> BE stream routing */
+int soc_dsp_runtime_update(struct snd_soc_dapm_widget *widget)
+{
+ struct snd_soc_card *card;
+ int i, ret = 0, start, stop;
+
+ if (widget->codec)
+ card = widget->codec->card;
+ else if (widget->platform)
+ card = widget->platform->card;
+ else
+ return -EINVAL;
+
+ mutex_lock(&widget->dapm->card->dsp_mutex);
+
+ for (i = 0; i < card->num_rtd; i++) {
+ struct snd_soc_pcm_runtime *fe = &card->rtd[i];
+
+		/* make sure link is an FE */
+		if (!fe->dai_link->dsp_link)
+			continue;
+
+		/* only check active links */
+		if (!fe->cpu_dai->active)
+			continue;
+
+ /* DAPM sync will call this to update DSP paths */
+ dev_dbg(card->dev, "DSP runtime update for FE %s\n", fe->dai_link->name);
+
+ /* skip if FE doesn't have playback capability */
+ if (!fe->cpu_dai->driver->playback.channels_min)
+ goto capture;
+
+ /* update new playback paths */
+ start = dsp_add_new_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, 1);
+ if (start) {
+ dsp_run_new_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ fe_clear_pending(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+
+ /* update old playback paths */
+ stop = dsp_prune_old_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, 1);
+ if (stop) {
+ dsp_run_old_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ fe_clear_pending(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+
+capture:
+ /* skip if FE doesn't have capture capability */
+ if (!fe->cpu_dai->driver->capture.channels_min)
+ continue;
+
+ /* update new capture paths */
+ start = dsp_add_new_paths(fe, SNDRV_PCM_STREAM_CAPTURE, 1);
+ if (start) {
+ dsp_run_new_update(fe, SNDRV_PCM_STREAM_CAPTURE);
+ fe_clear_pending(fe, SNDRV_PCM_STREAM_CAPTURE);
+ }
+
+ /* update old capture paths */
+ stop = dsp_prune_old_paths(fe, SNDRV_PCM_STREAM_CAPTURE, 1);
+ if (stop) {
+ dsp_run_old_update(fe, SNDRV_PCM_STREAM_CAPTURE);
+ fe_clear_pending(fe, SNDRV_PCM_STREAM_CAPTURE);
+ be_disconnect(fe, SNDRV_PCM_STREAM_CAPTURE);
+ }
+ }
+
+ mutex_unlock(&widget->dapm->card->dsp_mutex);
+ return ret;
+}
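+
+/*
+ * In short: for each active FE the update above walks newly routed BEs
+ * through startup -> hw_params -> prepare -> trigger(START) via
+ * dsp_run_update_startup(), and tears down BEs that lost their route with
+ * trigger(STOP) -> hw_free -> shutdown via dsp_run_update_shutdown()
+ * before be_disconnect() drops them from the FE's client list.
+ */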
+
+int soc_dsp_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->codec_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "BE digital mute %s\n", be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+		if (drv->ops && drv->ops->digital_mute && dai->playback_active)
+ drv->ops->digital_mute(dai, mute);
+ }
+
+ return 0;
+}
+
+int soc_dsp_be_cpu_dai_suspend(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* suspend for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI playback suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && !drv->ac97_control)
+ drv->suspend(dai);
+ }
+
+ /* suspend for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI capture suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && !drv->ac97_control)
+ drv->suspend(dai);
+ }
+
+ return 0;
+}
+
+int soc_dsp_be_ac97_cpu_dai_suspend(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* suspend for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI playback suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && drv->ac97_control)
+ drv->suspend(dai);
+ }
+
+ /* suspend for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI capture suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && drv->ac97_control)
+ drv->suspend(dai);
+ }
+
+ return 0;
+}
+
+int soc_dsp_be_platform_suspend(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* suspend for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_platform *platform = be->platform;
+ struct snd_soc_platform_driver *drv = platform->driver;
+ struct snd_soc_dai *dai = be->cpu_dai;
+
+ dev_dbg(&be->dev, "pm: BE platform playback suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && !platform->suspended) {
+ drv->suspend(dai);
+ platform->suspended = 1;
+ }
+ }
+
+ /* suspend for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_platform *platform = be->platform;
+ struct snd_soc_platform_driver *drv = platform->driver;
+ struct snd_soc_dai *dai = be->cpu_dai;
+
+ dev_dbg(&be->dev, "pm: BE platform capture suspend %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->suspend && !platform->suspended) {
+ drv->suspend(dai);
+ platform->suspended = 1;
+ }
+ }
+
+ return 0;
+}
+
+int soc_dsp_fe_suspend(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dai *dai = fe->cpu_dai;
+ struct snd_soc_dai_driver *dai_drv = dai->driver;
+ struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_platform_driver *plat_drv = platform->driver;
+
+ if (dai_drv->suspend && !dai_drv->ac97_control)
+ dai_drv->suspend(dai);
+
+ if (plat_drv->suspend && !platform->suspended) {
+ plat_drv->suspend(dai);
+ platform->suspended = 1;
+ }
+
+ soc_dsp_be_cpu_dai_suspend(fe);
+ soc_dsp_be_platform_suspend(fe);
+
+ return 0;
+}
+
+int soc_dsp_be_cpu_dai_resume(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* resume for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI playback resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && !drv->ac97_control)
+ drv->resume(dai);
+ }
+
+	/* resume for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI capture resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && !drv->ac97_control)
+ drv->resume(dai);
+ }
+
+ return 0;
+}
+
+int soc_dsp_be_ac97_cpu_dai_resume(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* resume for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI playback resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && drv->ac97_control)
+ drv->resume(dai);
+ }
+
+	/* resume for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai_driver *drv = dai->driver;
+
+ dev_dbg(&be->dev, "pm: BE CPU DAI capture resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && drv->ac97_control)
+ drv->resume(dai);
+ }
+
+ return 0;
+}
+
+int soc_dsp_be_platform_resume(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dsp_params *dsp_params;
+
+ /* resume for playback */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_PLAYBACK].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_platform *platform = be->platform;
+ struct snd_soc_platform_driver *drv = platform->driver;
+ struct snd_soc_dai *dai = be->cpu_dai;
+
+ dev_dbg(&be->dev, "pm: BE platform playback resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && platform->suspended) {
+ drv->resume(dai);
+ platform->suspended = 0;
+ }
+ }
+
+ /* resume for capture */
+ list_for_each_entry(dsp_params,
+ &fe->dsp[SNDRV_PCM_STREAM_CAPTURE].be_clients, list_be) {
+
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+ struct snd_soc_platform *platform = be->platform;
+ struct snd_soc_platform_driver *drv = platform->driver;
+ struct snd_soc_dai *dai = be->cpu_dai;
+
+ dev_dbg(&be->dev, "pm: BE platform capture resume %s\n",
+ be->dai_link->name);
+
+ if (be->dai_link->ignore_suspend)
+ continue;
+
+ if (drv->resume && platform->suspended) {
+ drv->resume(dai);
+ platform->suspended = 0;
+ }
+ }
+
+ return 0;
+}
+
+int soc_dsp_fe_resume(struct snd_soc_pcm_runtime *fe)
+{
+ struct snd_soc_dai *dai = fe->cpu_dai;
+ struct snd_soc_dai_driver *dai_drv = dai->driver;
+ struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_platform_driver *plat_drv = platform->driver;
+
+ soc_dsp_be_cpu_dai_resume(fe);
+ soc_dsp_be_platform_resume(fe);
+
+ if (dai_drv->resume && !dai_drv->ac97_control)
+ dai_drv->resume(dai);
+
+ if (plat_drv->resume && platform->suspended) {
+ plat_drv->resume(dai);
+ platform->suspended = 0;
+ }
+
+ return 0;
+}
+
+/* called when opening FE stream */
+int soc_dsp_fe_dai_open(struct snd_pcm_substream *fe_substream)
+{
+ struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+ int err, ret;
+ int stream = fe_substream->stream;
+
+	fe->dsp[stream].runtime = fe_substream->runtime;
+
+	/* calculate valid and active FE <-> BE dsp_params */
+	err = dsp_add_new_paths(fe, stream, 0);
+	if (err <= 0)
+		dev_warn(&fe->dev, "asoc: %s no valid %s route from source to sink\n",
+			fe->dai_link->name, stream ? "capture" : "playback");
+
+ ret = soc_dsp_fe_dai_startup(fe_substream);
+ if (ret < 0) {
+ /* clean up all links */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be)
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_FREE;
+
+ be_disconnect(fe, stream);
+ fe->dsp[stream].runtime = NULL;
+ }
+ return ret;
+}
+
+/* called when closing FE stream */
+int soc_dsp_fe_dai_close(struct snd_pcm_substream *fe_substream)
+{
+ struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+ struct snd_soc_dsp_params *dsp_params;
+ int stream = fe_substream->stream, ret;
+
+ ret = soc_dsp_fe_dai_shutdown(fe_substream);
+
+ /* mark FE's links ready to prune */
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be)
+ dsp_params->state = SND_SOC_DSP_LINK_STATE_FREE;
+
+ be_disconnect(fe, stream);
+
+ fe->dsp[stream].runtime = NULL;
+
+ return ret;
+}
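+
+/*
+ * Open and close above bracket the FE PCM lifecycle handled in this file;
+ * the soc_dsp_fe_dai_prepare(), soc_dsp_fe_dai_trigger() and
+ * soc_dsp_fe_dai_hw_free() entry points cover the remaining ops, each
+ * fanning the operation out to the connected BEs around the FE's own
+ * handler.
+ */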
+
+#ifdef CONFIG_DEBUG_FS
+static char *dsp_state_string(enum snd_soc_dsp_state state)
+{
+ switch (state) {
+ case SND_SOC_DSP_STATE_NEW:
+ return "new";
+ case SND_SOC_DSP_STATE_OPEN:
+ return "open";
+ case SND_SOC_DSP_STATE_HW_PARAMS:
+ return "hw_params";
+ case SND_SOC_DSP_STATE_PREPARE:
+ return "prepare";
+ case SND_SOC_DSP_STATE_START:
+ return "start";
+ case SND_SOC_DSP_STATE_STOP:
+ return "stop";
+ case SND_SOC_DSP_STATE_HW_FREE:
+ return "hw_free";
+ case SND_SOC_DSP_STATE_CLOSE:
+ return "close";
+ }
+
+ return "unknown";
+}
+
+static int soc_dsp_state_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t soc_dsp_show_state(struct snd_soc_pcm_runtime *fe,
+ int stream, char *buf, size_t size)
+{
+ struct snd_pcm_hw_params *params = &fe->dsp[stream].hw_params;
+ struct snd_soc_dsp_params *dsp_params;
+ ssize_t offset = 0;
+
+ /* FE state */
+ offset += snprintf(buf + offset, size - offset,
+ "[%s - %s]\n", fe->dai_link->name,
+ stream ? "Capture" : "Playback");
+
+ offset += snprintf(buf + offset, size - offset, "State: %s\n",
+ dsp_state_string(fe->dsp[stream].state));
+
+ if ((fe->dsp[stream].state >= SND_SOC_DSP_STATE_HW_PARAMS) &&
+ (fe->dsp[stream].state <= SND_SOC_DSP_STATE_STOP))
+ offset += snprintf(buf + offset, size - offset,
+ "Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+ params_channels(params),
+ params_rate(params));
+
+ /* BEs state */
+ offset += snprintf(buf + offset, size - offset, "Backends:\n");
+
+ if (list_empty(&fe->dsp[stream].be_clients)) {
+ offset += snprintf(buf + offset, size - offset,
+ " No active DSP links\n");
+ goto out;
+ }
+
+ list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dsp_params->be;
+
+ offset += snprintf(buf + offset, size - offset,
+ "- %s\n", be->dai_link->name);
+
+ offset += snprintf(buf + offset, size - offset,
+ " State: %s\n",
+				dsp_state_string(be->dsp[stream].state));
+
+		if ((be->dsp[stream].state >= SND_SOC_DSP_STATE_HW_PARAMS) &&
+		    (be->dsp[stream].state <= SND_SOC_DSP_STATE_STOP)) {
+			params = &be->dsp[stream].hw_params;
+			offset += snprintf(buf + offset, size - offset,
+				"  Hardware Params: "
+				"Format = %s, Channels = %d, Rate = %d\n",
+				snd_pcm_format_name(params_format(params)),
+				params_channels(params),
+				params_rate(params));
+		}
+ }
+
+out:
+ return offset;
+}
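+
+/*
+ * Example of the layout produced above (link names and hw_params values
+ * are purely illustrative; spacing follows the snprintf format strings):
+ *
+ *	[Media FE - Playback]
+ *	State: start
+ *	Hardware Params: Format = S16_LE, Channels = 2, Rate = 48000
+ *	Backends:
+ *	- Codec BE
+ *	  State: start
+ *	  Hardware Params: Format = S16_LE, Channels = 2, Rate = 48000
+ */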
+
+static ssize_t soc_dsp_state_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct snd_soc_pcm_runtime *fe = file->private_data;
+ ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0;
+ char *buf;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (fe->cpu_dai->driver->playback.channels_min)
+ offset += soc_dsp_show_state(fe, SNDRV_PCM_STREAM_PLAYBACK,
+ buf + offset, out_count - offset);
+
+ if (fe->cpu_dai->driver->capture.channels_min)
+ offset += soc_dsp_show_state(fe, SNDRV_PCM_STREAM_CAPTURE,
+ buf + offset, out_count - offset);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations soc_dsp_state_fops = {
+ .open = soc_dsp_state_open_file,
+ .read = soc_dsp_state_read_file,
+ .llseek = default_llseek,
+};
+
+int soc_dsp_debugfs_add(struct snd_soc_pcm_runtime *rtd)
+{
+ rtd->debugfs_dsp_root = debugfs_create_dir(rtd->dai_link->name,
+ rtd->card->debugfs_card_root);
+ if (!rtd->debugfs_dsp_root) {
+ dev_dbg(&rtd->dev,
+ "ASoC: Failed to create dsp debugfs directory %s\n",
+ rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+	rtd->debugfs_dsp_state = debugfs_create_file("state", 0444,
+ rtd->debugfs_dsp_root,
+ rtd, &soc_dsp_state_fops);
+
+ return 0;
+}
+#endif
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
+MODULE_DESCRIPTION("ALSA SoC DSP Core");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index f4aa4e0..34aa972 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -288,9 +288,10 @@
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
- struct snd_pcm *pcm)
+static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_pcm *pcm = rtd->pcm;
struct platform_device *pdev = to_platform_device(dai->platform->dev);
struct txx9aclc_soc_device *dev;
struct resource *r;